-rw-r--r--.mailmap1
-rw-r--r--Documentation/admin-guide/pm/cpuidle.rst72
-rw-r--r--Documentation/filesystems/fiemap.rst49
-rw-r--r--Documentation/security/credentials.rst5
-rw-r--r--Documentation/sound/codecs/cs35l56.rst292
-rw-r--r--Documentation/sound/codecs/index.rst9
-rw-r--r--Documentation/sound/index.rst1
-rw-r--r--Documentation/trace/ftrace.rst6
-rw-r--r--MAINTAINERS9
-rw-r--r--Makefile2
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/smap.h18
-rw-r--r--arch/x86/include/asm/special_insns.h2
-rw-r--r--arch/x86/include/asm/uaccess_64.h12
-rw-r--r--arch/x86/kernel/fred.c8
-rw-r--r--arch/x86/lib/getuser.S5
-rw-r--r--arch/x86/mm/init.c3
-rw-r--r--drivers/acpi/acpi_video.c49
-rw-r--r--drivers/block/zram/zram_drv.c1
-rw-r--r--drivers/cpufreq/Kconfig4
-rw-r--r--drivers/cpuidle/governors/teo.c91
-rw-r--r--drivers/crypto/ccp/sev-dev.c2
-rw-r--r--drivers/gpio/gpio-xilinx.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c41
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c25
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c48
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c11
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c4
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c8
-rw-r--r--drivers/gpu/drm/drm_bridge.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c1
-rw-r--r--drivers/gpu/drm/tests/drm_connector_test.c60
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c3
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c7
-rw-r--r--drivers/gpu/drm/xe/tests/xe_mocs.c3
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h25
-rw-r--r--drivers/gpu/drm/xe/xe_gt_ccs_mode.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_mcr.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c15
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c2
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c1
-rw-r--r--drivers/hwmon/acpi_power_meter.c2
-rw-r--r--drivers/hwmon/drivetemp.c2
-rw-r--r--drivers/hwmon/ltc2991.c2
-rw-r--r--drivers/hwmon/tmp513.c7
-rw-r--r--drivers/i2c/busses/i2c-rcar.c20
-rw-r--r--drivers/i2c/i2c-atr.c2
-rw-r--r--drivers/i2c/i2c-core-base.c1
-rw-r--r--drivers/i2c/i2c-slave-testunit.c19
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c6
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c3
-rw-r--r--drivers/irqchip/irqchip.c4
-rw-r--r--drivers/mtd/spi-nor/core.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c19
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c7
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c263
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c24
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c44
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c6
-rw-r--r--drivers/net/gtp.c26
-rw-r--r--drivers/net/pcs/pcs-xpcs.c4
-rw-r--r--drivers/net/pfcp.c15
-rw-r--r--drivers/of/address.c18
-rw-r--r--drivers/of/unittest-data/tests-platform.dtsi13
-rw-r--r--drivers/of/unittest.c14
-rw-r--r--drivers/pci/pcie/bwctrl.c25
-rw-r--r--drivers/platform/x86/dell/dell-uart-backlight.c5
-rw-r--r--drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c5
-rw-r--r--drivers/pmdomain/imx/imx8mp-blk-ctrl.c2
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c1
-rw-r--r--drivers/scsi/scsi_lib.c3
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/virt/vboxguest/Kconfig2
-rw-r--r--fs/9p/vfs_addr.c6
-rw-r--r--fs/afs/Makefile1
-rw-r--r--fs/afs/callback.c4
-rw-r--r--fs/afs/cell.c21
-rw-r--r--fs/afs/dir.c809
-rw-r--r--fs/afs/dir_edit.c383
-rw-r--r--fs/afs/dir_search.c227
-rw-r--r--fs/afs/dynroot.c227
-rw-r--r--fs/afs/file.c260
-rw-r--r--fs/afs/fs_operation.c113
-rw-r--r--fs/afs/fsclient.c62
-rw-r--r--fs/afs/inode.c140
-rw-r--r--fs/afs/internal.h143
-rw-r--r--fs/afs/main.c2
-rw-r--r--fs/afs/mntpt.c22
-rw-r--r--fs/afs/proc.c8
-rw-r--r--fs/afs/rotate.c4
-rw-r--r--fs/afs/rxrpc.c37
-rw-r--r--fs/afs/super.c4
-rw-r--r--fs/afs/validation.c31
-rw-r--r--fs/afs/vlclient.c1
-rw-r--r--fs/afs/write.c16
-rw-r--r--fs/afs/xdr_fs.h2
-rw-r--r--fs/afs/yfsclient.c54
-rw-r--r--fs/backing-file.c20
-rw-r--r--fs/btrfs/volumes.c4
-rw-r--r--fs/cachefiles/io.c4
-rw-r--r--fs/cachefiles/xattr.c9
-rw-r--r--fs/ceph/addr.c22
-rw-r--r--fs/dcache.c3
-rw-r--r--fs/erofs/zdata.c3
-rw-r--r--fs/ext4/inode.c3
-rw-r--r--fs/ext4/namei.c4
-rw-r--r--fs/fhandle.c115
-rw-r--r--fs/file.c22
-rw-r--r--fs/file_table.c7
-rw-r--r--fs/fs_context.c2
-rw-r--r--fs/gfs2/file.c1
-rw-r--r--fs/gfs2/quota.c3
-rw-r--r--fs/gfs2/quota.h4
-rw-r--r--fs/libfs.c163
-rw-r--r--fs/mount.h31
-rw-r--r--fs/namei.c34
-rw-r--r--fs/namespace.c212
-rw-r--r--fs/netfs/Makefile5
-rw-r--r--fs/netfs/buffered_read.c290
-rw-r--r--fs/netfs/direct_read.c78
-rw-r--r--fs/netfs/direct_write.c10
-rw-r--r--fs/netfs/internal.h41
-rw-r--r--fs/netfs/main.c6
-rw-r--r--fs/netfs/misc.c164
-rw-r--r--fs/netfs/objects.c21
-rw-r--r--fs/netfs/read_collect.c761
-rw-r--r--fs/netfs/read_pgpriv2.c207
-rw-r--r--fs/netfs/read_retry.c209
-rw-r--r--fs/netfs/read_single.c195
-rw-r--r--fs/netfs/rolling_buffer.c226
-rw-r--r--fs/netfs/stats.c4
-rw-r--r--fs/netfs/write_collect.c281
-rw-r--r--fs/netfs/write_issue.c241
-rw-r--r--fs/netfs/write_retry.c232
-rw-r--r--fs/nfs/fscache.c6
-rw-r--r--fs/nfs/fscache.h3
-rw-r--r--fs/nfsd/auth.c3
-rw-r--r--fs/nfsd/filecache.c2
-rw-r--r--fs/nfsd/nfs4recover.c3
-rw-r--r--fs/nfsd/nfsfh.c1
-rw-r--r--fs/nsfs.c5
-rw-r--r--fs/ocfs2/dir.c25
-rw-r--r--fs/open.c11
-rw-r--r--fs/overlayfs/dir.c4
-rw-r--r--fs/overlayfs/util.c4
-rw-r--r--fs/pidfs.c298
-rw-r--r--fs/pipe.c19
-rw-r--r--fs/pnode.c8
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/proc/kcore.c81
-rw-r--r--fs/proc/namespaces.c2
-rw-r--r--fs/proc/vmcore.c2
-rw-r--r--fs/select.c4
-rw-r--r--fs/smb/client/cifssmb.c23
-rw-r--r--fs/smb/client/connect.c3
-rw-r--r--fs/smb/client/file.c3
-rw-r--r--fs/smb/client/smb2ops.c2
-rw-r--r--fs/smb/client/smb2pdu.c15
-rw-r--r--fs/smb/server/smb_common.c10
-rw-r--r--fs/stat.c1
-rw-r--r--fs/vboxsf/Kconfig2
-rw-r--r--fs/xfs/xfs_ioctl.c11
-rw-r--r--fs/xfs/xfs_iops.c62
-rw-r--r--include/linux/cred.h43
-rw-r--r--include/linux/exportfs.h20
-rw-r--r--include/linux/fiemap.h16
-rw-r--r--include/linux/folio_queue.h12
-rw-r--r--include/linux/fs.h30
-rw-r--r--include/linux/hrtimer.h1
-rw-r--r--include/linux/lockref.h26
-rw-r--r--include/linux/module.h3
-rw-r--r--include/linux/mount.h4
-rw-r--r--include/linux/netfs.h54
-rw-r--r--include/linux/pid.h5
-rw-r--r--include/linux/pid_namespace.h10
-rw-r--r--include/linux/pidfs.h3
-rw-r--r--include/linux/pruss_driver.h12
-rw-r--r--include/linux/pseudo_fs.h1
-rw-r--r--include/linux/rculist.h44
-rw-r--r--include/linux/rolling_buffer.h61
-rw-r--r--include/linux/seccomp.h2
-rw-r--r--include/linux/seqlock.h2
-rw-r--r--include/linux/stat.h1
-rw-r--r--include/linux/userfaultfd_k.h12
-rw-r--r--include/net/busy_poll.h8
-rw-r--r--include/net/page_pool/helpers.h2
-rw-r--r--include/net/xdp.h1
-rw-r--r--include/net/xdp_sock_drv.h14
-rw-r--r--include/trace/events/afs.h212
-rw-r--r--include/trace/events/cachefiles.h185
-rw-r--r--include/trace/events/hugetlbfs.h2
-rw-r--r--include/trace/events/mmflags.h63
-rw-r--r--include/trace/events/netfs.h229
-rw-r--r--include/uapi/linux/fiemap.h47
-rw-r--r--include/uapi/linux/fs.h6
-rw-r--r--include/uapi/linux/stat.h99
-rw-r--r--io_uring/register.c52
-rw-r--r--io_uring/rsrc.c10
-rw-r--r--kernel/cpu.c2
-rw-r--r--kernel/cred.c50
-rw-r--r--kernel/pid.c139
-rw-r--r--kernel/pid_namespace.c43
-rw-r--r--kernel/sched/fair.c151
-rw-r--r--kernel/signal.c37
-rw-r--r--kernel/sysctl.c9
-rw-r--r--kernel/time/hrtimer.c11
-rw-r--r--kernel/time/timer_migration.c64
-rw-r--r--kernel/trace/pid_list.c2
-rw-r--r--kernel/trace/trace.c1
-rw-r--r--kernel/trace/trace.h2
-rw-r--r--kernel/trace/trace_irqsoff.c14
-rw-r--r--kernel/trace/trace_sched_switch.c2
-rw-r--r--kernel/trace/trace_sched_wakeup.c14
-rw-r--r--kernel/watch_queue.c4
-rw-r--r--lib/alloc_tag.c3
-rw-r--r--lib/kunit_iov_iter.c4
-rw-r--r--lib/lockref.c60
-rw-r--r--lib/vsprintf.c574
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/huge_memory.c12
-rw-r--r--mm/hugetlb.c14
-rw-r--r--mm/khugepaged.c4
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/mempolicy.c3
-rw-r--r--mm/mremap.c32
-rw-r--r--mm/page-writeback.c10
-rw-r--r--mm/page_alloc.c3
-rw-r--r--mm/shmem.c12
-rw-r--r--mm/vmscan.c3
-rw-r--r--mm/vmstat.c15
-rw-r--r--mm/zswap.c91
-rw-r--r--net/core/filter.c30
-rw-r--r--net/core/netdev-genl-gen.c14
-rw-r--r--net/core/netpoll.c10
-rw-r--r--net/core/pktgen.c6
-rw-r--r--net/core/xdp.c1
-rw-r--r--net/ipv4/route.c1
-rw-r--r--net/ipv4/udp.c46
-rw-r--r--net/mptcp/options.c6
-rw-r--r--net/mptcp/protocol.h9
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-manage.c16
-rw-r--r--net/ncsi/ncsi-rsp.c19
-rw-r--r--net/openvswitch/actions.c4
-rw-r--r--net/vmw_vsock/af_vsock.c18
-rw-r--r--net/vmw_vsock/virtio_transport_common.c36
-rw-r--r--net/vmw_vsock/vsock_bpf.c9
-rw-r--r--net/xdp/xsk.c14
-rw-r--r--samples/vfs/.gitignore2
-rw-r--r--samples/vfs/Makefile2
-rw-r--r--samples/vfs/mountinfo.c274
-rw-r--r--samples/vfs/samples-vfs.h241
-rw-r--r--samples/vfs/test-list-all-mounts.c150
-rwxr-xr-xscripts/decode_stacktrace.sh16
-rw-r--r--security/apparmor/apparmorfs.c2
-rw-r--r--sound/pci/hda/patch_realtek.c7
-rw-r--r--sound/soc/codecs/Kconfig1
-rw-r--r--sound/soc/codecs/cs42l43.c1
-rw-r--r--sound/soc/codecs/es8316.c10
-rw-r--r--sound/soc/codecs/es8326.c23
-rw-r--r--sound/soc/codecs/tas2781-i2c.c2
-rw-r--r--sound/soc/renesas/rcar/adg.c28
-rw-r--r--sound/soc/renesas/rcar/core.c4
-rw-r--r--sound/soc/renesas/rcar/rsnd.h2
-rw-r--r--sound/soc/samsung/Kconfig6
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py16
-rw-r--r--tools/testing/selftests/coredump/Makefile7
-rw-r--r--tools/testing/selftests/coredump/README.rst50
-rwxr-xr-xtools/testing/selftests/coredump/stackdump14
-rw-r--r--tools/testing/selftests/coredump/stackdump_test.c151
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh9
-rw-r--r--tools/testing/selftests/filesystems/nsfs/.gitignore (renamed from tools/testing/selftests/nsfs/.gitignore)1
-rw-r--r--tools/testing/selftests/filesystems/nsfs/Makefile (renamed from tools/testing/selftests/nsfs/Makefile)4
-rw-r--r--tools/testing/selftests/filesystems/nsfs/config (renamed from tools/testing/selftests/nsfs/config)0
-rw-r--r--tools/testing/selftests/filesystems/nsfs/iterate_mntns.c149
-rw-r--r--tools/testing/selftests/filesystems/nsfs/owner.c (renamed from tools/testing/selftests/nsfs/owner.c)0
-rw-r--r--tools/testing/selftests/filesystems/nsfs/pidns.c (renamed from tools/testing/selftests/nsfs/pidns.c)0
-rw-r--r--tools/testing/selftests/filesystems/statmount/Makefile2
-rw-r--r--tools/testing/selftests/filesystems/statmount/listmount_test.c66
-rw-r--r--tools/testing/selftests/mm/cow.c8
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c43
-rw-r--r--tools/testing/selftests/pid_namespace/.gitignore1
-rw-r--r--tools/testing/selftests/pid_namespace/Makefile2
-rw-r--r--tools/testing/selftests/pid_namespace/pid_max.c358
-rw-r--r--tools/testing/selftests/pidfd/.gitignore2
-rw-r--r--tools/testing/selftests/pidfd/Makefile3
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h40
-rw-r--r--tools/testing/selftests/pidfd/pidfd_bind_mount.c188
-rw-r--r--tools/testing/selftests/pidfd/pidfd_file_handle_test.c503
-rw-r--r--tools/testing/selftests/pidfd/pidfd_setns_test.c47
-rw-r--r--tools/testing/selftests/pidfd/pidfd_wait.c47
-rw-r--r--tools/testing/shared/linux/maple_tree.h2
-rw-r--r--tools/testing/vma/linux/atomic.h2
344 files changed, 9744 insertions, 4387 deletions
diff --git a/.mailmap b/.mailmap
index 48de71154296..399322897938 100644
--- a/.mailmap
+++ b/.mailmap
@@ -202,6 +202,7 @@ Elliot Berman <quic_eberman@quicinc.com> <eberman@codeaurora.org>
Enric Balletbo i Serra <eballetbo@kernel.org> <enric.balletbo@collabora.com>
Enric Balletbo i Serra <eballetbo@kernel.org> <eballetbo@iseebcn.com>
Erik Kaneda <erik.kaneda@intel.com> <erik.schmauss@intel.com>
+Ethan Carter Edwards <ethan@ethancedwards.com> Ethan Edwards <ethancarteredwards@gmail.com>
Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@microchip.com>
Eugen Hristev <eugen.hristev@linaro.org> <eugen.hristev@collabora.com>
Evgeniy Polyakov <johnpol@2ka.mipt.ru>
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index 19754beb5a4e..eb58d7a5affd 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -269,27 +269,7 @@ Namely, when invoked to select an idle state for a CPU (i.e. an idle state that
the CPU will ask the processor hardware to enter), it attempts to predict the
idle duration and uses the predicted value for idle state selection.
-It first obtains the time until the closest timer event with the assumption
-that the scheduler tick will be stopped. That time, referred to as the *sleep
-length* in what follows, is the upper bound on the time before the next CPU
-wakeup. It is used to determine the sleep length range, which in turn is needed
-to get the sleep length correction factor.
-
-The ``menu`` governor maintains two arrays of sleep length correction factors.
-One of them is used when tasks previously running on the given CPU are waiting
-for some I/O operations to complete and the other one is used when that is not
-the case. Each array contains several correction factor values that correspond
-to different sleep length ranges organized so that each range represented in the
-array is approximately 10 times wider than the previous one.
-
-The correction factor for the given sleep length range (determined before
-selecting the idle state for the CPU) is updated after the CPU has been woken
-up and the closer the sleep length is to the observed idle duration, the closer
-to 1 the correction factor becomes (it must fall between 0 and 1 inclusive).
-The sleep length is multiplied by the correction factor for the range that it
-falls into to obtain the first approximation of the predicted idle duration.
-
-Next, the governor uses a simple pattern recognition algorithm to refine its
+It first uses a simple pattern recognition algorithm to obtain a preliminary
idle duration prediction. Namely, it saves the last 8 observed idle duration
values and, when predicting the idle duration next time, it computes the average
and variance of them. If the variance is small (smaller than 400 square
@@ -301,29 +281,39 @@ Again, if the variance of them is small (in the above sense), the average is
taken as the "typical interval" value and so on, until either the "typical
interval" is determined or too many data points are disregarded, in which case
the "typical interval" is assumed to equal "infinity" (the maximum unsigned
-integer value). The "typical interval" computed this way is compared with the
-sleep length multiplied by the correction factor and the minimum of the two is
-taken as the predicted idle duration.
-
-Then, the governor computes an extra latency limit to help "interactive"
-workloads. It uses the observation that if the exit latency of the selected
-idle state is comparable with the predicted idle duration, the total time spent
-in that state probably will be very short and the amount of energy to save by
-entering it will be relatively small, so likely it is better to avoid the
-overhead related to entering that state and exiting it. Thus selecting a
-shallower state is likely to be a better option then. The first approximation
-of the extra latency limit is the predicted idle duration itself which
-additionally is divided by a value depending on the number of tasks that
-previously ran on the given CPU and now they are waiting for I/O operations to
-complete. The result of that division is compared with the latency limit coming
-from the power management quality of service, or `PM QoS <cpu-pm-qos_>`_,
-framework and the minimum of the two is taken as the limit for the idle states'
-exit latency.
+integer value).
+
+If the "typical interval" computed this way is long enough, the governor obtains
+the time until the closest timer event with the assumption that the scheduler
+tick will be stopped. That time, referred to as the *sleep length* in what follows,
+is the upper bound on the time before the next CPU wakeup. It is used to determine
+the sleep length range, which in turn is needed to get the sleep length correction
+factor.
+
+The ``menu`` governor maintains an array containing several correction factor
+values that correspond to different sleep length ranges organized so that each
+range represented in the array is approximately 10 times wider than the previous
+one.
+
+The correction factor for the given sleep length range (determined before
+selecting the idle state for the CPU) is updated after the CPU has been woken
+up and the closer the sleep length is to the observed idle duration, the closer
+to 1 the correction factor becomes (it must fall between 0 and 1 inclusive).
+The sleep length is multiplied by the correction factor for the range that it
+falls into to obtain an approximation of the predicted idle duration that is
+compared to the "typical interval" determined previously and the minimum of
+the two is taken as the idle duration prediction.
+
+If the "typical interval" value is small, which means that the CPU is likely
+to be woken up soon enough, the sleep length computation is skipped as it may
+be costly and the idle duration is simply predicted to equal the "typical
+interval" value.
Now, the governor is ready to walk the list of idle states and choose one of
them. For this purpose, it compares the target residency of each state with
-the predicted idle duration and the exit latency of it with the computed latency
-limit. It selects the state with the target residency closest to the predicted
+the predicted idle duration and the exit latency of it with the latency
+limit coming from the power management quality of service, or `PM QoS <cpu-pm-qos_>`_,
+framework. It selects the state with the target residency closest to the predicted
idle duration, but still below it, and exit latency that does not exceed the
limit.
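
To make the updated ``menu`` selection order above concrete, here is a minimal
user-space C sketch of the prediction step. It is illustrative only and is not
the kernel's ``menu`` governor code; the names, the 1000 us "small interval"
cutoff and the fixed correction factor are assumptions made for the example::

    /*
     * Illustrative model of the prediction order described above; not the
     * kernel implementation.  The cutoff and correction factor are made up.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define INTERVALS        8
    #define SMALL_TYPICAL_US 1000

    static uint64_t typical_interval_us(const uint64_t *recent, int n)
    {
        /* Stand-in for the average/variance filtering described above. */
        uint64_t sum = 0;

        for (int i = 0; i < n; i++)
            sum += recent[i];
        return sum / n;
    }

    static uint64_t predict_idle_us(const uint64_t *recent, int n,
                                    uint64_t sleep_length_us, double correction)
    {
        uint64_t typical = typical_interval_us(recent, n);

        /* Small "typical interval": skip the costly sleep length lookup. */
        if (typical < SMALL_TYPICAL_US)
            return typical;

        /* Otherwise scale the sleep length and take the minimum. */
        uint64_t corrected = (uint64_t)(sleep_length_us * correction);

        return corrected < typical ? corrected : typical;
    }

    int main(void)
    {
        uint64_t recent[INTERVALS] = { 2500, 2600, 2400, 2550,
                                       2450, 2500, 2600, 2500 };

        printf("predicted idle: %llu us\n",
               (unsigned long long)predict_idle_us(recent, INTERVALS,
                                                   4000, 0.8));
        return 0;
    }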
diff --git a/Documentation/filesystems/fiemap.rst b/Documentation/filesystems/fiemap.rst
index 93fc96f760aa..23b3ed229e49 100644
--- a/Documentation/filesystems/fiemap.rst
+++ b/Documentation/filesystems/fiemap.rst
@@ -12,21 +12,10 @@ returns a list of extents.
Request Basics
--------------
-A fiemap request is encoded within struct fiemap::
-
- struct fiemap {
- __u64 fm_start; /* logical offset (inclusive) at
- * which to start mapping (in) */
- __u64 fm_length; /* logical length of mapping which
- * userspace cares about (in) */
- __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */
- __u32 fm_mapped_extents; /* number of extents that were
- * mapped (out) */
- __u32 fm_extent_count; /* size of fm_extents array (in) */
- __u32 fm_reserved;
- struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
- };
+A fiemap request is encoded within struct fiemap:
+
+.. kernel-doc:: include/uapi/linux/fiemap.h
+ :identifiers: fiemap
fm_start, and fm_length specify the logical range within the file
which the process would like mappings for. Extents returned mirror
@@ -60,6 +49,8 @@ FIEMAP_FLAG_XATTR
If this flag is set, the extents returned will describe the inodes
extended attribute lookup tree, instead of its data tree.
+FIEMAP_FLAG_CACHE
+ This flag requests caching of the extents.
Extent Mapping
--------------
@@ -77,18 +68,10 @@ complete the requested range and will not have the FIEMAP_EXTENT_LAST
flag set (see the next section on extent flags).
Each extent is described by a single fiemap_extent structure as
-returned in fm_extents::
-
- struct fiemap_extent {
- __u64 fe_logical; /* logical offset in bytes for the start of
- * the extent */
- __u64 fe_physical; /* physical offset in bytes for the start
- * of the extent */
- __u64 fe_length; /* length in bytes for the extent */
- __u64 fe_reserved64[2];
- __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */
- __u32 fe_reserved[3];
- };
+returned in fm_extents:
+
+.. kernel-doc:: include/uapi/linux/fiemap.h
+ :identifiers: fiemap_extent
All offsets and lengths are in bytes and mirror those on disk. It is valid
for an extents logical offset to start before the request or its logical
@@ -175,6 +158,8 @@ FIEMAP_EXTENT_MERGED
userspace would be highly inefficient, the kernel will try to merge most
adjacent blocks into 'extents'.
+FIEMAP_EXTENT_SHARED
+ This flag is set if the space of the extent is shared with other files.
VFS -> File System Implementation
---------------------------------
@@ -191,14 +176,10 @@ each discovered extent::
u64 len);
->fiemap is passed struct fiemap_extent_info which describes the
-fiemap request::
-
- struct fiemap_extent_info {
- unsigned int fi_flags; /* Flags as passed from user */
- unsigned int fi_extents_mapped; /* Number of mapped extents */
- unsigned int fi_extents_max; /* Size of fiemap_extent array */
- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent array */
- };
+fiemap request:
+
+.. kernel-doc:: include/linux/fiemap.h
+ :identifiers: fiemap_extent_info
It is intended that the file system should not need to access any of this
structure directly. Filesystem handlers should be tolerant to signals and return
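
As a minimal user-space illustration of the request and extent structures
described above, the following sketch issues FS_IOC_FIEMAP on a file and walks
the returned extents. It is an example only; the extent count of 32 is an
arbitrary choice and error handling is kept short::

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>
    #include <linux/fiemap.h>

    int main(int argc, char **argv)
    {
        if (argc != 2) {
            fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }

        int fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        unsigned int count = 32;        /* room for up to 32 extents */
        struct fiemap *fm = calloc(1, sizeof(*fm) +
                                   count * sizeof(struct fiemap_extent));
        if (!fm)
            return 1;

        fm->fm_start = 0;
        fm->fm_length = FIEMAP_MAX_OFFSET;      /* map the whole file */
        fm->fm_flags = FIEMAP_FLAG_SYNC;        /* flush dirty data first */
        fm->fm_extent_count = count;

        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
            perror("FS_IOC_FIEMAP");
            return 1;
        }

        for (unsigned int i = 0; i < fm->fm_mapped_extents; i++) {
            struct fiemap_extent *fe = &fm->fm_extents[i];

            printf("logical %llu physical %llu length %llu flags 0x%x\n",
                   (unsigned long long)fe->fe_logical,
                   (unsigned long long)fe->fe_physical,
                   (unsigned long long)fe->fe_length,
                   fe->fe_flags);
        }

        free(fm);
        close(fd);
        return 0;
    }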
diff --git a/Documentation/security/credentials.rst b/Documentation/security/credentials.rst
index 357328d566c8..2aa0791bcefe 100644
--- a/Documentation/security/credentials.rst
+++ b/Documentation/security/credentials.rst
@@ -527,11 +527,6 @@ There are some functions to help manage credentials:
This gets a reference on a live set of credentials, returning a pointer to
that set of credentials.
- - ``struct cred *get_new_cred(struct cred *cred);``
-
- This gets a reference on a set of credentials that is under construction
- and is thus still mutable, returning a pointer to that set of credentials.
-
Open File Credentials
=====================
diff --git a/Documentation/sound/codecs/cs35l56.rst b/Documentation/sound/codecs/cs35l56.rst
new file mode 100644
index 000000000000..98c6f6c74394
--- /dev/null
+++ b/Documentation/sound/codecs/cs35l56.rst
@@ -0,0 +1,292 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+=====================================================================
+Audio drivers for Cirrus Logic CS35L54/56/57 Boosted Smart Amplifiers
+=====================================================================
+:Copyright: 2025 Cirrus Logic, Inc. and
+ Cirrus Logic International Semiconductor Ltd.
+
+Contact: patches@opensource.cirrus.com
+
+Summary
+=======
+
+The high-level summary of this document is:
+
+**If you have a laptop that uses CS35L54/56/57 amplifiers but audio is not
+working, DO NOT ATTEMPT TO USE FIRMWARE AND SETTINGS FROM ANOTHER LAPTOP,
+EVEN IF THAT LAPTOP SEEMS SIMILAR.**
+
+The CS35L54/56/57 amplifiers must be correctly configured for the power
+supply voltage, speaker impedance, maximum speaker voltage/current, and
+other external hardware connections.
+
+The amplifiers feature advanced boost technology that increases the voltage
+used to drive the speakers, while proprietary speaker protection algorithms
+allow these boosted amplifiers to push the limits of the speakers without
+causing damage. These **must** be configured correctly.
+
+Supported Cirrus Logic amplifiers
+---------------------------------
+
+The cs35l56 drivers support:
+
+* CS35L54
+* CS35L56
+* CS35L57
+
+There are two drivers in the kernel:
+
+*For systems using SoundWire*: sound/soc/codecs/cs35l56.c and associated files
+
+*For systems using HDA*: sound/pci/hda/cs35l56_hda.c
+
+Firmware
+========
+
+The amplifier is controlled and managed by firmware running on the internal
+DSP. Firmware files are essential to enable the full capabilities of the
+amplifier.
+
+Firmware is distributed in the linux-firmware repository:
+https://gitlab.com/kernel-firmware/linux-firmware.git
+
+On most SoundWire systems the amplifier has a default minimum capability to
+produce audio. However this will be
+
+* at low volume, to protect the speakers, since the speaker specifications
+ and power supply voltages are unknown.
+* a mono mix of left and right channels.
+
+On some SoundWire systems that have both CS42L43 and CS35L56/57 the CS35L56/57
+receive their audio from the CS42L43 instead of directly from the host
+SoundWire interface. These systems can be identified by the CS42L43 showing
+in dmesg as a SoundWire device, but the CS35L56/57 as SPI. On these systems
+the firmware is *mandatory* to enable receiving the audio from the CS42L43.
+
+On HDA systems the firmware is *mandatory* to enable HDA bridge mode. There
+will not be any audio from the amplifiers without firmware.
+
+Cirrus Logic firmware files
+---------------------------
+
+Each amplifier requires two firmware files. One file has a .wmfw suffix, the
+other has a .bin suffix.
+
+The firmware is customized by the OEM to match the hardware of each laptop,
+and the firmware is specific to that laptop. Because of this, there are many
+firmware files in linux-firmware for these amplifiers. Firmware files are
+**not interchangeable between laptops**.
+
+Cirrus Logic submits files for known laptops to the upstream linux-firmware
+repository. Providing Cirrus Logic is aware of a particular laptop and has
+permission from the manufacturer to publish the firmware, it will be pushed
+to linux-firmware. You may need to upgrade to a newer release of
+linux-firmware to obtain the firmware for your laptop.
+
+**Important:** the Makefile for linux-firmware creates symlinks that are listed
+in the WHENCE file. These symlinks are required for the CS35L56 driver to be
+able to load the firmware.
+
+How do I know which firmware file I should have?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+All firmware file names are qualified with a unique "system ID". On normal
+x86 PCs with PCI audio this is the Vendor Subsystem ID (SSID) of the host
+PCI audio interface.
+
+The SSID can be viewed using the lspci tool::
+
+ lspci -v -nn | grep -A2 -i audio
+ 0000:00:1f.3 Audio device [0403]: Intel Corporation Meteor Lake-P HD Audio Controller [8086:7e28]
+ Subsystem: Dell Meteor Lake-P HD Audio Controller [1028:0c63]
+
+In this example the SSID is 10280c63.
+
+The format of the firmware file names is:
+
+ cs35lxx-b0-dsp1-misc-SSID[-spkidX]-ampN
+
+Where:
+
+ * cs35lxx-b0 is the amplifier model and silicon revision. This information
+ is logged by the driver during initialization.
+ * SSID is the 8-digit hexadecimal SSID value.
+ * ampN is the amplifier number (for example amp1). This is the same as
+ the prefix on the ALSA control names except that it is always lower-case
+ in the file name.
+ * spkidX is an optional part, used for laptops that have firmware
+ configurations for different makes and models of internal speakers.
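
As a concrete, purely illustrative example of the naming scheme above, the
sketch below assembles the expected ``.bin`` file name for the SSID from the
earlier lspci output and amplifier 1, with and without a speaker ID. The helper
and the chosen values are assumptions for illustration, not driver code::

    #include <stdio.h>

    static void bin_name(char *buf, size_t len, const char *part,
                         const char *rev, unsigned int ssid, int spkid, int amp)
    {
        if (spkid >= 0)
            snprintf(buf, len, "%s-%s-dsp1-misc-%08x-spkid%d-amp%d.bin",
                     part, rev, ssid, spkid, amp);
        else
            snprintf(buf, len, "%s-%s-dsp1-misc-%08x-amp%d.bin",
                     part, rev, ssid, amp);
    }

    int main(void)
    {
        char name[128];

        bin_name(name, sizeof(name), "cs35l56", "b0", 0x10280c63, -1, 1);
        printf("%s\n", name);   /* cs35l56-b0-dsp1-misc-10280c63-amp1.bin */

        bin_name(name, sizeof(name), "cs35l56", "b0", 0x10280c63, 0, 1);
        printf("%s\n", name);   /* cs35l56-b0-dsp1-misc-10280c63-spkid0-amp1.bin */
        return 0;
    }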
+
+Sound Open Firmware and ALSA topology files
+-------------------------------------------
+
+All SoundWire systems will require a Sound Open Firmware (SOF) for the
+host CPU audio DSP, together with an ALSA topology file (.tplg).
+
+The SOF firmware will usually be provided by the manufacturer of the host
+CPU (i.e. Intel or AMD). The .tplg file is normally part of the SOF firmware
+release.
+
+SOF binary builds are available from: https://github.com/thesofproject/sof-bin/releases
+
+The main SOF source is here: https://github.com/thesofproject
+
+ALSA-ucm configurations
+-----------------------
+Typically an appropriate ALSA-ucm configuration file is needed for
+use-case managers and audio servers such as PipeWire.
+
+Configuration files are available from the alsa-ucm-conf repository:
+https://git.alsa-project.org/?p=alsa-ucm-conf.git
+
+Kernel log messages
+===================
+
+SoundWire
+---------
+A successful initialization will look like this (this will be repeated for
+each amplifier)::
+
+ [ 7.568374] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_P not found, using dummy regulator
+ [ 7.605208] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_IO not found, using dummy regulator
+ [ 7.605313] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_A not found, using dummy regulator
+ [ 7.939279] cs35l56 sdw:0:0:01fa:3556:01:0: Cirrus Logic CS35L56 Rev B0 OTP3 fw:3.4.4 (patched=0)
+ [ 7.947844] cs35l56 sdw:0:0:01fa:3556:01:0: Slave 4 state check1: UNATTACHED, status was 1
+ [ 8.740280] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_B not found, using dummy regulator
+ [ 8.740552] cs35l56 sdw:0:0:01fa:3556:01:0: supply VDD_AMP not found, using dummy regulator
+ [ 9.242164] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: format 3 timestamp 0x66b2b872
+ [ 9.242173] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: Tue 05 Dec 2023 21:37:21 GMT Standard Time
+ [ 9.991709] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: Firmware: 1a00d6 vendor: 0x2 v3.11.23, 41 algorithms
+ [10.039098] cs35l56 sdw:0:0:01fa:3556:01:0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx-amp1.bin: v3.11.23
+ [10.879235] cs35l56 sdw:0:0:01fa:3556:01:0: Slave 4 state check1: UNATTACHED, status was 1
+ [11.401536] cs35l56 sdw:0:0:01fa:3556:01:0: Calibration applied
+
+HDA
+---
+A successful initialization will look like this (this will be repeated for
+each amplifier)::
+
+ [ 6.306475] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: Cirrus Logic CS35L56 Rev B0 OTP3 fw:3.4.4 (patched=0)
+ [ 6.613892] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP system name: 'xxxxxxxx', amp name: 'AMP1'
+ [ 8.266660] snd_hda_codec_cs8409 ehdaudio0D0: bound i2c-CSC3556:00-cs35l56-hda.0 (ops cs35l56_hda_comp_ops [snd_hda_scodec_cs35l56])
+ [ 8.287525] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: format 3 timestamp 0x66b2b872
+ [ 8.287528] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw: Tue 05 Dec 2023 21:37:21 GMT Standard Time
+ [ 9.984335] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: Firmware: 1a00d6 vendor: 0x2 v3.11.23, 41 algorithms
+ [10.085797] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx-amp1.bin: v3.11.23
+ [10.655237] cs35l56-hda i2c-CSC3556:00-cs35l56-hda.0: Calibration applied
+
+Important messages
+~~~~~~~~~~~~~~~~~~
+Cirrus Logic CS35L56 Rev B0 OTP3 fw:3.4.4 (patched=0)
+ Shows that the driver has been able to read device ID registers from the
+ amplifier.
+
+ * The actual amplifier type and silicon revision (CS35L56 B0 in this
+ example) is shown, as read from the amplifier identification registers.
+ * (patched=0) is normal, and indicates that the amplifier has been hard
+ reset and is running default ROM firmware.
+ * (patched=1) means that something has previously downloaded firmware
+ to the amplifier and the driver does not have control of the RESET
+ signal to be able to replace this preloaded firmware. This is normal
+ for systems where the BIOS downloads firmware to the amplifiers
+ before OS boot.
+ This status can also be seen if the cs35l56 kernel module is unloaded
+ and reloaded on a system where the driver does not have control of
+ RESET. SoundWire systems typically do not give the driver control of
+ RESET and only a BIOS (re)boot can reset the amplifiers.
+
+DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx.wmfw
+ Shows that a .wmfw firmware file was found and downloaded.
+
+DSP1: cirrus/cs35l56-b0-dsp1-misc-xxxxxxxx-amp1.bin
+ Shows that a .bin firmware file was found and downloaded.
+
+Calibration applied
+ Factory calibration data in EFI was written to the amplifier.
+
+Error messages
+==============
+This section explains some of the error messages that the driver can log.
+
+Algorithm coefficient version %d.%d.%d but expected %d.%d.%d
+ The version of the .bin file content does not match the loaded firmware.
+ Caused by mismatched .wmfw and .bin file, or .bin file was found but
+ .wmfw was not.
+
+No %s for algorithm %x
+ The version of the .bin file content does not match the loaded firmware.
+ Caused by mismatched .wmfw and .bin file, or .bin file was found but
+ .wmfw was not.
+
+.bin file required but not found
+ HDA driver did not find a .bin file that matches this hardware.
+
+Calibration disabled due to missing firmware controls
+ Driver was not able to write EFI calibration data to firmware registers.
+ This typically means that either:
+
+ * The driver did not find a suitable wmfw for this hardware, or
+ * The amplifier has already been patched with firmware by something
+ previously, and the driver does not have control of a hard RESET line
+ to be able to reset the amplifier and download the firmware files it
+ found. This situation is indicated by the device identification
+ string in the kernel log showing "(patched=1)".
+
+Failed to write calibration
+ Same meaning and cause as "Calibration disabled due to missing firmware
+ controls"
+
+Failed to read calibration data from EFI
+ Factory calibration data in EFI is missing, empty or corrupt.
+ This is most likely to be caused by accidentally deleting the file from
+ the EFI filesystem.
+
+No calibration for silicon ID
+ The factory calibration data in EFI does not match this hardware.
+ The most likely cause is that an amplifier has been replaced on the
+ motherboard without going through the manufacturer's calibration process to
+ generate calibration data for the new amplifier.
+
+Did not find any buses for CSCxxxx
+ Only on HDA systems. The HDA codec driver found an ACPI entry for
+ Cirrus Logic companion amps, but could not enumerate the ACPI entries for
+ the I2C/SPI buses. The most likely cause of this is that:
+
+ * The relevant bus driver (I2C or SPI) is not part of the kernel.
+ * The HDA codec driver was built-in to the kernel but the I2C/SPI
+ bus driver is a module and so the HDA codec driver cannot call the
+ bus driver functions.
+
+init_completion timed out
+ The SoundWire bus controller (host end) did not enumerate the amplifier.
+ In other words, the ACPI says there is an amplifier but for some reason
+ it was not detected on the bus.
+
+No AF01 node
+ Indicates an error in ACPI. A SoundWire system should have a Device()
+ node named "AF01" but it was not found.
+
+Failed to get spk-id-gpios
+ ACPI says that the driver should request a GPIO but the driver was not
+ able to get that GPIO. The most likely cause is that the kernel does not
+ include the correct GPIO or PINCTRL driver for this system.
+
+Failed to read spk-id
+ ACPI says that the driver should request a GPIO but the driver was not
+ able to read that GPIO.
+
+Unexpected spk-id element count
+ AF01 contains more speaker ID GPIO entries than the driver supports.
+
+Overtemp error
+ Amplifier overheat protection was triggered and the amplifier shut down
+ to protect itself.
+
+Amp short error
+ Amplifier detected a short-circuit on the speaker output pins and shut
+ down for protection. This would normally indicate a damaged speaker.
+
+Hibernate wake failed
+ The driver tried to wake the amplifier from its power-saving state but
+ did not see the expected responses from the amplifier. This can be caused
+ by using firmware that does not match the hardware.
diff --git a/Documentation/sound/codecs/index.rst b/Documentation/sound/codecs/index.rst
new file mode 100644
index 000000000000..2cb95d87bbef
--- /dev/null
+++ b/Documentation/sound/codecs/index.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Codec-Specific Information
+==========================
+
+.. toctree::
+ :maxdepth: 2
+
+ cs35l56
diff --git a/Documentation/sound/index.rst b/Documentation/sound/index.rst
index c437f2a4bc85..51cd736f65b5 100644
--- a/Documentation/sound/index.rst
+++ b/Documentation/sound/index.rst
@@ -13,6 +13,7 @@ Sound Subsystem Documentation
alsa-configuration
hd-audio/index
cards/index
+ codecs/index
utimers
.. only:: subproject and html
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 272464bb7c60..2b74f96d09d5 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -810,6 +810,12 @@ Here is the list of current tracers that may be configured.
to draw a graph of function calls similar to C code
source.
+ Note that the function graph tracer calculates the timings of when the
+ function starts and returns internally and for each instance. If
+ there are two instances that run the function graph tracer and trace
+ the same functions, the length of the timings may be slightly off as
+ each reads the timestamp separately and not at the same time.
+
"blk"
The block tracer. The tracer used by the blktrace user
diff --git a/MAINTAINERS b/MAINTAINERS
index a87ddad78e26..76ded4547b7b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5465,6 +5465,7 @@ L: linux-sound@vger.kernel.org
L: patches@opensource.cirrus.com
S: Maintained
F: Documentation/devicetree/bindings/sound/cirrus,cs*
+F: Documentation/sound/codecs/cs*
F: drivers/mfd/cs42l43*
F: drivers/pinctrl/cirrus/pinctrl-cs42l43*
F: drivers/spi/spi-cs42l43*
@@ -12383,6 +12384,13 @@ F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
+KCORE
+M: Omar Sandoval <osandov@osandov.com>
+L: linux-debuggers@vger.kernel.org
+S: Maintained
+F: fs/proc/kcore.c
+F: include/linux/kcore.h
+
KCOV
R: Dmitry Vyukov <dvyukov@google.com>
R: Andrey Konovalov <andreyknvl@gmail.com>
@@ -21988,6 +21996,7 @@ W: https://github.com/thesofproject/linux/
F: sound/soc/sof/
SOUND - GENERIC SOUND CARD (Simple-Audio-Card, Audio-Graph-Card)
+M: Mark Brown <broonie@kernel.org>
M: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
S: Supported
L: linux-sound@vger.kernel.org
diff --git a/Makefile b/Makefile
index e20a62ad397f..b9464c88ac72 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9d7bd0ae48c4..ef6cfea9df73 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -83,7 +83,6 @@ config X86
select ARCH_HAS_DMA_OPS if GART_IOMMU || XEN
select ARCH_HAS_EARLY_DEBUG if KGDB
select ARCH_HAS_ELF_RANDOMIZE
- select ARCH_HAS_EXECMEM_ROX if X86_64
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index bab490379c65..2de1e5a75c57 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -13,30 +13,26 @@
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-/* "Raw" instruction opcodes */
-#define __ASM_CLAC ".byte 0x0f,0x01,0xca"
-#define __ASM_STAC ".byte 0x0f,0x01,0xcb"
-
#ifdef __ASSEMBLY__
#define ASM_CLAC \
- ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP
+ ALTERNATIVE "", "clac", X86_FEATURE_SMAP
#define ASM_STAC \
- ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP
+ ALTERNATIVE "", "stac", X86_FEATURE_SMAP
#else /* __ASSEMBLY__ */
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
+ alternative("", "clac", X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", __ASM_STAC, X86_FEATURE_SMAP);
+ alternative("", "stac", X86_FEATURE_SMAP);
}
static __always_inline unsigned long smap_save(void)
@@ -44,7 +40,7 @@ static __always_inline unsigned long smap_save(void)
unsigned long flags;
asm volatile ("# smap_save\n\t"
- ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC "\n\t",
+ ALTERNATIVE("", "pushf; pop %0; " "clac" "\n\t",
X86_FEATURE_SMAP)
: "=rm" (flags) : : "memory", "cc");
@@ -61,9 +57,9 @@ static __always_inline void smap_restore(unsigned long flags)
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
+ ALTERNATIVE("", "clac", X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)
+ ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index aec6e2d3aa1d..98bfc097389c 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -217,7 +217,7 @@ fail:
#define nop() asm volatile ("nop")
-static inline void serialize(void)
+static __always_inline void serialize(void)
{
/* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. */
asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory");
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index b0a887209400..c52f0133425b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -63,13 +63,13 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
*/
static inline void __user *mask_user_address(const void __user *ptr)
{
- unsigned long mask;
+ void __user *ret;
asm("cmp %1,%0\n\t"
- "sbb %0,%0"
- :"=r" (mask)
- :"r" (ptr),
- "0" (runtime_const_ptr(USER_PTR_MAX)));
- return (__force void __user *)(mask | (__force unsigned long)ptr);
+ "cmova %1,%0"
+ :"=r" (ret)
+ :"r" (runtime_const_ptr(USER_PTR_MAX)),
+ "0" (ptr));
+ return ret;
}
#define masked_user_access_begin(x) ({ \
__auto_type __masked_ptr = (x); \
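
In plain C, the effect of the new ``cmp``/``cmova`` pair in
``mask_user_address()`` is roughly the clamp shown below. This is an
illustrative model of the instruction sequence, not kernel code::

    /*
     * Rough model of the cmp + cmova sequence above: an address above
     * USER_PTR_MAX is clamped to USER_PTR_MAX instead of being masked,
     * so no conditional branch is needed and the result can never reach
     * a kernel address.  Illustrative only.
     */
    static inline unsigned long mask_user_address_model(unsigned long ptr,
                                                        unsigned long user_ptr_max)
    {
        return ptr > user_ptr_max ? user_ptr_max : ptr;
    }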
diff --git a/arch/x86/kernel/fred.c b/arch/x86/kernel/fred.c
index 8d32c3f48abc..5e2cd1004980 100644
--- a/arch/x86/kernel/fred.c
+++ b/arch/x86/kernel/fred.c
@@ -50,7 +50,13 @@ void cpu_init_fred_exceptions(void)
FRED_CONFIG_ENTRYPOINT(asm_fred_entrypoint_user));
wrmsrl(MSR_IA32_FRED_STKLVLS, 0);
- wrmsrl(MSR_IA32_FRED_RSP0, 0);
+
+ /*
+ * After a CPU offline/online cycle, the FRED RSP0 MSR should be
+ * resynchronized with its per-CPU cache.
+ */
+ wrmsrl(MSR_IA32_FRED_RSP0, __this_cpu_read(fred_rsp0));
+
wrmsrl(MSR_IA32_FRED_RSP1, 0);
wrmsrl(MSR_IA32_FRED_RSP2, 0);
wrmsrl(MSR_IA32_FRED_RSP3, 0);
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 4357ec2a0bfc..89ecd57c9d42 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -44,9 +44,8 @@
.pushsection runtime_ptr_USER_PTR_MAX,"a"
.long 1b - 8 - .
.popsection
- cmp %rax, %rdx
- sbb %rdx, %rdx
- or %rdx, %rax
+ cmp %rdx, %rax
+ cmova %rdx, %rax
.else
cmp $TASK_SIZE_MAX-\size+1, %eax
jae .Lbad_get_user
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index c6d29f283001..62aa4d66a032 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -1080,7 +1080,8 @@ struct execmem_info __init *execmem_arch_setup(void)
start = MODULES_VADDR + offset;
- if (IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX)) {
+ if (IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX) &&
+ cpu_feature_enabled(X86_FEATURE_PSE)) {
pgprot = PAGE_KERNEL_ROX;
flags = EXECMEM_KASAN_SHADOW | EXECMEM_ROX_CACHE;
} else {
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 8274a17872ed..a972831dbd66 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -610,16 +610,28 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
return 0;
}
+/**
+ * acpi_video_device_EDID() - Get EDID from ACPI _DDC
+ * @device: video output device (LCD, CRT, ..)
+ * @edid: address for returned EDID pointer
+ * @length: _DDC length to request (must be a multiple of 128)
+ *
+ * Get EDID from ACPI _DDC. On success, a pointer to the EDID data is written
+ * to the @edid address, and the length of the EDID is returned. The caller is
+ * responsible for freeing the edid pointer.
+ *
+ * Return the length of EDID (positive value) on success or error (negative
+ * value).
+ */
static int
-acpi_video_device_EDID(struct acpi_video_device *device,
- union acpi_object **edid, int length)
+acpi_video_device_EDID(struct acpi_video_device *device, void **edid, int length)
{
- int status;
+ acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
-
+ int ret;
*edid = NULL;
@@ -636,16 +648,17 @@ acpi_video_device_EDID(struct acpi_video_device *device,
obj = buffer.pointer;
- if (obj && obj->type == ACPI_TYPE_BUFFER)
- *edid = obj;
- else {
+ if (obj && obj->type == ACPI_TYPE_BUFFER) {
+ *edid = kmemdup(obj->buffer.pointer, obj->buffer.length, GFP_KERNEL);
+ ret = *edid ? obj->buffer.length : -ENOMEM;
+ } else {
acpi_handle_debug(device->dev->handle,
"Invalid _DDC data for length %d\n", length);
- status = -EFAULT;
- kfree(obj);
+ ret = -EFAULT;
}
- return status;
+ kfree(obj);
+ return ret;
}
/* bus */
@@ -1435,9 +1448,7 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
{
struct acpi_video_bus *video;
struct acpi_video_device *video_device;
- union acpi_object *buffer = NULL;
- acpi_status status;
- int i, length;
+ int i, length, ret;
if (!device || !acpi_driver_data(device))
return -EINVAL;
@@ -1477,16 +1488,10 @@ int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
}
for (length = 512; length > 0; length -= 128) {
- status = acpi_video_device_EDID(video_device, &buffer,
- length);
- if (ACPI_SUCCESS(status))
- break;
+ ret = acpi_video_device_EDID(video_device, edid, length);
+ if (ret > 0)
+ return ret;
}
- if (!length)
- continue;
-
- *edid = buffer->buffer.pointer;
- return length;
}
return -ENODEV;
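
A hedged sketch of a consumer following the new contract above: the EDID
buffer is now a kmemdup()'d copy owned by the caller and must be freed once
the returned length has been consumed. The helper name and the use of the LCD
display type are assumptions for illustration::

    #include <linux/acpi.h>
    #include <linux/printk.h>
    #include <linux/slab.h>
    #include <acpi/video.h>

    /* Hypothetical caller; "adev" is whatever acpi_device the caller holds. */
    static int dump_panel_edid(struct acpi_device *adev)
    {
        void *edid;
        int len;

        len = acpi_video_get_edid(adev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid);
        if (len < 0)
            return len;

        print_hex_dump_bytes("EDID: ", DUMP_PREFIX_OFFSET, edid, len);
        kfree(edid);    /* the buffer is a copy owned by the caller */
        return 0;
    }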
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 45df5eeabc5e..7903a4da40ac 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1468,6 +1468,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
zram->mem_pool = zs_create_pool(zram->disk->disk_name);
if (!zram->mem_pool) {
vfree(zram->table);
+ zram->table = NULL;
return false;
}
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 92a83a9bb2e1..26e98fea991a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -325,8 +325,6 @@ config QORIQ_CPUFREQ
This adds the CPUFreq driver support for Freescale QorIQ SoCs
which are capable of changing the CPU's frequency dynamically.
-endif
-
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
depends on ACPI_PROCESSOR
@@ -355,4 +353,6 @@ config ACPI_CPPC_CPUFREQ_FIE
If in doubt, say N.
+endif
+
endmenu
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index f2992f92d8db..173ddcac540a 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -10,25 +10,27 @@
* DOC: teo-description
*
* The idea of this governor is based on the observation that on many systems
- * timer events are two or more orders of magnitude more frequent than any
- * other interrupts, so they are likely to be the most significant cause of CPU
- * wakeups from idle states. Moreover, information about what happened in the
- * (relatively recent) past can be used to estimate whether or not the deepest
- * idle state with target residency within the (known) time till the closest
- * timer event, referred to as the sleep length, is likely to be suitable for
- * the upcoming CPU idle period and, if not, then which of the shallower idle
- * states to choose instead of it.
+ * timer interrupts are two or more orders of magnitude more frequent than any
+ * other interrupt types, so they are likely to dominate CPU wakeup patterns.
+ * Moreover, in principle, the time when the next timer event is going to occur
+ * can be determined at the idle state selection time, although doing that may
+ * be costly, so it can be regarded as the most reliable source of information
+ * for idle state selection.
*
- * Of course, non-timer wakeup sources are more important in some use cases
- * which can be covered by taking a few most recent idle time intervals of the
- * CPU into account. However, even in that context it is not necessary to
- * consider idle duration values greater than the sleep length, because the
- * closest timer will ultimately wake up the CPU anyway unless it is woken up
- * earlier.
+ * Of course, non-timer wakeup sources are more important in some use cases,
+ * but even then it is generally unnecessary to consider idle duration values
+ * greater than the time till the next timer event, referred to as the sleep
+ * length in what follows, because the closest timer will ultimately wake up the
+ * CPU anyway unless it is woken up earlier.
*
- * Thus this governor estimates whether or not the prospective idle duration of
- * a CPU is likely to be significantly shorter than the sleep length and selects
- * an idle state for it accordingly.
+ * However, since obtaining the sleep length may be costly, the governor first
+ * checks if it can select a shallow idle state using wakeup pattern information
+ * from recent times, in which case it can do without knowing the sleep length
+ * at all. For this purpose, it counts CPU wakeup events and looks for an idle
+ * state whose target residency has not exceeded the idle duration (measured
+ * after wakeup) in the majority of relevant recent cases. If the target
+ * residency of that state is small enough, it may be used right away and the
+ * sleep length need not be determined.
*
* The computations carried out by this governor are based on using bins whose
* boundaries are aligned with the target residency parameter values of the CPU
@@ -39,7 +41,11 @@
* idle state 2, the third bin spans from the target residency of idle state 2
* up to, but not including, the target residency of idle state 3 and so on.
* The last bin spans from the target residency of the deepest idle state
- * supplied by the driver to infinity.
+ * supplied by the driver to the scheduler tick period length or to infinity if
+ * the tick period length is less than the target residency of that state. In
+ * the latter case, the governor also counts events with the measured idle
+ * duration between the tick period length and the target residency of the
+ * deepest idle state.
*
* Two metrics called "hits" and "intercepts" are associated with each bin.
* They are updated every time before selecting an idle state for the given CPU
@@ -49,47 +55,46 @@
* sleep length and the idle duration measured after CPU wakeup fall into the
* same bin (that is, the CPU appears to wake up "on time" relative to the sleep
* length). In turn, the "intercepts" metric reflects the relative frequency of
- * situations in which the measured idle duration is so much shorter than the
- * sleep length that the bin it falls into corresponds to an idle state
- * shallower than the one whose bin is fallen into by the sleep length (these
- * situations are referred to as "intercepts" below).
+ * non-timer wakeup events for which the measured idle duration falls into a bin
+ * that corresponds to an idle state shallower than the one whose bin is fallen
+ * into by the sleep length (these events are also referred to as "intercepts"
+ * below).
*
* In order to select an idle state for a CPU, the governor takes the following
* steps (modulo the possible latency constraint that must be taken into account
* too):
*
- * 1. Find the deepest CPU idle state whose target residency does not exceed
- * the current sleep length (the candidate idle state) and compute 2 sums as
- * follows:
+ * 1. Find the deepest enabled CPU idle state (the candidate idle state) and
+ * compute 2 sums as follows:
*
- * - The sum of the "hits" and "intercepts" metrics for the candidate state
- * and all of the deeper idle states (it represents the cases in which the
- * CPU was idle long enough to avoid being intercepted if the sleep length
- * had been equal to the current one).
+ * - The sum of the "hits" metric for all of the idle states shallower than
+ * the candidate one (it represents the cases in which the CPU was likely
+ * woken up by a timer).
*
- * - The sum of the "intercepts" metrics for all of the idle states shallower
- * than the candidate one (it represents the cases in which the CPU was not
- * idle long enough to avoid being intercepted if the sleep length had been
- * equal to the current one).
+ * - The sum of the "intercepts" metric for all of the idle states shallower
+ * than the candidate one (it represents the cases in which the CPU was
+ * likely woken up by a non-timer wakeup source).
*
- * 2. If the second sum is greater than the first one the CPU is likely to wake
- * up early, so look for an alternative idle state to select.
+ * 2. If the second sum computed in step 1 is greater than a half of the sum of
+ * both metrics for the candidate state bin and all subsequent bins (if any),
+ * a shallower idle state is likely to be more suitable, so look for it.
*
- * - Traverse the idle states shallower than the candidate one in the
+ * - Traverse the enabled idle states shallower than the candidate one in the
* descending order.
*
* - For each of them compute the sum of the "intercepts" metrics over all
* of the idle states between it and the candidate one (including the
* former and excluding the latter).
*
- * - If each of these sums that needs to be taken into account (because the
- * check related to it has indicated that the CPU is likely to wake up
- * early) is greater than a half of the corresponding sum computed in step
- * 1 (which means that the target residency of the state in question had
- * not exceeded the idle duration in over a half of the relevant cases),
- * select the given idle state instead of the candidate one.
+ * - If this sum is greater than a half of the second sum computed in step 1,
+ * use the given idle state as the new candidate one.
*
- * 3. By default, select the candidate state.
+ * 3. If the current candidate state is state 0 or its target residency is short
+ * enough, return it and prevent the scheduler tick from being stopped.
+ *
+ * 4. Obtain the sleep length value and check if it is below the target
+ * residency of the current candidate state, in which case a new shallower
+ * candidate state needs to be found, so look for it.
*/
#include <linux/cpuidle.h>
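
To make steps 1 and 2 of the comment above easier to follow, here is a toy
user-space model of the bin bookkeeping on plain arrays. It is illustrative
only; the real governor additionally uses the "hits" sum, latency limits, the
tick and the sleep-length fallback of steps 3 and 4::

    #include <stdio.h>

    #define NR_STATES 4

    static int pick_candidate(const unsigned int hits[NR_STATES],
                              const unsigned int intercepts[NR_STATES])
    {
        int candidate = NR_STATES - 1;  /* deepest enabled state */
        unsigned int shallow_intercepts = 0;

        /* Step 1: intercepts seen at states shallower than the candidate. */
        for (int i = 0; i < candidate; i++)
            shallow_intercepts += intercepts[i];

        /* Both metrics for the candidate bin (no deeper bins in this toy). */
        unsigned int tail = hits[candidate] + intercepts[candidate];

        /* Step 2: intercepts dominate, so search for a shallower state. */
        if (2 * shallow_intercepts > tail) {
            unsigned int sum = 0;

            for (int i = candidate - 1; i >= 0; i--) {
                sum += intercepts[i];
                if (2 * sum > shallow_intercepts) {
                    candidate = i;
                    break;
                }
            }
        }

        return candidate;
    }

    int main(void)
    {
        unsigned int hits[NR_STATES]       = {  2,  3,  5, 10 };
        unsigned int intercepts[NR_STATES] = { 30, 25, 10,  4 };

        printf("candidate state: %d\n", pick_candidate(hits, intercepts));
        return 0;
    }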
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index af018afd9cd7..2e87ca0e292a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -249,7 +249,7 @@ static struct file *open_file_as_root(const char *filename, int flags, umode_t m
fp = file_open_root(&root, filename, flags, mode);
path_put(&root);
- revert_creds(old_cred);
+ put_cred(revert_creds(old_cred));
return fp;
}
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index c6a8f2c82680..792d94c49077 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -65,7 +65,7 @@ struct xgpio_instance {
DECLARE_BITMAP(state, 64);
DECLARE_BITMAP(last_irq_read, 64);
DECLARE_BITMAP(dir, 64);
- spinlock_t gpio_lock; /* For serializing operations */
+ raw_spinlock_t gpio_lock; /* For serializing operations */
int irq;
DECLARE_BITMAP(enable, 64);
DECLARE_BITMAP(rising_edge, 64);
@@ -179,14 +179,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write to GPIO signal and set its direction to output */
__assign_bit(bit, chip->state, val);
xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -210,7 +210,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
bitmap_replace(state, chip->state, hw_bits, hw_mask, 64);
@@ -218,7 +218,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_copy(chip->state, state, 64);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -236,13 +236,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Set the GPIO bit in shadow register and set direction as input */
__set_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -265,7 +265,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
struct xgpio_instance *chip = gpiochip_get_data(gc);
int bit = xgpio_to_bit(chip, gpio);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
/* Write state of GPIO signal */
__assign_bit(bit, chip->state, val);
@@ -275,7 +275,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
__clear_bit(bit, chip->dir);
xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir);
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
return 0;
}
@@ -398,7 +398,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
int bit = xgpio_to_bit(chip, irq_offset);
u32 mask = BIT(bit / 32), temp;
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__clear_bit(bit, chip->enable);
@@ -408,7 +408,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
temp &= ~mask;
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
gpiochip_disable_irq(&chip->gc, irq_offset);
}
@@ -428,7 +428,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
gpiochip_enable_irq(&chip->gc, irq_offset);
- spin_lock_irqsave(&chip->gpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__set_bit(bit, chip->enable);
@@ -447,7 +447,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
}
- spin_unlock_irqrestore(&chip->gpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
/**
@@ -512,7 +512,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
- spin_lock(&chip->gpio_lock);
+ raw_spin_lock(&chip->gpio_lock);
xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
@@ -529,7 +529,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
bitmap_copy(chip->last_irq_read, all, 64);
bitmap_or(all, rising, falling, 64);
- spin_unlock(&chip->gpio_lock);
+ raw_spin_unlock(&chip->gpio_lock);
dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
@@ -620,7 +620,7 @@ static int xgpio_probe(struct platform_device *pdev)
bitmap_set(chip->hw_map, 0, width[0]);
bitmap_set(chip->hw_map, 32, width[1]);
- spin_lock_init(&chip->gpio_lock);
+ raw_spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
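
Background for the gpio-xilinx conversion above: with PREEMPT_RT enabled a
spinlock_t becomes a sleeping lock, while irq_chip callbacks and chained IRQ
handlers run in contexts that must not sleep, so the per-chip lock has to be a
raw_spinlock_t. A minimal sketch of the resulting pattern (hypothetical driver
code, not taken from the patch):

#include <linux/bits.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_chip {
	raw_spinlock_t lock;	/* safe to take in hard-IRQ context, even on RT */
	u32 shadow;
};

/* The lock is initialized once at probe time with raw_spin_lock_init(). */
static void example_set_bit(struct example_chip *chip, unsigned int bit, bool set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&chip->lock, flags);
	if (set)
		chip->shadow |= BIT(bit);
	else
		chip->shadow &= ~BIT(bit);
	raw_spin_unlock_irqrestore(&chip->lock, flags);
}
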
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 3afcd1e8aa54..c4e733c2e75e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -715,8 +715,9 @@ err:
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
{
enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
- if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
- ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) {
+ if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
+ (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
amdgpu_gfx_off_ctrl(adev, idle);
} else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 2d4b67175b55..328a1b963548 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -122,6 +122,10 @@ static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
return 0;
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
+ return 0;
+
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
return 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 69a6b6dba0a5..1d155463d044 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1989,6 +1989,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2007,15 +2008,19 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
}
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
+ bool sched_work = false;
if (!adev->gfx.enable_cleaner_shader)
return;
@@ -2031,9 +2036,12 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx]) {
if (adev->kfd.init_complete)
- amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+ sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 8b512dc28df8..071f187f5e28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -193,8 +193,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
- (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
- amdgpu_vm_need_pipeline_sync(ring, job))) {
+ need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
+
need_pipe_sync = true;
if (tmp)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index cd16dae534dc..5f216d626cbb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8896,6 +8896,7 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
struct amdgpu_dm_connector *aconn =
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
if (pr->config.replay_supported && !pr->replay_feature_enabled)
@@ -8922,14 +8923,15 @@ static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
* adequate number of fast atomic commits to notify KMD
* of update events. See `vblank_control_worker()`.
*/
- if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+ if (!vrr_active &&
+ acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
(current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
if (pr->replay_feature_enabled && !pr->replay_allow_active)
amdgpu_dm_replay_enable(acrtc_state->stream, true);
- if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+ if (psr->psr_version == DC_PSR_VERSION_SU_1 &&
!psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -9100,7 +9102,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
timestamp_ns;
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
mutex_unlock(&dm->dc_lock);
}
}
@@ -9266,11 +9268,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
- if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) {
if (acrtc_state->stream->link->replay_settings.replay_allow_active)
amdgpu_dm_replay_disable(acrtc_state->stream);
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
- amdgpu_dm_psr_disable(acrtc_state->stream);
+ amdgpu_dm_psr_disable(acrtc_state->stream, true);
}
mutex_unlock(&dm->dc_lock);
@@ -11379,6 +11381,25 @@ static int dm_crtc_get_cursor_mode(struct amdgpu_device *adev,
return 0;
}
+static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state, *old_plane_state;
+
+ drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
+ new_plane_state = drm_atomic_get_plane_state(state, plane);
+ old_plane_state = drm_atomic_get_plane_state(state, plane);
+
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb))
+ return true;
+ }
+
+ return false;
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
*
@@ -11576,10 +11597,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) {
- if (old_plane_state->fb && new_plane_state->fb &&
- get_mem_type(old_plane_state->fb) !=
- get_mem_type(new_plane_state->fb))
- lock_and_validation_needed = true;
ret = dm_update_plane_state(dc, state, plane,
old_plane_state,
@@ -11874,9 +11891,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
/*
* Only allow async flips for fast updates that don't change
- * the FB pitch, the DCC state, rotation, etc.
+ * the FB pitch, the DCC state, rotation, mem_type, etc.
*/
- if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ if (new_crtc_state->async_flip &&
+ (lock_and_validation_needed ||
+ amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) {
drm_dbg_atomic(crtc->dev,
"[CRTC:%d:%s] async flips are only supported for fast updates\n",
crtc->base.id, crtc->name);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6464a8378387..2227cd8e4a89 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -697,6 +697,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ uint32_t mst_local_bw;
+ uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index f936a35fa9eb..0f6ba7b1575d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -30,6 +30,7 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"
+#include "amdgpu_dm_psr.h"
static const char *const pipe_crc_sources[] = {
"none",
@@ -224,6 +225,10 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
+ /* For PSR1, check that the panel has exited PSR */
+ if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
+ amdgpu_dm_psr_wait_disable(stream_state);
+
/* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!dc_stream_configure_crc(stream_state->ctx->dc,
@@ -357,6 +362,17 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
+ /*
+ * Reading the CRC requires the vblank interrupt handler to be
+ * enabled. Keep a reference until CRC capture stops.
+ */
+ enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+ }
+
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
@@ -367,16 +383,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- /*
- * Reading the CRC requires the vblank interrupt handler to be
- * enabled. Keep a reference until CRC capture stops.
- */
- enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
if (!enabled && enable) {
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto cleanup;
-
if (dm_is_crc_source_dprx(source)) {
if (drm_dp_start_crc(aux, crtc)) {
DRM_DEBUG_DRIVER("dp start crc failed\n");
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 64a041c2af05..36a830a7440f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -93,7 +93,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
return rc;
}
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
@@ -142,7 +142,7 @@ static void amdgpu_dm_crtc_set_panel_sr_feature(
amdgpu_dm_replay_enable(vblank_work->stream, true);
} else if (vblank_enabled) {
if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
- amdgpu_dm_psr_disable(vblank_work->stream);
+ amdgpu_dm_psr_disable(vblank_work->stream, false);
} else if (link->psr_settings.psr_feature_enabled &&
allow_sr_entry && !is_sr_active && !is_crc_window_active) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
index 17e948753f59..c1212947a77b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
@@ -37,7 +37,7 @@ int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable);
bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc);
-bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state);
+bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state);
int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 6a97bb2d9160..8b7c1a428f64 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3606,7 +3606,7 @@ static int crc_win_update_set(void *data, u64 val)
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6e4359490613..1080075ccb17 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -155,6 +155,17 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
return 0;
}
+
+static inline void
+amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->drm_edid = NULL;
+ aconnector->dsc_aux = NULL;
+ aconnector->mst_output_port->passthrough_aux = NULL;
+ aconnector->mst_local_bw = 0;
+ aconnector->vc_full_pbn = 0;
+}
+
static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
@@ -182,9 +193,7 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -504,9 +513,7 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
- aconnector->drm_edid = NULL;
- aconnector->dsc_aux = NULL;
- port->passthrough_aux = NULL;
+ amdgpu_dm_mst_reset_mst_connector_setting(aconnector);
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -1819,9 +1826,18 @@ enum dc_status dm_dp_mst_is_port_support_mode(
struct drm_dp_mst_port *immediate_upstream_port = NULL;
uint32_t end_link_bw = 0;
- /*Get last DP link BW capability*/
- if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
- if (stream_kbps > end_link_bw) {
+ /* Get last DP link BW capability. Mode shall be supported by the legacy peer. */
+ if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV &&
+ aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) {
+ if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) {
+ dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw);
+ aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn;
+ aconnector->mst_local_bw = end_link_bw;
+ } else {
+ end_link_bw = aconnector->mst_local_bw;
+ }
+
+ if (end_link_bw > 0 && stream_kbps > end_link_bw) {
DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
@@ -1835,11 +1851,15 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (immediate_upstream_port) {
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
- if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
- "Max dsc compression can't fit into MST available bw\n");
- return DC_FAIL_BANDWIDTH_VALIDATE;
- }
+ } else {
+ /* For topology LCT 1 case - only one mstb*/
+ virtual_channel_bw_in_kbps = root_link_bw_in_kbps;
+ }
+
+ if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ "Max dsc compression can't fit into MST available bw\n");
+ return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index f40240aafe98..45858bf1523d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -201,14 +201,13 @@ void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*
* Return: true if success
*/
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait)
{
- unsigned int power_opt = 0;
bool psr_enable = false;
DRM_DEBUG_DRIVER("Disabling psr...\n");
- return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
+ return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL);
}
/*
@@ -251,3 +250,33 @@ bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm)
return allow_active;
}
+
+/**
+ * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR
+ * @stream: stream state attached to the eDP link
+ *
+ * Waits for a max of 500ms for the eDP panel to exit PSR.
+ *
+ * Return: true if panel exited PSR, false otherwise.
+ */
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream)
+{
+ enum dc_psr_state psr_state = PSR_STATE0;
+ struct dc_link *link = stream->link;
+ int retry_count;
+
+ if (link == NULL)
+ return false;
+
+ for (retry_count = 0; retry_count < 1000; retry_count++) {
+ dc_link_get_psr_state(link, &psr_state);
+ if (psr_state == PSR_STATE0)
+ break;
+ udelay(500);
+ }
+
+ if (retry_count == 1000)
+ return false;
+
+ return true;
+}
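
For reference, the timeout in the new helper above is just the loop bound times
the poll interval: on the order of 1000 polls * 500 us per poll = 500 ms, which
matches the 500 ms limit stated in its kernel-doc.
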
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index cd2d45c2b5ef..e2366321a3c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -34,8 +34,9 @@
void amdgpu_dm_set_psr_caps(struct dc_link *link);
void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
-bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait);
bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm);
+bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream);
#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..5bb8b78bf250 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -63,7 +63,8 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
bool should_use_dmub_lock(struct dc_link *link)
{
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_1)
return true;
if (link->replay_settings.replay_feature_enabled)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index beed7adbbd43..47d785204f29 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
.dcn_downspread_percent = 0.5,
.gpuvm_min_page_size_bytes = 4096,
.hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = 1,
+ .do_urgent_latency_adjustment = 0,
.urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
};
void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 601320b1be81..ecfa3c898e09 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -579,8 +579,8 @@ static void CalculateBytePerPixelAndBlockSizes(
{
*BytePerPixelDETY = 0;
*BytePerPixelDETC = 0;
- *BytePerPixelY = 0;
- *BytePerPixelC = 0;
+ *BytePerPixelY = 1;
+ *BytePerPixelC = 1;
if (SourcePixelFormat == dml2_444_64) {
*BytePerPixelDETY = 8;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index fa11f075d1f9..d9136d252ee1 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1398,12 +1398,12 @@ void dcn32_disable_link_output(struct dc_link *link,
link_hwss->disable_link_output(link, link_res, signal);
link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;
-
- if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_power_control &&
- !link->skip_implict_edp_power_control)
- link->dc->hwss.edp_power_control(link, false);
- else if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ /*
+ * Add the logic to extract BOTH power up and power down sequences
+ * from enable/disable link output and only call edp panel control
+ * in enable_link_dp and disable_link_dp once.
+ */
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 2a09b27788e8..0551a3311217 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2643,11 +2643,12 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
&backend_workload_mask);
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
- if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
- ((smu->adev->pm.fw_version == 0x004e6601) ||
- (smu->adev->pm.fw_version >= 0x004e7300))) ||
- (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
- smu->adev->pm.fw_version >= 0x00504500)) {
+ if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) &&
+ ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+ ((smu->adev->pm.fw_version == 0x004e6601) ||
+ (smu->adev->pm.fw_version >= 0x004e7300))) ||
+ (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ smu->adev->pm.fw_version >= 0x00504500))) {
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
PP_SMC_POWER_PROFILE_POWERSAVING);
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index cbabd4e20d3e..5f138a5692c7 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -845,8 +845,8 @@ static int it6263_probe(struct i2c_client *client)
it->lvds_i2c = devm_i2c_new_dummy_device(dev, client->adapter,
LVDS_INPUT_CTRL_I2C_ADDR);
if (IS_ERR(it->lvds_i2c))
- dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
- "failed to allocate I2C device for LVDS\n");
+ return dev_err_probe(it->dev, PTR_ERR(it->lvds_i2c),
+ "failed to allocate I2C device for LVDS\n");
it->lvds_regmap = devm_regmap_init_i2c(it->lvds_i2c,
&it6263_lvds_regmap_config);
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 320c297008aa..3a5a4f92c979 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -459,7 +459,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return ERR_PTR(-EINVAL);
- if (bridge_connector->bridge_hdmi)
+ if (bridge_connector->bridge_hdmi) {
+ if (!connector->ycbcr_420_allowed)
+ supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
+
ret = drmm_connector_hdmi_init(drm, connector,
bridge_connector->bridge_hdmi->vendor,
bridge_connector->bridge_hdmi->product,
@@ -468,10 +471,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc,
supported_formats,
max_bpc);
- else
+ } else {
ret = drmm_connector_init(drm, connector,
&drm_bridge_connector_funcs,
connector_type, ddc);
+ }
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c6af46dd02bf..241a384ebce3 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -207,6 +207,10 @@ void drm_bridge_add(struct drm_bridge *bridge)
{
mutex_init(&bridge->hpd_mutex);
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI)
+ bridge->ycbcr_420_allowed = !!(bridge->supported_formats &
+ BIT(HDMI_COLORSPACE_YUV420));
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index fc35f47e2849..ca7f43c8d6f1 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -507,6 +507,9 @@ int drmm_connector_hdmi_init(struct drm_device *dev,
if (!supported_formats || !(supported_formats & BIT(HDMI_COLORSPACE_RGB)))
return -EINVAL;
+ if (connector->ycbcr_420_allowed != !!(supported_formats & BIT(HDMI_COLORSPACE_YUV420)))
+ return -EINVAL;
+
if (!(max_bpc == 8 || max_bpc == 10 || max_bpc == 12))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 6a7060889f40..223c4218c019 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -1694,7 +1694,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
* arithmetic related to alignment and offset calculation.
*/
if (is_gen12_ccs_cc_plane(&fb->base, i)) {
- if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE))
+ if (IS_ALIGNED(fb->base.offsets[i], 64))
continue;
else
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8f0c69aad248..21b56cc7605c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -384,7 +384,7 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
if (ret < 0)
return NULL;
- return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+ return edid;
}
bool nouveau_acpi_video_backlight_use_native(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 09686d038d60..7cc84472cece 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -387,11 +387,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (f) {
struct nouveau_channel *prev;
bool must_wait = true;
+ bool local;
rcu_read_lock();
prev = rcu_dereference(f->channel);
- if (prev && (prev == chan ||
- fctx->sync(f, prev, chan) == 0))
+ local = prev && prev->cli->drm == chan->cli->drm;
+ if (local && (prev == chan ||
+ fctx->sync(f, prev, chan) == 0))
must_wait = false;
rcu_read_unlock();
if (!must_wait)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
index 841e3b69fcaf..5a0c9b8a79f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c
@@ -31,6 +31,7 @@ mcp77_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
+ .bl = &nv50_sor_bl,
.hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
index 6bba97d0be88..3d48d676e001 100644
--- a/drivers/gpu/drm/tests/drm_connector_test.c
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -635,6 +635,64 @@ static void drm_test_connector_hdmi_init_formats_no_rgb(struct kunit *test)
KUNIT_EXPECT_LT(test, ret, 0);
}
+struct drm_connector_hdmi_init_formats_yuv420_allowed_test {
+ unsigned long supported_formats;
+ bool yuv420_allowed;
+ int expected_result;
+};
+
+#define YUV420_ALLOWED_TEST(_formats, _allowed, _result) \
+ { \
+ .supported_formats = BIT(HDMI_COLORSPACE_RGB) | (_formats), \
+ .yuv420_allowed = _allowed, \
+ .expected_result = _result, \
+ }
+
+static const struct drm_connector_hdmi_init_formats_yuv420_allowed_test
+drm_connector_hdmi_init_formats_yuv420_allowed_tests[] = {
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), true, 0),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV420), false, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), true, -EINVAL),
+ YUV420_ALLOWED_TEST(BIT(HDMI_COLORSPACE_YUV422), false, 0),
+};
+
+static void
+drm_connector_hdmi_init_formats_yuv420_allowed_desc(const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *t,
+ char *desc)
+{
+ sprintf(desc, "supported_formats=0x%lx yuv420_allowed=%d",
+ t->supported_formats, t->yuv420_allowed);
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_tests,
+ drm_connector_hdmi_init_formats_yuv420_allowed_desc);
+
+/*
+ * Test that the registration of an HDMI connector succeeds only when
+ * the presence of YUV420 in the supported formats matches the value
+ * of the ycbcr_420_allowed flag.
+ */
+static void drm_test_connector_hdmi_init_formats_yuv420_allowed(struct kunit *test)
+{
+ const struct drm_connector_hdmi_init_formats_yuv420_allowed_test *params;
+ struct drm_connector_init_priv *priv = test->priv;
+ int ret;
+
+ params = test->param_value;
+ priv->connector.ycbcr_420_allowed = params->yuv420_allowed;
+
+ ret = drmm_connector_hdmi_init(&priv->drm, &priv->connector,
+ "Vendor", "Product",
+ &dummy_funcs,
+ &dummy_hdmi_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ &priv->ddc,
+ params->supported_formats,
+ 8);
+ KUNIT_EXPECT_EQ(test, ret, params->expected_result);
+}
+
/*
* Test that the registration of an HDMI connector with an HDMI
* connector type succeeds.
@@ -726,6 +784,8 @@ static struct kunit_case drmm_connector_hdmi_init_tests[] = {
KUNIT_CASE(drm_test_connector_hdmi_init_bpc_null),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_empty),
KUNIT_CASE(drm_test_connector_hdmi_init_formats_no_rgb),
+ KUNIT_CASE_PARAM(drm_test_connector_hdmi_init_formats_yuv420_allowed,
+ drm_connector_hdmi_init_formats_yuv420_allowed_gen_params),
KUNIT_CASE(drm_test_connector_hdmi_init_null_ddc),
KUNIT_CASE(drm_test_connector_hdmi_init_null_product),
KUNIT_CASE(drm_test_connector_hdmi_init_null_vendor),
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 04a6b8cc62ac..3c0b7824c0be 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -320,8 +320,7 @@ static void kunit_action_drm_mode_destroy(void *ptr)
}
/**
- * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC
- for a KUnit test
+ * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC for a KUnit test
* @test: The test context object
* @dev: DRM device
* @video_code: CEA VIC of the mode
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 20bf33702c3c..da203045df9b 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -108,6 +108,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->bin_job = NULL;
status = IRQ_HANDLED;
}
@@ -118,6 +119,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->render_job = NULL;
status = IRQ_HANDLED;
}
@@ -128,6 +130,7 @@ v3d_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
trace_v3d_csd_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->csd_job = NULL;
status = IRQ_HANDLED;
}
@@ -165,6 +168,7 @@ v3d_hub_irq(int irq, void *arg)
v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
+ v3d->tfu_job = NULL;
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index a0e433fbcba6..9b5b8c1f063b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -228,7 +228,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_VRAM);
buf->places[0].lpfn = PFN_UP(bo->resource->size);
- buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
@@ -443,7 +442,8 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
if (params->pin)
ttm_bo_pin(&vmw_bo->tbo);
- ttm_bo_unreserve(&vmw_bo->tbo);
+ if (!params->keep_resv)
+ ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 43b5439ec9f7..11e330c7c7f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -56,8 +56,9 @@ struct vmw_bo_params {
u32 domain;
u32 busy_domain;
enum ttm_bo_type bo_type;
- size_t size;
bool pin;
+ bool keep_resv;
+ size_t size;
struct dma_resv *resv;
struct sg_table *sg;
};
@@ -83,7 +84,6 @@ struct vmw_bo {
struct ttm_placement placement;
struct ttm_place places[5];
- struct ttm_place busy_places[5];
/* Protected by reservation */
struct ttm_bo_kmap_obj map;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2c46897876dd..b19a062592b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -403,7 +403,8 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
/*
@@ -415,10 +416,6 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
- vmw_bo_pin_reserved(vbo, true);
-
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index b9857f37ca1a..ed5015ced392 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -206,6 +206,7 @@ struct drm_gem_object *vmw_prime_import_sg_table(struct drm_device *dev,
.bo_type = ttm_bo_type_sg,
.size = attach->dmabuf->size,
.pin = false,
+ .keep_resv = true,
.resv = attach->dmabuf->resv,
.sg = table,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8db38927729b..800a79e035ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -750,6 +750,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
struct vmw_bo *old_bo = NULL;
struct vmw_bo *new_bo = NULL;
+ struct ww_acquire_ctx ctx;
s32 hotspot_x, hotspot_y;
int ret;
@@ -769,9 +770,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (du->cursor_surface)
du->cursor_age = du->cursor_surface->snooper.age;
+ ww_acquire_init(&ctx, &reservation_ww_class);
+
if (!vmw_user_object_is_null(&old_vps->uo)) {
old_bo = vmw_user_object_buffer(&old_vps->uo);
- ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx);
if (ret != 0)
return;
}
@@ -779,9 +782,14 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
if (!vmw_user_object_is_null(&vps->uo)) {
new_bo = vmw_user_object_buffer(&vps->uo);
if (old_bo != new_bo) {
- ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
- if (ret != 0)
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx);
+ if (ret != 0) {
+ if (old_bo) {
+ ttm_bo_unreserve(&old_bo->tbo);
+ ww_acquire_fini(&ctx);
+ }
return;
+ }
} else {
new_bo = NULL;
}
@@ -803,10 +811,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
- if (old_bo)
- ttm_bo_unreserve(&old_bo->tbo);
if (new_bo)
ttm_bo_unreserve(&new_bo->tbo);
+ if (old_bo)
+ ttm_bo_unreserve(&old_bo->tbo);
+
+ ww_acquire_fini(&ctx);
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index a01ca3226d0a..7fb1c88bcc47 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
if (!vmw_shader_id_ok(user_key, shader_type))
@@ -906,10 +907,6 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out;
- ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
- if (unlikely(ret != 0))
- goto no_reserve;
-
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 621d98b376bb..5553892d7c3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -572,15 +572,14 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
.busy_domain = domain,
.bo_type = ttm_bo_type_kernel,
.size = bo_size,
- .pin = true
+ .pin = true,
+ .keep_resv = true,
};
ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
- BUG_ON(ret != 0);
ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 3e0ae40ebbd2..812046f5d741 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -257,10 +257,9 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
* however seems quite fragile not to also restart the GT. Try
* to do that here by triggering a GT reset.
*/
- for_each_gt(__gt, xe, id) {
- xe_gt_reset_async(__gt);
- flush_work(&__gt->reset.worker);
- }
+ for_each_gt(__gt, xe, id)
+ xe_gt_reset(__gt);
+
if (err) {
KUNIT_FAIL(test, "restore kernel err=%pe\n",
ERR_PTR(err));
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 6f9b7a266b41..2698e433c6fa 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -162,8 +162,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
- xe_gt_reset_async(gt);
- flush_work(&gt->reset.worker);
+ xe_gt_reset(gt);
kunit_info(test, "mocs_reset_test after reset\n");
if (flags & HAS_GLOBAL_MOCS)
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 82b9b7f82fca..eb2946963420 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -57,6 +57,31 @@ int xe_gt_sanitize_freq(struct xe_gt *gt);
void xe_gt_remove(struct xe_gt *gt);
/**
+ * xe_gt_wait_for_reset - wait for gt's async reset to finalize.
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the work to finish execution,
+ * %false if there was no scheduled reset or it was done.
+ */
+static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
+{
+ return flush_work(&gt->reset.worker);
+}
+
+/**
+ * xe_gt_reset - perform synchronous reset
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the reset to finish,
+ * %false if there was no scheduled reset.
+ */
+static inline bool xe_gt_reset(struct xe_gt *gt)
+{
+ xe_gt_reset_async(gt);
+ return xe_gt_wait_for_reset(gt);
+}
+
+/**
* xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
* first that matches the same reset domain as @class
* @gt: GT structure
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index b6adfb9f2030..50fffc9ebf62 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -150,7 +150,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
gt->ccs_mode = num_engines;
xe_gt_record_user_engines(gt);
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
}
mutex_unlock(&xe->drm.filelist_mutex);
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 3e8c351a0eab..e7792858b1e4 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -132,11 +132,9 @@ static int force_reset(struct xe_gt *gt, struct drm_printer *p)
static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
xe_pm_runtime_put(gt_to_xe(gt));
- flush_work(&gt->reset.worker);
-
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 5013d674e17d..d929a78c26a9 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -550,9 +550,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
* Returns true if the caller should steer to the @group/@instance values
* returned. Returns false if the caller need not perform any steering
*/
-static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
- struct xe_reg_mcr reg_mcr,
- u8 *group, u8 *instance)
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance)
{
const struct xe_reg reg = to_xe_reg(reg_mcr);
const struct xe_mmio_range *implicit_ranges;
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index c0cd36021c24..bc06520befab 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -26,6 +26,10 @@ void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
u32 value);
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance);
+
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 4e746ae98888..dd7f9d0effa9 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -29,6 +29,7 @@
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
+#include "xe_gt_mcr.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
@@ -701,6 +702,20 @@ static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
.flags = reg.masked ? GUC_REGSET_MASKED : 0,
};
+ if (reg.mcr) {
+ struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
+ u8 group, instance;
+
+ bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
+ &group, &instance);
+
+ if (steer) {
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
+ entry.flags |= GUC_REGSET_STEERING_NEEDED;
+ }
+ }
+
xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
&entry, sizeof(entry));
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 1557acee3523..eeeb98f66982 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -419,7 +419,7 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
* Bspec: 72161
*/
const u8 mocs_write_idx = gt->mocs.uc_index;
- const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
+ const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
(GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
gt->mocs.wb_index : gt->mocs.uc_index;
u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 5cc0f6f9bc11..678fa40e4cea 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -2198,6 +2198,7 @@ static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
+ { .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
{ .start = 0xE590, .end = 0xE590 }, /* TDL_LSC_LAT_MEASURE_TDL_GFX */
{ .start = 0x13000, .end = 0x137FC }, /* PES_0_PESL0 - PES_63_UPPER_PESL3 */
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 2f1c9d97ad21..3db1b9d09c32 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -682,7 +682,7 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
/* _PMD method is optional. */
res = read_domain_devices(resource);
- if (res != -ENODEV)
+ if (res && res != -ENODEV)
return res;
if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 2a4ec55ddb47..291d91f68646 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -194,7 +194,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
scsi_cmd[14] = ata_command;
err = scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
- ATA_SECT_SIZE, HZ, 5, NULL);
+ ATA_SECT_SIZE, 10 * HZ, 5, NULL);
if (err > 0)
err = -EIO;
return err;
diff --git a/drivers/hwmon/ltc2991.c b/drivers/hwmon/ltc2991.c
index 7ca139e4b6af..6d5d4cb846da 100644
--- a/drivers/hwmon/ltc2991.c
+++ b/drivers/hwmon/ltc2991.c
@@ -125,7 +125,7 @@ static int ltc2991_get_curr(struct ltc2991_state *st, u32 reg, int channel,
/* Vx-Vy, 19.075uV/LSB */
*val = DIV_ROUND_CLOSEST(sign_extend32(reg_val, 14) * 19075,
- st->r_sense_uohm[channel]);
+ (s32)st->r_sense_uohm[channel]);
return 0;
}
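
The cast above (and the similar (long) casts in the tmp513 hunk below) matters
because, when a signed dividend and an unsigned divisor of the same width are
mixed, C's usual arithmetic conversions make the division unsigned, so negative
readings come out as huge positive numbers. A small userspace illustration with
made-up values (plain division instead of DIV_ROUND_CLOSEST):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t delta_uv = -19075;	/* a negative Vx-Vy reading */
	uint32_t r_sense_uohm = 1000;	/* unsigned shunt resistance */

	/* int / unsigned int: the dividend is converted to unsigned first. */
	printf("without cast: %d\n", (int)(delta_uv / r_sense_uohm));
	/* casting the divisor keeps the division signed: prints -19 */
	printf("with cast:    %d\n", (int)(delta_uv / (int32_t)r_sense_uohm));
	return 0;
}
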
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 1c2cb12071b8..5acbfd7d088d 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -207,7 +207,8 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
*val = sign_extend32(regval,
reg == TMP51X_SHUNT_CURRENT_RESULT ?
16 - tmp51x_get_pga_shift(data) : 15);
- *val = DIV_ROUND_CLOSEST(*val * 10 * MILLI, data->shunt_uohms);
+ *val = DIV_ROUND_CLOSEST(*val * 10 * (long)MILLI, (long)data->shunt_uohms);
+
break;
case TMP51X_BUS_VOLTAGE_RESULT:
case TMP51X_BUS_VOLTAGE_H_LIMIT:
@@ -223,7 +224,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos,
case TMP51X_BUS_CURRENT_RESULT:
// Current = (ShuntVoltage * CalibrationRegister) / 4096
*val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua;
- *val = DIV_ROUND_CLOSEST(*val, MILLI);
+ *val = DIV_ROUND_CLOSEST(*val, (long)MILLI);
break;
case TMP51X_LOCAL_TEMP_RESULT:
case TMP51X_REMOTE_TEMP_RESULT_1:
@@ -263,7 +264,7 @@ static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val)
* The user enter current value and we convert it to
* voltage. 1lsb = 10uV
*/
- val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10 * MILLI);
+ val = DIV_ROUND_CLOSEST(val * (long)data->shunt_uohms, 10 * (long)MILLI);
max_val = U16_MAX >> tmp51x_get_pga_shift(data);
regval = clamp_val(val, -max_val, max_val);
break;
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index a7b77d14ee86..5693a38da7b5 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -130,6 +130,8 @@
#define ID_P_PM_BLOCKED BIT(31)
#define ID_P_MASK GENMASK(31, 27)
+#define ID_SLAVE_NACK BIT(0)
+
enum rcar_i2c_type {
I2C_RCAR_GEN1,
I2C_RCAR_GEN2,
@@ -166,6 +168,7 @@ struct rcar_i2c_priv {
int irq;
struct i2c_client *host_notify_client;
+ u8 slave_flags;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -655,6 +658,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
{
u32 ssr_raw, ssr_filtered;
u8 value;
+ int ret;
ssr_raw = rcar_i2c_read(priv, ICSSR) & 0xff;
ssr_filtered = ssr_raw & rcar_i2c_read(priv, ICSIER);
@@ -670,7 +674,10 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICRXTX, value);
rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR);
} else {
- i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ if (ret)
+ priv->slave_flags |= ID_SLAVE_NACK;
+
rcar_i2c_read(priv, ICRXTX); /* dummy read */
rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
}
@@ -683,18 +690,21 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
if (ssr_filtered & SSR) {
i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
+ priv->slave_flags &= ~ID_SLAVE_NACK;
rcar_i2c_write(priv, ICSIER, SAR);
rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
}
/* master wants to write to us */
if (ssr_filtered & SDR) {
- int ret;
-
value = rcar_i2c_read(priv, ICRXTX);
ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
- /* Send NACK in case of error */
- rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0));
+ if (ret)
+ priv->slave_flags |= ID_SLAVE_NACK;
+
+ /* Send NACK in case of error, but it will come 1 byte late :( */
+ rcar_i2c_write(priv, ICSCR, SIE | SDBS |
+ (priv->slave_flags & ID_SLAVE_NACK ? FNA : 0));
rcar_i2c_write(priv, ICSSR, ~SDR & 0xff);
}
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
index b7c10ced5a43..8fe9ddff8e96 100644
--- a/drivers/i2c/i2c-atr.c
+++ b/drivers/i2c/i2c-atr.c
@@ -412,7 +412,7 @@ static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
dev_name(dev), ret);
break;
- case BUS_NOTIFY_DEL_DEVICE:
+ case BUS_NOTIFY_REMOVED_DEVICE:
i2c_atr_detach_client(client->adapter, client);
break;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 7c810893bfa3..75d30861ffe2 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1562,6 +1562,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
res = device_add(&adap->dev);
if (res) {
pr_err("adapter '%s': can't register device (%d)\n", adap->name, res);
+ put_device(&adap->dev);
goto out_list;
}
diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c
index 0d6fbaa48248..6de4307050dd 100644
--- a/drivers/i2c/i2c-slave-testunit.c
+++ b/drivers/i2c/i2c-slave-testunit.c
@@ -38,6 +38,7 @@ enum testunit_regs {
enum testunit_flags {
TU_FLAG_IN_PROCESS,
+ TU_FLAG_NACK,
};
struct testunit_data {
@@ -90,8 +91,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
switch (event) {
case I2C_SLAVE_WRITE_REQUESTED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
+ if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags) || test_bit(TU_FLAG_NACK, &tu->flags)) {
+ ret = -EBUSY;
+ break;
+ }
memset(tu->regs, 0, TU_NUM_REGS);
tu->reg_idx = 0;
@@ -99,8 +102,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
break;
case I2C_SLAVE_WRITE_RECEIVED:
- if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags))
- return -EBUSY;
+ if (test_bit(TU_FLAG_IN_PROCESS, &tu->flags) || test_bit(TU_FLAG_NACK, &tu->flags)) {
+ ret = -EBUSY;
+ break;
+ }
if (tu->reg_idx < TU_NUM_REGS)
tu->regs[tu->reg_idx] = *val;
@@ -129,6 +134,8 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
* here because we still need them in the workqueue!
*/
tu->reg_idx = 0;
+
+ clear_bit(TU_FLAG_NACK, &tu->flags);
break;
case I2C_SLAVE_READ_PROCESSED:
@@ -151,6 +158,10 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client,
break;
}
+ /* If an error occurred at some point, NACK everything until the next STOP */
+ if (ret)
+ set_bit(TU_FLAG_NACK, &tu->flags);
+
return ret;
}
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index dce18f763a09..77a740561fd7 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
}
/*
- * Check if there are pinctrl states at all. Note: we cant' use
+ * Check if there are pinctrl states at all. Note: we can't use
* devm_pinctrl_get_select() because we need to distinguish between
* the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state().
*/
@@ -261,7 +261,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
pm_runtime_no_callbacks(&pdev->dev);
/* switch to first parent as active master */
- i2c_demux_activate_master(priv, 0);
+ err = i2c_demux_activate_master(priv, 0);
+ if (err)
+ goto err_rollback;
err = device_create_file(&pdev->dev, &dev_attr_available_masters);
if (err)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 92244cfa0464..8c3ec5734f1e 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2045,7 +2045,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
if (!is_v4(its_dev->its))
return -EINVAL;
- guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
+ guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
/* Unmap request? */
if (!info)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 79d8cc80693c..76dce0aac246 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1522,7 +1522,7 @@ static int gic_retrigger(struct irq_data *data)
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
- if (cmd == CPU_PM_EXIT) {
+ if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) {
if (gic_dist_security_disabled())
gic_enable_redist(true);
gic_cpu_sys_reg_enable();
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index bb92fd85e975..0b4312152024 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -186,7 +186,8 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
+ IRQCHIP_SKIP_SET_WAKE;
gc->chip_types[0].regs.ack = reg_offs->pend;
gc->chip_types[0].regs.mask = reg_offs->enable;
gc->chip_types[0].regs.type = reg_offs->ctrl;
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 1eeb0d0156ce..0ee7b6b71f5f 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -35,11 +35,10 @@ void __init irqchip_init(void)
int platform_irqchip_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *par_np = of_irq_find_parent(np);
+ struct device_node *par_np __free(device_node) = of_irq_find_parent(np);
of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
if (!irq_init_cb) {
- of_node_put(par_np);
return -EINVAL;
}
@@ -55,7 +54,6 @@ int platform_irqchip_probe(struct platform_device *pdev)
* interrupt controller can check for specific domains as necessary.
*/
if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
- of_node_put(par_np);
return -EPROBE_DEFER;
}
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 66949d9f0cc5..b6f374ded390 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -89,7 +89,7 @@ void spi_nor_spimem_setup_op(const struct spi_nor *nor,
op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->dummy.nbytes)
- op->dummy.buswidth = spi_nor_get_protocol_data_nbits(proto);
+ op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
if (op->data.nbytes)
op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 6a716337f48b..268399dfcf22 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -923,7 +923,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -945,14 +944,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
-
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
@@ -964,7 +956,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -1028,13 +1019,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- supported);
- linkmode_set_bit_array(phy_gbit_features_array,
- ARRAY_SIZE(phy_gbit_features_array),
- supported);
- linkmode_copy(phy_data->phydev->supported, supported);
+ linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aeaa74f03046..b6f844cac80e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4708,7 +4708,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
@@ -4729,15 +4729,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_skb_func = bnxt_rx_page_skb;
}
bp->rx_dir = DMA_BIDIRECTIONAL;
- /* Disable LRO or GRO_HW */
- netdev_update_features(dev);
} else {
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -16214,7 +16229,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bp->max_fltr < BNXT_MAX_FLTR)
bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
- bnxt_set_rx_skb_mode(bp, false);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
bnxt_rdma_aux_device_init(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7df7a2233307..f11ed59203d9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2846,7 +2846,7 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index f88b641533fc..dc51dce209d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -422,15 +422,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bnxt_set_rx_skb_mode(bp, true);
xdp_features_set_redirect_target(dev, true);
} else {
- int rx, tx;
-
xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
- bnxt_get_max_rings(bp, &rx, &tx, true);
- if (rx > 1) {
- bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features |= NETIF_F_LRO;
- }
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b55047c0237..4566848e1d7c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1591,19 +1591,22 @@ static void fec_enet_tx(struct net_device *ndev, int budget)
fec_enet_tx_queue(ndev, i, budget);
}
-static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+static int fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
struct bufdesc *bdp, int index)
{
struct page *new_page;
dma_addr_t phys_addr;
new_page = page_pool_dev_alloc_pages(rxq->page_pool);
- WARN_ON(!new_page);
- rxq->rx_skb_info[index].page = new_page;
+ if (unlikely(!new_page))
+ return -ENOMEM;
+ rxq->rx_skb_info[index].page = new_page;
rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+
+ return 0;
}
static u32
@@ -1698,6 +1701,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
int cpu = smp_processor_id();
struct xdp_buff xdp;
struct page *page;
+ __fec32 cbd_bufaddr;
u32 sub_len = 4;
#if !defined(CONFIG_M5272)
@@ -1766,12 +1770,17 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
index = fec_enet_get_bd_index(bdp, &rxq->bd);
page = rxq->rx_skb_info[index].page;
+ cbd_bufaddr = bdp->cbd_bufaddr;
+ if (fec_enet_update_cbd(rxq, bdp, index)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+
dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
+ fec32_to_cpu(cbd_bufaddr),
pkt_len,
DMA_FROM_DEVICE);
prefetch(page_address(page));
- fec_enet_update_cbd(rxq, bdp, index);
if (xdp_prog) {
xdp_buff_clear_frags_flag(&xdp);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index ef14cff9a333..46f9726d9a8a 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1665,6 +1665,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
u8 global_scid[2];
u8 phy_scid[2];
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 496d86cbd13f..532024f34ce4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4096,6 +4096,57 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
}
/**
+ * ice_get_phy_lane_number - Get PHY lane number for current adapter
+ * @hw: pointer to the hw struct
+ *
+ * Return: PHY lane number on success, negative error code otherwise.
+ */
+int ice_get_phy_lane_number(struct ice_hw *hw)
+{
+ struct ice_aqc_get_port_options_elem *options;
+ unsigned int lport = 0;
+ unsigned int lane;
+ int err;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
+ u8 options_count = ICE_AQC_PORT_OPT_MAX;
+ u8 speed, active_idx, pending_idx;
+ bool active_valid, pending_valid;
+
+ err = ice_aq_get_port_options(hw, options, &options_count, lane,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (err)
+ goto err;
+
+ if (!active_valid)
+ continue;
+
+ speed = options[active_idx].max_lane_speed;
+ /* If we don't get speed for this lane, it's unoccupied */
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G)
+ continue;
+
+ if (hw->pf_id == lport) {
+ kfree(options);
+ return lane;
+ }
+
+ lport++;
+ }
+
+ /* PHY lane not found */
+ err = -ENXIO;
+err:
+ kfree(options);
+ return err;
+}
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 27208a60cece..fe6f88cfd948 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -193,6 +193,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
+int ice_get_phy_lane_number(struct ice_hw *hw);
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0ab35607e5d5..89fa3d53d317 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1144,7 +1144,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
- ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+ ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -6790,7 +6790,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
- ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ ice_ptp_link_change(pf, true);
}
/* Perform an initial read of the statistics registers now to
@@ -7260,7 +7260,7 @@ int ice_down(struct ice_vsi *vsi)
if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
- ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
+ ice_ptp_link_change(vsi->back, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a999fface272..efd770dfec44 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1388,10 +1388,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
/**
* ice_ptp_link_change - Reconfigure PTP after link status change
* @pf: Board private structure
- * @port: Port for which the PHY start is set
* @linkup: Link is up or down
*/
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
@@ -1399,14 +1398,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
- return;
-
ptp_port = &pf->ptp.port;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- port *= 2;
- if (WARN_ON_ONCE(ptp_port->port_num != port))
- return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
@@ -3164,10 +3156,17 @@ void ice_ptp_init(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
- int err;
+ int lane_num, err;
ptp->state = ICE_PTP_INITIALIZING;
+ lane_num = ice_get_phy_lane_number(hw);
+ if (lane_num < 0) {
+ err = lane_num;
+ goto err_exit;
+ }
+
+ ptp->port.port_num = (u8)lane_num;
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3188,10 +3187,6 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err_exit;
- ptp->port.port_num = hw->pf_id;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- ptp->port.port_num = hw->pf_id * 2;
-
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
goto err_exit;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 824e73b677a4..c490d98fd9c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -310,7 +310,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@@ -358,7 +358,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
-static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index d75f0eddd631..a8e57cf05a9c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -131,7 +131,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.rx_offset = {
.serdes = 0xffffeb27, /* -10.42424 */
.no_fec = 0xffffcccd, /* -25.6 */
- .fc = 0xfffe0014, /* -255.96 */
+ .fc = 0xfffc557b, /* -469.26 */
.sfd = 0x4a4, /* 2.32 */
.bs_ds = 0x32 /* 0.0969697 */
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 518893f23372..02e84f5b1d45 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -901,30 +901,45 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
*/
/**
+ * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ *
+ * Return: destination sideband queue PHY device.
+ */
+static enum ice_sbq_msg_dev ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
+{
+ /* On a single complex E825, PHY 0 is always destination device phy_0
+ * and PHY 1 is phy_0_peer.
+ */
+ if (port >= hw->ptp.ports_per_phy)
+ return eth56g_phy_1;
+ else
+ return eth56g_phy_0;
+}
+
+/**
* ice_write_phy_eth56g - Write a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to write
*
* Return: 0 on success, other error codes when failed to write to PHY
*/
-static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 val)
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ .data = val
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_wr;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = val;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
-
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
@@ -935,41 +950,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
/**
* ice_read_phy_eth56g - Read a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to write
*
* Return: 0 on success, other error codes when failed to read from PHY
*/
-static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 *val)
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr)
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_rd;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = 0;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
- if (err) {
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
- return err;
- }
-
- *val = phy_msg.data;
+ else
+ *val = msg.data;
- return 0;
+ return err;
}
/**
* ice_phy_res_address_eth56g - Calculate a PHY port register address
- * @port: Port number to be written
+ * @hw: pointer to the HW struct
+ * @lane: Lane number to be written
* @res_type: resource type (register/memory)
* @offset: Offset from PHY port register base
* @addr: The result address
@@ -978,17 +988,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
* * %0 - success
* * %EINVAL - invalid port number or resource type
*/
-static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
- u32 offset, u32 *addr)
+static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
+ enum eth56g_res_type res_type,
+ u32 offset,
+ u32 *addr)
{
- u8 lane = port % ICE_PORTS_PER_QUAD;
- u8 phy = ICE_GET_QUAD_NUM(port);
-
if (res_type >= NUM_ETH56G_PHY_RES)
return -EINVAL;
- *addr = eth56g_phy_res[res_type].base[phy] +
+ /* Lanes 4..7 are in fact 0..3 on a second PHY */
+ lane %= hw->ptp.ports_per_phy;
+ *addr = eth56g_phy_res[res_type].base[0] +
lane * eth56g_phy_res[res_type].step + offset;
+
return 0;
}
@@ -1008,19 +1020,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_write_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1039,19 +1049,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 *val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_read_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1201,6 +1209,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
}
/**
+ * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+
+ return ice_write_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to read
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 *val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+
+ return ice_read_phy_eth56g(hw, port, addr, val);
+}
+
+/**
* ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
@@ -1919,7 +1977,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li)
*/
static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 val;
int err;
@@ -1934,8 +1991,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
case ICE_ETH56G_LNK_SPD_1G:
case ICE_ETH56G_LNK_SPD_2_5G:
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, &val);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1946,8 +2003,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
ICE_ETH56G_NOMINAL_TX_THRESH);
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1988,50 +2045,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
*/
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
- u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr, val, peer_delay;
bool enable, sfd_ena;
- u32 val, peer_delay;
int err;
enable = hw->ptp.phy.eth56g.onestep_ena;
peer_delay = hw->ptp.phy.eth56g.peer_delay;
sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
- /* PHY_PTP_1STEP_CONFIG */
- err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ addr = PHY_PTP_1STEP_CONFIG;
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
if (enable)
- val |= blk_port;
+ val |= BIT(quad_lane);
else
- val &= ~blk_port;
+ val &= ~BIT(quad_lane);
val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
- err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_PTP_1STEP_PEER_DELAY */
+ addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
if (peer_delay)
val |= PHY_PTP_1STEP_PD_ADD_PD_M;
val |= PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_MAC_XIF_MODE */
- err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ addr = PHY_MAC_XIF_MODE;
+ err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
@@ -2051,7 +2105,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
- return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+ return ice_write_mac_reg_eth56g(hw, port, addr, val);
}
/**
@@ -2093,21 +2147,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
bool fc, bool rs,
enum ice_eth56g_link_spd spd)
{
- u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 bitslip;
int err;
if (!bs || rs)
return 0;
- if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
&bitslip);
- else
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_REG_SD_BIT_SLIP(port_offset),
- &bitslip);
+ } else {
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr;
+
+ addr = PHY_REG_SD_BIT_SLIP(quad_lane);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
+ }
if (err)
return 0;
@@ -2667,59 +2722,29 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
}
/**
- * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
- * @hw: pointer to the HW struct
- *
- * Return: true if it's 2x50 breakout topology, false otherwise
- */
-static bool ice_is_muxed_topo(struct ice_hw *hw)
-{
- u8 link_topo;
- bool mux;
- u32 val;
-
- val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
- mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
- val = rd32(hw, GLGEN_MAC_LINK_TOPO);
- link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
-
- return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
-}
-
-/**
- * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * ice_ptp_init_phy_e825 - initialize PHY parameters
* @hw: pointer to the HW struct
*/
-static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u8 phy;
+ u32 phy_rev;
+ int err;
ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
params->sfd_ena = false;
- params->phy_addr[0] = eth56g_phy_0;
- params->phy_addr[1] = eth56g_phy_1;
params->num_phys = 2;
ptp->ports_per_phy = 4;
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
ice_sb_access_ena_eth56g(hw, true);
- for (phy = 0; phy < params->num_phys; phy++) {
- u32 phy_rev;
- int err;
-
- err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G) {
- ptp->phy_model = ICE_PHY_UNSUP;
- return;
- }
- }
-
- ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
+ err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
+ if (err || phy_rev != PHY_REVISION_ETH56G)
+ ptp->phy_model = ICE_PHY_UNSUP;
}
/* E822 family functions
@@ -2738,10 +2763,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
struct ice_sbq_msg_input *msg, u8 port,
u16 offset)
{
- int phy_port, phy, quadtype;
+ int phy_port, quadtype;
phy_port = port % hw->ptp.ports_per_phy;
- phy = port / hw->ptp.ports_per_phy;
quadtype = ICE_GET_QUAD_NUM(port) %
ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
@@ -2753,12 +2777,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- if (phy == 0)
- msg->dest_dev = rmn_0;
- else if (phy == 1)
- msg->dest_dev = rmn_1;
- else
- msg->dest_dev = rmn_2;
+ msg->dest_dev = rmn_0;
}
/**
@@ -5478,7 +5497,7 @@ void ice_ptp_init_hw(struct ice_hw *hw)
else if (ice_is_e810(hw))
ice_ptp_init_phy_e810(ptp);
else if (ice_is_e825c(hw))
- ice_ptp_init_phy_e825c(hw);
+ ice_ptp_init_phy_e825(hw);
else
ptp->phy_model = ICE_PHY_UNSUP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index adb168860711..4a9ef722635f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -850,7 +850,6 @@ struct ice_mbx_data {
struct ice_eth56g_params {
u8 num_phys;
- u8 phy_addr[2];
bool onestep_ena;
bool sfd_ena;
u32 peer_delay;
@@ -881,7 +880,6 @@ struct ice_ptp_hw {
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
- bool is_2x50g_muxed_topo;
};
/* Port hardware description */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index ca92e518be76..1baf8933a07c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -724,6 +724,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ sa_entry->esn_state.esn = 1;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
@@ -768,9 +774,12 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
MLX5_IPSEC_RESCHED);
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- x->props.mode == XFRM_MODE_TUNNEL)
- xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
- MLX5E_IPSEC_TUNNEL_SA);
+ x->props.mode == XFRM_MODE_TUNNEL) {
+ xa_lock_bh(&ipsec->sadb);
+ __xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
+ MLX5E_IPSEC_TUNNEL_SA);
+ xa_unlock_bh(&ipsec->sadb);
+ }
out:
x->xso.offload_handle = (unsigned long)sa_entry;
@@ -797,7 +806,6 @@ err_xfrm:
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old;
@@ -806,12 +814,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry);
-
- if (attrs->mode == XFRM_MODE_TUNNEL &&
- attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- /* Make sure that no ARP requests are running in parallel */
- flush_workqueue(ipsec->wq);
-
}
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e51b03d4c717..57861d34d46f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -1718,23 +1718,21 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
- else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
-
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
break;
case XFRM_DEV_OFFLOAD_PACKET:
- if (attrs->reqid)
- setup_fte_reg_c4(spec, attrs->reqid);
+ setup_fte_reg_c4(spec, attrs->reqid);
err = setup_pkt_reformat(ipsec, attrs, &flow_act);
if (err)
goto err_pkt_reformat;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 53cfa39188cb..820debf3fbbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -91,8 +91,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
- struct mlx5_accel_esp_xfrm_attrs *attrs)
+ struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
void *aso_ctx;
aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
@@ -120,8 +121,12 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
* active.
*/
MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
- if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+ if (!attrs->replay_esn.trigger)
+ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
+ sa_entry->esn_state.esn);
+ }
if (attrs->lft.hard_packet_limit != XFRM_INF) {
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
@@ -175,7 +180,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
res = &mdev->mlx5e_res.hw_objs;
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
- mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+ mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2eabfcc247c6..0ce999706d41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2709,6 +2709,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX:
root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_BYPASS_PRIO;
break;
case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
root_ns = steering->rdma_rx_root_ns;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ab2717012b79..39e80704b1c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -530,7 +530,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
set_tt_map(port_sel, hash_type);
err = mlx5_lag_create_definers(ldev, hash_type, ports);
if (err)
- return err;
+ goto clear_port_sel;
if (port_sel->tunnel) {
err = mlx5_lag_create_inner_ttc_table(ldev);
@@ -549,6 +549,8 @@ destroy_inner:
mlx5_destroy_ttc_table(port_sel->inner.ttc);
destroy_definers:
mlx5_lag_destroy_definers(ldev);
+clear_port_sel:
+ memset(port_sel, 0, sizeof(*port_sel));
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a96be98be032..b96909fbeb12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -257,6 +257,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
return 0;
esw_err:
+ mlx5_sf_function_id_erase(table, sf);
mlx5_sf_free(table, sf);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 1bed75eca97d..740b719e7072 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -382,6 +382,7 @@ err_alloc_bfreg:
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
{
+ struct mutex *wc_state_lock = &mdev->wc_state_lock;
struct mlx5_core_dev *parent = NULL;
if (!MLX5_CAP_GEN(mdev, bf)) {
@@ -400,32 +401,31 @@ bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
*/
goto out;
- mutex_lock(&mdev->wc_state_lock);
-
- if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
- goto unlock;
-
#ifdef CONFIG_MLX5_SF
- if (mlx5_core_is_sf(mdev))
+ if (mlx5_core_is_sf(mdev)) {
parent = mdev->priv.parent_mdev;
+ wc_state_lock = &parent->wc_state_lock;
+ }
#endif
- if (parent) {
- mutex_lock(&parent->wc_state_lock);
+ mutex_lock(wc_state_lock);
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+ if (parent) {
mlx5_core_test_wc(parent);
mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
parent->wc_state);
mdev->wc_state = parent->wc_state;
- mutex_unlock(&parent->wc_state_lock);
+ } else {
+ mlx5_core_test_wc(mdev);
}
- mlx5_core_test_wc(mdev);
-
unlock:
- mutex_unlock(&mdev->wc_state_lock);
+ mutex_unlock(wc_state_lock);
out:
mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 2dc0c6ad54be..be95336ce089 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1656,9 +1656,9 @@ static int __init mana_driver_init(void)
static void __exit mana_driver_exit(void)
{
- debugfs_remove(mana_debugfs_root);
-
pci_unregister_driver(&mana_driver);
+
+ debugfs_remove(mana_debugfs_root);
}
module_init(mana_driver_init);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 9d97cd281f18..c03558adda91 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
map_id_full = be64_to_cpu(cbe->map_ptr);
map_id = map_id_full;
- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ if (size_add(pkt_size, data_size) > INT_MAX ||
+ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 739707a7b40f..8a3959bb2360 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
-#include <linux/hwmon.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
@@ -5347,43 +5346,6 @@ static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
return false;
}
-static umode_t r8169_hwmon_is_visible(const void *drvdata,
- enum hwmon_sensor_types type,
- u32 attr, int channel)
-{
- return 0444;
-}
-
-static int r8169_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
-{
- struct rtl8169_private *tp = dev_get_drvdata(dev);
- int val_raw;
-
- val_raw = phy_read_paged(tp->phydev, 0xbd8, 0x12) & 0x3ff;
- if (val_raw >= 512)
- val_raw -= 1024;
-
- *val = 1000 * val_raw / 2;
-
- return 0;
-}
-
-static const struct hwmon_ops r8169_hwmon_ops = {
- .is_visible = r8169_hwmon_is_visible,
- .read = r8169_hwmon_read,
-};
-
-static const struct hwmon_channel_info * const r8169_hwmon_info[] = {
- HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
- NULL
-};
-
-static const struct hwmon_chip_info r8169_hwmon_chip_info = {
- .ops = &r8169_hwmon_ops,
- .info = r8169_hwmon_info,
-};
-
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct rtl8169_private *tp;
@@ -5563,12 +5525,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* The temperature sensor is available from RTl8125B */
- if (IS_REACHABLE(CONFIG_HWMON) && tp->mac_version >= RTL_GIGA_MAC_VER_63)
- /* ignore errors */
- devm_hwmon_device_register_with_info(&pdev->dev, "nic_temp", tp,
- &r8169_hwmon_chip_info,
- NULL);
rc = register_netdev(dev);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac0f093f647a..bc395294a32d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2763,6 +2763,7 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .tx_max_frame_size = SZ_2K,
.rx_max_frame_size = SZ_2K,
.rx_buffer_size = SZ_2K +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 52e4e350b734..5cc72a91f220 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -127,15 +127,15 @@ struct cpsw_ale_dev_id {
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
- int idx, idx2;
+ int idx, idx2, index;
u32 hi_val = 0;
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be fetched exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
+ index = 2 - idx2; /* flip */
+ hi_val = ale_entry[index] << ((idx2 * 32) - start);
}
start -= idx * 32;
idx = 2 - idx; /* flip */
@@ -145,16 +145,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
u32 value)
{
- int idx, idx2;
+ int idx, idx2, index;
value &= BITMASK(bits);
idx = start / 32;
idx2 = (start + bits - 1) / 32;
/* Check if bits to be set exceed a word */
if (idx != idx2) {
- idx2 = 2 - idx2; /* flip */
- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
- ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+ index = 2 - idx2; /* flip */
+ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32)));
+ ale_entry[index] |= (value >> ((idx2 * 32) - start));
}
start -= idx * 32;
idx = 2 - idx; /* flip */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0f4b02fe6f85..ae743991117c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2056,6 +2056,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EBUSY;
}
+ if (ecoalesce->rx_max_coalesced_frames > 255 ||
+ ecoalesce->tx_max_coalesced_frames > 255) {
+ NL_SET_ERR_MSG(extack, "frames must be less than 256");
+ return -EINVAL;
+ }
+
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
if (ecoalesce->rx_coalesce_usecs)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 89a996ad8cd0..fbabada7d3ba 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1524,8 +1524,8 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
goto out_encap;
}
- gn = net_generic(dev_net(dev), gtp_net_id);
- list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+ gn = net_generic(src_net, gtp_net_id);
+ list_add(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
netdev_dbg(dev, "registered new GTP interface\n");
@@ -1551,7 +1551,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head)
hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
- list_del_rcu(&gtp->list);
+ list_del(&gtp->list);
unregister_netdevice_queue(dev, head);
}
@@ -2271,16 +2271,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
struct pdp_ctx *pctx;
- struct gtp_net *gn;
-
- gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
rcu_read_lock();
- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+ for_each_netdev_rcu(net, dev) {
+ if (dev->rtnl_link_ops != &gtp_link_ops)
+ continue;
+
+ gtp = netdev_priv(dev);
+
if (last_gtp && last_gtp != gtp)
continue;
else
@@ -2475,9 +2478,14 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
list_for_each_entry(net, net_list, exit_list) {
struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
+ struct gtp_dev *gtp, *gtp_next;
+ struct net_device *dev;
+
+ for_each_netdev(net, dev)
+ if (dev->rtnl_link_ops == &gtp_link_ops)
+ gtp_dellink(dev, dev_to_kill);
- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, dev_to_kill);
}
}
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 7246a910728d..3059435af596 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -684,7 +684,9 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
if (ret < 0)
return ret;
- mask = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ val = 0;
+ mask = DW_VR_MII_DIG_CTRL1_2G5_EN | DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+
if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
val = DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
index 69434fd13f96..68d0d9e92a22 100644
--- a/drivers/net/pfcp.c
+++ b/drivers/net/pfcp.c
@@ -206,8 +206,8 @@ static int pfcp_newlink(struct net *net, struct net_device *dev,
goto exit_del_pfcp_sock;
}
- pn = net_generic(dev_net(dev), pfcp_net_id);
- list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
+ pn = net_generic(net, pfcp_net_id);
+ list_add(&pfcp->list, &pn->pfcp_dev_list);
netdev_dbg(dev, "registered new PFCP interface\n");
@@ -224,7 +224,7 @@ static void pfcp_dellink(struct net_device *dev, struct list_head *head)
{
struct pfcp_dev *pfcp = netdev_priv(dev);
- list_del_rcu(&pfcp->list);
+ list_del(&pfcp->list);
unregister_netdevice_queue(dev, head);
}
@@ -247,11 +247,16 @@ static int __net_init pfcp_net_init(struct net *net)
static void __net_exit pfcp_net_exit(struct net *net)
{
struct pfcp_net *pn = net_generic(net, pfcp_net_id);
- struct pfcp_dev *pfcp;
+ struct pfcp_dev *pfcp, *pfcp_next;
+ struct net_device *dev;
LIST_HEAD(list);
rtnl_lock();
- list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
+ for_each_netdev(net, dev)
+ if (dev->rtnl_link_ops == &pfcp_link_ops)
+ pfcp_dellink(dev, &list);
+
+ list_for_each_entry_safe(pfcp, pfcp_next, &pn->pfcp_dev_list, list)
pfcp_dellink(pfcp->dev, &list);
unregister_netdevice_many(&list);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c1f1c810e810..8770004d9b08 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -340,6 +340,15 @@ static int of_bus_default_flags_match(struct device_node *np)
return of_property_present(np, "#address-cells") && (of_bus_n_addr_cells(np) == 3);
}
+static int of_bus_default_match(struct device_node *np)
+{
+ /*
+ * Check for presence first since of_bus_n_addr_cells() will warn when
+ * walking parent nodes.
+ */
+ return of_property_present(np, "#address-cells");
+}
+
/*
* Array of bus specific translators
*/
@@ -384,7 +393,7 @@ static const struct of_bus of_busses[] = {
{
.name = "default",
.addresses = "reg",
- .match = NULL,
+ .match = of_bus_default_match,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.translate = of_bus_default_translate,
@@ -399,7 +408,6 @@ static const struct of_bus *of_match_bus(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(of_busses); i++)
if (!of_busses[i].match || of_busses[i].match(np))
return &of_busses[i];
- BUG();
return NULL;
}
@@ -521,6 +529,8 @@ static u64 __of_translate_address(struct device_node *node,
if (parent == NULL)
return OF_BAD_ADDR;
bus = of_match_bus(parent);
+ if (!bus)
+ return OF_BAD_ADDR;
/* Count address cells & copy address locally */
bus->count_cells(dev, &na, &ns);
@@ -564,6 +574,8 @@ static u64 __of_translate_address(struct device_node *node,
/* Get new parent bus and counts */
pbus = of_match_bus(parent);
+ if (!pbus)
+ return OF_BAD_ADDR;
pbus->count_cells(dev, &pna, &pns);
if (!OF_CHECK_COUNTS(pna, pns)) {
pr_err("Bad cell count for %pOF\n", dev);
@@ -703,7 +715,7 @@ const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
/* match the parent's bus type */
bus = of_match_bus(parent);
- if (strcmp(bus->name, "pci") && (bar_no >= 0))
+ if (!bus || (strcmp(bus->name, "pci") && (bar_no >= 0)))
return NULL;
/* Get "reg" or "assigned-addresses" property */
diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi
index fa39611071b3..cd310b26b50c 100644
--- a/drivers/of/unittest-data/tests-platform.dtsi
+++ b/drivers/of/unittest-data/tests-platform.dtsi
@@ -34,5 +34,18 @@
};
};
};
+
+ platform-tests-2 {
+ // No #address-cells or #size-cells
+ node {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ test-device@100 {
+ compatible = "test-sub-device";
+ reg = <0x100 1>;
+ };
+ };
+ };
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 438fd70fa995..0fa0c0fd9a6a 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1380,6 +1380,7 @@ static void __init of_unittest_bus_3cell_ranges(void)
static void __init of_unittest_reg(void)
{
struct device_node *np;
+ struct resource res;
int ret;
u64 addr, size;
@@ -1396,6 +1397,19 @@ static void __init of_unittest_reg(void)
np, addr);
of_node_put(np);
+
+ np = of_find_node_by_path("/testcase-data/platform-tests-2/node/test-device@100");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ unittest(ret == -EINVAL, "of_address_to_resource(%pOF) expected error on untranslatable address\n",
+ np);
+
+ of_node_put(np);
+
}
struct of_unittest_expected_res {
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
index b59cacc740fa..0a5e7efbce2c 100644
--- a/drivers/pci/pcie/bwctrl.c
+++ b/drivers/pci/pcie/bwctrl.c
@@ -303,14 +303,17 @@ static int pcie_bwnotif_probe(struct pcie_device *srv)
if (ret)
return ret;
- ret = devm_request_irq(&srv->device, srv->irq, pcie_bwnotif_irq,
- IRQF_SHARED, "PCIe bwctrl", srv);
- if (ret)
- return ret;
-
scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
- port->link_bwctrl = no_free_ptr(data);
+ port->link_bwctrl = data;
+
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
+ }
+
pcie_bwnotif_enable(srv);
}
}
@@ -331,11 +334,15 @@ static void pcie_bwnotif_remove(struct pcie_device *srv)
pcie_cooling_device_unregister(data->cdev);
- pcie_bwnotif_disable(srv->port);
+ scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
+ scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
+ pcie_bwnotif_disable(srv->port);
+
+ free_irq(srv->irq, srv);
- scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem)
- scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem)
srv->port->link_bwctrl = NULL;
+ }
+ }
}
static int pcie_bwnotif_suspend(struct pcie_device *srv)
diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
index 6e5dc7e3674f..bcc5c0f3bb4d 100644
--- a/drivers/platform/x86/dell/dell-uart-backlight.c
+++ b/drivers/platform/x86/dell/dell-uart-backlight.c
@@ -283,6 +283,9 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
init_waitqueue_head(&dell_bl->wait_queue);
dell_bl->dev = dev;
+ serdev_device_set_drvdata(serdev, dell_bl);
+ serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
+
ret = devm_serdev_device_open(dev, serdev);
if (ret)
return dev_err_probe(dev, ret, "opening UART device\n");
@@ -290,8 +293,6 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
/* 9600 bps, no flow control, these are the default but set them to be sure */
serdev_device_set_baudrate(serdev, 9600);
serdev_device_set_flow_control(serdev, false);
- serdev_device_set_drvdata(serdev, dell_bl);
- serdev_device_set_client_ops(serdev, &dell_uart_bl_serdev_ops);
get_version[0] = DELL_SOF(GET_CMD_LEN);
get_version[1] = CMD_GET_VERSION;
diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
index d2699ca24f34..a96b215cd2c5 100644
--- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
@@ -199,14 +199,15 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev)
if (ret)
return ret;
+ serdev_device_set_drvdata(serdev, fc);
+ serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
+
ret = devm_serdev_device_open(dev, serdev);
if (ret)
return dev_err_probe(dev, ret, "opening UART device\n");
serdev_device_set_baudrate(serdev, 600);
serdev_device_set_flow_control(serdev, false);
- serdev_device_set_drvdata(serdev, fc);
- serdev_device_set_client_ops(serdev, &yt2_1380_fc_serdev_ops);
ret = devm_extcon_register_notifier_all(dev, fc->extcon, &fc->nb);
if (ret)
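Both serdev probe fixes above move serdev_device_set_drvdata() and serdev_device_set_client_ops() ahead of devm_serdev_device_open(), because the receive callback can run as soon as the port is open and would otherwise dereference driver data that has not been set yet. A minimal userspace analogue of that ordering hazard (the port structure and port_open() are assumptions for illustration, not the serdev API):

/*
 * Illustrative userspace analogue (not the serdev API): once open() can
 * invoke the receive callback, the callback's context must already be set.
 */
#include <stdio.h>
#include <stddef.h>

struct port {
	void *drvdata;
	void (*receive)(struct port *p, const char *buf);
};

static void receive_cb(struct port *p, const char *buf)
{
	int *count = p->drvdata;     /* NULL deref if drvdata were still unset */

	(*count)++;
	printf("got: %s (count=%d)\n", buf, *count);
}

/* A simplified "open" that may deliver buffered data immediately. */
static void port_open(struct port *p)
{
	p->receive(p, "hello");
}

int main(void)
{
	struct port p = { 0 };
	int count = 0;

	p.drvdata = &count;          /* set driver data ...  */
	p.receive = receive_cb;      /* ... and ops first    */
	port_open(&p);               /* only then open the port */
	return 0;
}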
diff --git a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
index e3a0f64c144c..3668fe66b22c 100644
--- a/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx8mp-blk-ctrl.c
@@ -770,7 +770,7 @@ static void imx8mp_blk_ctrl_remove(struct platform_device *pdev)
of_genpd_del_provider(pdev->dev.of_node);
- for (i = 0; bc->onecell_data.num_domains; i++) {
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i];
pm_genpd_remove(&domain->genpd);
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 12d0535a874b..8a7f167e405e 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -176,6 +176,7 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
vdev->dev.parent = dev;
priv->vdev = vdev;
+ device_set_of_node_from_dev(&vdev->dev, dev);
error = platform_device_add(vdev);
if (error)
goto err_device_put;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index adee6f60c966..0cc6a0f77b09 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -210,6 +210,9 @@ static int scsi_check_passthrough(struct scsi_cmnd *scmd,
struct scsi_sense_hdr sshdr;
enum sam_status status;
+ if (!scmd->result)
+ return 0;
+
if (!failures)
return 0;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fde7de3b1e55..9b47f91c5b97 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -4104,7 +4104,7 @@ iscsi_if_rx(struct sk_buff *skb)
}
do {
/*
- * special case for GET_STATS:
+ * special case for GET_STATS, GET_CHAP and GET_HOST_STATS:
* on success - sending reply and stats from
* inside of if_recv_msg(),
* on error - fall through.
@@ -4113,6 +4113,8 @@ iscsi_if_rx(struct sk_buff *skb)
break;
if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
break;
+ if (ev->type == ISCSI_UEVENT_GET_HOST_STATS && !err)
+ break;
err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
ev, sizeof(*ev));
if (err == -EAGAIN && --retries < 0) {
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
index cc329887bfae..11b153e7454e 100644
--- a/drivers/virt/vboxguest/Kconfig
+++ b/drivers/virt/vboxguest/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VBOXGUEST
tristate "Virtual Box Guest integration support"
- depends on X86 && PCI && INPUT
+ depends on (ARM64 || X86) && PCI && INPUT
help
This is a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 3bc9ce6c575e..32619d146cbc 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -81,13 +81,13 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
if (pos + total >= i_size_read(rreq->inode))
__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
-
- if (!err) {
+ if (!err && total) {
subreq->transferred += total;
__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
}
- netfs_read_subreq_terminated(subreq, err, false);
+ subreq->error = err;
+ netfs_read_subreq_terminated(subreq);
}
/**
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index dcdc0f1bb76f..5efd7e13b304 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -11,6 +11,7 @@ kafs-y := \
cmservice.o \
dir.o \
dir_edit.o \
+ dir_search.o \
dir_silly.o \
dynroot.o \
file.o \
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 99b2c8172021..69e1dd55b160 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -41,7 +41,7 @@ static void afs_volume_init_callback(struct afs_volume *volume)
list_for_each_entry(vnode, &volume->open_mmaps, cb_mmap_link) {
if (vnode->cb_v_check != atomic_read(&volume->cb_v_break)) {
- atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE);
+ afs_clear_cb_promise(vnode, afs_cb_promise_clear_vol_init_cb);
queue_work(system_unbound_wq, &vnode->cb_work);
}
}
@@ -79,7 +79,7 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
_enter("");
clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
- if (atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE) {
+ if (afs_clear_cb_promise(vnode, afs_cb_promise_clear_cb_break)) {
vnode->cb_break++;
vnode->cb_v_check = atomic_read(&vnode->volume->cb_v_break);
afs_clear_permits(vnode);
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index caa09875f520..cee42646736c 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -146,18 +146,20 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
return ERR_PTR(-ENOMEM);
}
- cell->name = kmalloc(namelen + 1, GFP_KERNEL);
+ cell->name = kmalloc(1 + namelen + 1, GFP_KERNEL);
if (!cell->name) {
kfree(cell);
return ERR_PTR(-ENOMEM);
}
- cell->net = net;
+ cell->name[0] = '.';
+ cell->name++;
cell->name_len = namelen;
for (i = 0; i < namelen; i++)
cell->name[i] = tolower(name[i]);
cell->name[i] = 0;
+ cell->net = net;
refcount_set(&cell->ref, 1);
atomic_set(&cell->active, 0);
INIT_WORK(&cell->manager, afs_manage_cell_work);
@@ -211,7 +213,7 @@ parse_failed:
if (ret == -EINVAL)
printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
- kfree(cell->name);
+ kfree(cell->name - 1);
kfree(cell);
_leave(" = %d", ret);
return ERR_PTR(ret);
@@ -365,6 +367,14 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
len = cp - rootcell;
}
+ if (len == 0 || !rootcell[0] || rootcell[0] == '.' || rootcell[len - 1] == '.')
+ return -EINVAL;
+ if (memchr(rootcell, '/', len))
+ return -EINVAL;
+ cp = strstr(rootcell, "..");
+ if (cp && cp < rootcell + len)
+ return -EINVAL;
+
/* allocate a cell record for the root cell */
new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
if (IS_ERR(new_root)) {
@@ -502,7 +512,7 @@ static void afs_cell_destroy(struct rcu_head *rcu)
afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
key_put(cell->anonymous_key);
- kfree(cell->name);
+ kfree(cell->name - 1);
kfree(cell);
afs_dec_cells_outstanding(net);
@@ -710,7 +720,8 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
afs_proc_cell_remove(cell);
mutex_lock(&net->proc_cells_lock);
- hlist_del_rcu(&cell->proc_link);
+ if (!hlist_unhashed(&cell->proc_link))
+ hlist_del_rcu(&cell->proc_link);
afs_dynroot_rmdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
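The afs_alloc_cell() change above over-allocates the name buffer by one byte, stores '.' in front and advances cell->name past it: cell->name stays the plain lower-cased cell name, while cell->name - 1 yields the dotted form, and the matching kfree(cell->name - 1) calls free the real allocation. A minimal userspace sketch of the same "hidden leading byte" trick (illustrative only, not the kernel code):

/* Minimal sketch of the leading-dot allocation trick (userspace). */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *alloc_cell_name(const char *src)
{
	size_t len = strlen(src);
	char *buf = malloc(1 + len + 1);   /* '.' + name + NUL */

	if (!buf)
		return NULL;
	buf[0] = '.';
	for (size_t i = 0; i < len; i++)
		buf[1 + i] = tolower((unsigned char)src[i]);
	buf[1 + len] = 0;
	return buf + 1;                    /* callers see the undotted name */
}

int main(void)
{
	char *name = alloc_cell_name("Example.ORG");

	if (!name)
		return 1;
	printf("plain:  %s\n", name);       /* example.org  */
	printf("dotted: %s\n", name - 1);   /* .example.org */
	free(name - 1);                     /* free the real allocation */
	return 0;
}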
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ada363af5aab..a843c36fc471 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -13,6 +13,7 @@
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/iversion.h>
+#include <linux/iov_iter.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
#include "afs_fs.h"
@@ -42,15 +43,6 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags);
-static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags);
-static void afs_dir_invalidate_folio(struct folio *folio, size_t offset,
- size_t length);
-
-static bool afs_dir_dirty_folio(struct address_space *mapping,
- struct folio *folio)
-{
- BUG(); /* This should never happen. */
-}
const struct file_operations afs_dir_file_operations = {
.open = afs_dir_open,
@@ -75,10 +67,7 @@ const struct inode_operations afs_dir_inode_operations = {
};
const struct address_space_operations afs_dir_aops = {
- .dirty_folio = afs_dir_dirty_folio,
- .release_folio = afs_dir_release_folio,
- .invalidate_folio = afs_dir_invalidate_folio,
- .migrate_folio = filemap_migrate_folio,
+ .writepages = afs_single_writepages,
};
const struct dentry_operations afs_fs_dentry_operations = {
@@ -99,152 +88,124 @@ struct afs_lookup_one_cookie {
struct afs_lookup_cookie {
struct dir_context ctx;
struct qstr name;
- bool found;
- bool one_only;
unsigned short nr_fids;
struct afs_fid fids[50];
};
+static void afs_dir_unuse_cookie(struct afs_vnode *dvnode, int ret)
+{
+ if (ret == 0) {
+ struct afs_vnode_cache_aux aux;
+ loff_t i_size = i_size_read(&dvnode->netfs.inode);
+
+ afs_set_cache_aux(dvnode, &aux);
+ fscache_unuse_cookie(afs_vnode_cache(dvnode), &aux, &i_size);
+ } else {
+ fscache_unuse_cookie(afs_vnode_cache(dvnode), NULL, NULL);
+ }
+}
+
/*
- * Drop the refs that we're holding on the folios we were reading into. We've
- * got refs on the first nr_pages pages.
+ * Iterate through a kmapped directory segment, dumping a summary of
+ * the contents.
*/
-static void afs_dir_read_cleanup(struct afs_read *req)
+static size_t afs_dir_dump_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- struct address_space *mapping = req->vnode->netfs.inode.i_mapping;
- struct folio *folio;
- pgoff_t last = req->nr_pages - 1;
+ do {
+ union afs_xdr_dir_block *block = iter_base;
- XA_STATE(xas, &mapping->i_pages, 0);
+ pr_warn("[%05zx] %32phN\n", progress, block);
+ iter_base += AFS_DIR_BLOCK_SIZE;
+ progress += AFS_DIR_BLOCK_SIZE;
+ len -= AFS_DIR_BLOCK_SIZE;
+ } while (len > 0);
- if (unlikely(!req->nr_pages))
- return;
+ return len;
+}
- rcu_read_lock();
- xas_for_each(&xas, folio, last) {
- if (xas_retry(&xas, folio))
- continue;
- BUG_ON(xa_is_value(folio));
- ASSERTCMP(folio->mapping, ==, mapping);
+/*
+ * Dump the contents of a directory.
+ */
+static void afs_dir_dump(struct afs_vnode *dvnode)
+{
+ struct iov_iter iter;
+ unsigned long long i_size = i_size_read(&dvnode->netfs.inode);
- folio_put(folio);
- }
+ pr_warn("DIR %llx:%llx is=%llx\n",
+ dvnode->fid.vid, dvnode->fid.vnode, i_size);
- rcu_read_unlock();
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
+ iterate_folioq(&iter, iov_iter_count(&iter), NULL, NULL,
+ afs_dir_dump_step);
}
/*
* check that a directory folio is valid
*/
-static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio,
- loff_t i_size)
+static bool afs_dir_check_block(struct afs_vnode *dvnode, size_t progress,
+ union afs_xdr_dir_block *block)
{
- union afs_xdr_dir_block *block;
- size_t offset, size;
- loff_t pos;
+ if (block->hdr.magic != AFS_DIR_MAGIC) {
+ pr_warn("%s(%lx): [%zx] bad magic %04x\n",
+ __func__, dvnode->netfs.inode.i_ino,
+ progress, ntohs(block->hdr.magic));
+ trace_afs_dir_check_failed(dvnode, progress);
+ trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic);
+ return false;
+ }
- /* Determine how many magic numbers there should be in this folio, but
- * we must take care because the directory may change size under us.
+ /* Make sure each block is NUL terminated so we can reasonably
+ * use string functions on it. The filenames in the folio
+ * *should* be NUL-terminated anyway.
*/
- pos = folio_pos(folio);
- if (i_size <= pos)
- goto checked;
-
- size = min_t(loff_t, folio_size(folio), i_size - pos);
- for (offset = 0; offset < size; offset += sizeof(*block)) {
- block = kmap_local_folio(folio, offset);
- if (block->hdr.magic != AFS_DIR_MAGIC) {
- printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n",
- __func__, dvnode->netfs.inode.i_ino,
- pos, offset, size, ntohs(block->hdr.magic));
- trace_afs_dir_check_failed(dvnode, pos + offset, i_size);
- kunmap_local(block);
- trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic);
- goto error;
- }
-
- /* Make sure each block is NUL terminated so we can reasonably
- * use string functions on it. The filenames in the folio
- * *should* be NUL-terminated anyway.
- */
- ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0;
-
- kunmap_local(block);
- }
-checked:
+ ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0;
afs_stat_v(dvnode, n_read_dir);
return true;
-
-error:
- return false;
}
/*
- * Dump the contents of a directory.
+ * Iterate through a kmapped directory segment, checking the content.
*/
-static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
+static size_t afs_dir_check_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- union afs_xdr_dir_block *block;
- struct address_space *mapping = dvnode->netfs.inode.i_mapping;
- struct folio *folio;
- pgoff_t last = req->nr_pages - 1;
- size_t offset, size;
-
- XA_STATE(xas, &mapping->i_pages, 0);
-
- pr_warn("DIR %llx:%llx f=%llx l=%llx al=%llx\n",
- dvnode->fid.vid, dvnode->fid.vnode,
- req->file_size, req->len, req->actual_len);
- pr_warn("DIR %llx %x %zx %zx\n",
- req->pos, req->nr_pages,
- req->iter->iov_offset, iov_iter_count(req->iter));
-
- xas_for_each(&xas, folio, last) {
- if (xas_retry(&xas, folio))
- continue;
+ struct afs_vnode *dvnode = priv;
- BUG_ON(folio->mapping != mapping);
+ if (WARN_ON_ONCE(progress % AFS_DIR_BLOCK_SIZE ||
+ len % AFS_DIR_BLOCK_SIZE))
+ return len;
- size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));
- for (offset = 0; offset < size; offset += sizeof(*block)) {
- block = kmap_local_folio(folio, offset);
- pr_warn("[%02lx] %32phN\n", folio->index + offset, block);
- kunmap_local(block);
- }
- }
+ do {
+ if (!afs_dir_check_block(dvnode, progress, iter_base))
+ break;
+ iter_base += AFS_DIR_BLOCK_SIZE;
+ len -= AFS_DIR_BLOCK_SIZE;
+ } while (len > 0);
+
+ return len;
}
/*
- * Check all the blocks in a directory. All the folios are held pinned.
+ * Check all the blocks in a directory.
*/
-static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
+static int afs_dir_check(struct afs_vnode *dvnode)
{
- struct address_space *mapping = dvnode->netfs.inode.i_mapping;
- struct folio *folio;
- pgoff_t last = req->nr_pages - 1;
- int ret = 0;
+ struct iov_iter iter;
+ unsigned long long i_size = i_size_read(&dvnode->netfs.inode);
+ size_t checked = 0;
- XA_STATE(xas, &mapping->i_pages, 0);
-
- if (unlikely(!req->nr_pages))
+ if (unlikely(!i_size))
return 0;
- rcu_read_lock();
- xas_for_each(&xas, folio, last) {
- if (xas_retry(&xas, folio))
- continue;
-
- BUG_ON(folio->mapping != mapping);
-
- if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) {
- afs_dir_dump(dvnode, req);
- ret = -EIO;
- break;
- }
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
+ checked = iterate_folioq(&iter, iov_iter_count(&iter), dvnode, NULL,
+ afs_dir_check_step);
+ if (checked != i_size) {
+ afs_dir_dump(dvnode);
+ return -EIO;
}
-
- rcu_read_unlock();
- return ret;
+ return 0;
}
/*
@@ -264,134 +225,140 @@ static int afs_dir_open(struct inode *inode, struct file *file)
}
/*
- * Read the directory into the pagecache in one go, scrubbing the previous
- * contents. The list of folios is returned, pinning them so that they don't
- * get reclaimed during the iteration.
+ * Read a file in a single download.
*/
-static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
- __acquires(&dvnode->validate_lock)
+static ssize_t afs_do_read_single(struct afs_vnode *dvnode, struct file *file)
{
- struct address_space *mapping = dvnode->netfs.inode.i_mapping;
- struct afs_read *req;
+ struct iov_iter iter;
+ ssize_t ret;
loff_t i_size;
- int nr_pages, i;
- int ret;
- loff_t remote_size = 0;
-
- _enter("");
+ bool is_dir = (S_ISDIR(dvnode->netfs.inode.i_mode) &&
+ !test_bit(AFS_VNODE_MOUNTPOINT, &dvnode->flags));
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- refcount_set(&req->usage, 1);
- req->vnode = dvnode;
- req->key = key_get(key);
- req->cleanup = afs_dir_read_cleanup;
-
-expand:
i_size = i_size_read(&dvnode->netfs.inode);
- if (i_size < remote_size)
- i_size = remote_size;
- if (i_size < 2048) {
- ret = afs_bad(dvnode, afs_file_error_dir_small);
- goto error;
- }
- if (i_size > 2048 * 1024) {
- trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
- ret = -EFBIG;
- goto error;
+ if (is_dir) {
+ if (i_size < AFS_DIR_BLOCK_SIZE)
+ return afs_bad(dvnode, afs_file_error_dir_small);
+ if (i_size > AFS_DIR_BLOCK_SIZE * 1024) {
+ trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
+ return -EFBIG;
+ }
+ } else {
+ if (i_size > AFSPATHMAX) {
+ trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
+ return -EFBIG;
+ }
}
- _enter("%llu", i_size);
+ /* Expand the storage. TODO: Shrink the storage too. */
+ if (dvnode->directory_size < i_size) {
+ size_t cur_size = dvnode->directory_size;
- nr_pages = (i_size + PAGE_SIZE - 1) / PAGE_SIZE;
+ ret = netfs_alloc_folioq_buffer(NULL,
+ &dvnode->directory, &cur_size, i_size,
+ mapping_gfp_mask(dvnode->netfs.inode.i_mapping));
+ dvnode->directory_size = cur_size;
+ if (ret < 0)
+ return ret;
+ }
- req->actual_len = i_size; /* May change */
- req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */
- req->data_version = dvnode->status.data_version; /* May change */
- iov_iter_xarray(&req->def_iter, ITER_DEST, &dvnode->netfs.inode.i_mapping->i_pages,
- 0, i_size);
- req->iter = &req->def_iter;
+ iov_iter_folio_queue(&iter, ITER_DEST, dvnode->directory, 0, 0, dvnode->directory_size);
- /* Fill in any gaps that we might find where the memory reclaimer has
- * been at work and pin all the folios. If there are any gaps, we will
- * need to reread the entire directory contents.
+ /* AFS requires us to perform the read of a directory synchronously as
+ * a single unit to avoid issues with the directory contents being
+ * changed between reads.
*/
- i = req->nr_pages;
- while (i < nr_pages) {
- struct folio *folio;
-
- folio = filemap_get_folio(mapping, i);
- if (IS_ERR(folio)) {
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_stat_v(dvnode, n_inval);
- folio = __filemap_get_folio(mapping,
- i, FGP_LOCK | FGP_CREAT,
- mapping->gfp_mask);
- if (IS_ERR(folio)) {
- ret = PTR_ERR(folio);
- goto error;
- }
- folio_attach_private(folio, (void *)1);
- folio_unlock(folio);
+ ret = netfs_read_single(&dvnode->netfs.inode, file, &iter);
+ if (ret >= 0) {
+ i_size = i_size_read(&dvnode->netfs.inode);
+ if (i_size > ret) {
+ /* The content has grown, so we need to expand the
+ * buffer.
+ */
+ ret = -ESTALE;
+ } else if (is_dir) {
+ int ret2 = afs_dir_check(dvnode);
+
+ if (ret2 < 0)
+ ret = ret2;
+ } else if (i_size < folioq_folio_size(dvnode->directory, 0)) {
+ /* NUL-terminate a symlink. */
+ char *symlink = kmap_local_folio(folioq_folio(dvnode->directory, 0), 0);
+
+ symlink[i_size] = 0;
+ kunmap_local(symlink);
}
-
- req->nr_pages += folio_nr_pages(folio);
- i += folio_nr_pages(folio);
}
- /* If we're going to reload, we need to lock all the pages to prevent
- * races.
- */
+ return ret;
+}
+
+ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file)
+{
+ ssize_t ret;
+
+ fscache_use_cookie(afs_vnode_cache(dvnode), false);
+ ret = afs_do_read_single(dvnode, file);
+ fscache_unuse_cookie(afs_vnode_cache(dvnode), NULL, NULL);
+ return ret;
+}
+
+/*
+ * Read the directory into a folio_queue buffer in one go, scrubbing the
+ * previous contents. We return -ESTALE if the caller needs to call us again.
+ */
+ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)
+ __acquires(&dvnode->validate_lock)
+{
+ ssize_t ret;
+ loff_t i_size;
+
+ i_size = i_size_read(&dvnode->netfs.inode);
+
ret = -ERESTARTSYS;
if (down_read_killable(&dvnode->validate_lock) < 0)
goto error;
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- goto success;
+ /* We only need to reread the data if it became invalid - or if we
+ * haven't read it yet.
+ */
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ test_bit(AFS_VNODE_DIR_READ, &dvnode->flags)) {
+ ret = i_size;
+ goto valid;
+ }
up_read(&dvnode->validate_lock);
if (down_write_killable(&dvnode->validate_lock) < 0)
goto error;
- if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
- trace_afs_reload_dir(dvnode);
- ret = afs_fetch_data(dvnode, req);
- if (ret < 0)
- goto error_unlock;
-
- task_io_account_read(PAGE_SIZE * req->nr_pages);
-
- if (req->len < req->file_size) {
- /* The content has grown, so we need to expand the
- * buffer.
- */
- up_write(&dvnode->validate_lock);
- remote_size = req->file_size;
- goto expand;
- }
+ if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+ afs_invalidate_cache(dvnode, 0);
- /* Validate the data we just read. */
- ret = afs_dir_check(dvnode, req);
+ if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) ||
+ !test_bit(AFS_VNODE_DIR_READ, &dvnode->flags)) {
+ trace_afs_reload_dir(dvnode);
+ ret = afs_read_single(dvnode, file);
if (ret < 0)
goto error_unlock;
// TODO: Trim excess pages
set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
+ set_bit(AFS_VNODE_DIR_READ, &dvnode->flags);
+ } else {
+ ret = i_size;
}
downgrade_write(&dvnode->validate_lock);
-success:
- return req;
+valid:
+ return ret;
error_unlock:
up_write(&dvnode->validate_lock);
error:
- afs_put_read(req);
- _leave(" = %d", ret);
- return ERR_PTR(ret);
+ _leave(" = %zd", ret);
+ return ret;
}
/*
@@ -399,79 +366,69 @@ error:
*/
static int afs_dir_iterate_block(struct afs_vnode *dvnode,
struct dir_context *ctx,
- union afs_xdr_dir_block *block,
- unsigned blkoff)
+ union afs_xdr_dir_block *block)
{
union afs_xdr_dirent *dire;
- unsigned offset, next, curr, nr_slots;
+ unsigned int blknum, base, hdr, pos, next, nr_slots;
size_t nlen;
int tmp;
- _enter("%llx,%x", ctx->pos, blkoff);
+ blknum = ctx->pos / AFS_DIR_BLOCK_SIZE;
+ base = blknum * AFS_DIR_SLOTS_PER_BLOCK;
+ hdr = (blknum == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
+ pos = DIV_ROUND_UP(ctx->pos, AFS_DIR_DIRENT_SIZE) - base;
- curr = (ctx->pos - blkoff) / sizeof(union afs_xdr_dirent);
+ _enter("%llx,%x", ctx->pos, blknum);
/* walk through the block, an entry at a time */
- for (offset = (blkoff == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
- offset < AFS_DIR_SLOTS_PER_BLOCK;
- offset = next
- ) {
+ for (unsigned int slot = hdr; slot < AFS_DIR_SLOTS_PER_BLOCK; slot = next) {
/* skip entries marked unused in the bitmap */
- if (!(block->hdr.bitmap[offset / 8] &
- (1 << (offset % 8)))) {
- _debug("ENT[%zu.%u]: unused",
- blkoff / sizeof(union afs_xdr_dir_block), offset);
- next = offset + 1;
- if (offset >= curr)
- ctx->pos = blkoff +
- next * sizeof(union afs_xdr_dirent);
+ if (!(block->hdr.bitmap[slot / 8] &
+ (1 << (slot % 8)))) {
+ _debug("ENT[%x]: Unused", base + slot);
+ next = slot + 1;
+ if (next >= pos)
+ ctx->pos = (base + next) * sizeof(union afs_xdr_dirent);
continue;
}
/* got a valid entry */
- dire = &block->dirents[offset];
+ dire = &block->dirents[slot];
nlen = strnlen(dire->u.name,
- sizeof(*block) -
- offset * sizeof(union afs_xdr_dirent));
+ (unsigned long)(block + 1) - (unsigned long)dire->u.name - 1);
if (nlen > AFSNAMEMAX - 1) {
- _debug("ENT[%zu]: name too long (len %u/%zu)",
- blkoff / sizeof(union afs_xdr_dir_block),
- offset, nlen);
+ _debug("ENT[%x]: Name too long (len %zx)",
+ base + slot, nlen);
return afs_bad(dvnode, afs_file_error_dir_name_too_long);
}
- _debug("ENT[%zu.%u]: %s %zu \"%s\"",
- blkoff / sizeof(union afs_xdr_dir_block), offset,
- (offset < curr ? "skip" : "fill"),
+ _debug("ENT[%x]: %s %zx \"%s\"",
+ base + slot, (slot < pos ? "skip" : "fill"),
nlen, dire->u.name);
nr_slots = afs_dir_calc_slots(nlen);
- next = offset + nr_slots;
+ next = slot + nr_slots;
if (next > AFS_DIR_SLOTS_PER_BLOCK) {
- _debug("ENT[%zu.%u]:"
- " %u extends beyond end dir block"
- " (len %zu)",
- blkoff / sizeof(union afs_xdr_dir_block),
- offset, next, nlen);
+ _debug("ENT[%x]: extends beyond end dir block (len %zx)",
+ base + slot, nlen);
return afs_bad(dvnode, afs_file_error_dir_over_end);
}
/* Check that the name-extension dirents are all allocated */
for (tmp = 1; tmp < nr_slots; tmp++) {
- unsigned int ix = offset + tmp;
- if (!(block->hdr.bitmap[ix / 8] & (1 << (ix % 8)))) {
- _debug("ENT[%zu.u]:"
- " %u unmarked extension (%u/%u)",
- blkoff / sizeof(union afs_xdr_dir_block),
- offset, tmp, nr_slots);
+ unsigned int xslot = slot + tmp;
+
+ if (!(block->hdr.bitmap[xslot / 8] & (1 << (xslot % 8)))) {
+ _debug("ENT[%x]: Unmarked extension (%x/%x)",
+ base + slot, tmp, nr_slots);
return afs_bad(dvnode, afs_file_error_dir_unmarked_ext);
}
}
/* skip if starts before the current position */
- if (offset < curr) {
- if (next > curr)
- ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
+ if (slot < pos) {
+ if (next > pos)
+ ctx->pos = (base + next) * sizeof(union afs_xdr_dirent);
continue;
}
@@ -485,75 +442,110 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
return 0;
}
- ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
+ ctx->pos = (base + next) * sizeof(union afs_xdr_dirent);
}
_leave(" = 1 [more]");
return 1;
}
+struct afs_dir_iteration_ctx {
+ struct dir_context *dir_ctx;
+ int error;
+};
+
/*
- * iterate through the data blob that lists the contents of an AFS directory
+ * Iterate through a kmapped directory segment.
*/
-static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
- struct key *key, afs_dataversion_t *_dir_version)
+static size_t afs_dir_iterate_step(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2)
{
- struct afs_vnode *dvnode = AFS_FS_I(dir);
- union afs_xdr_dir_block *dblock;
- struct afs_read *req;
- struct folio *folio;
- unsigned offset, size;
+ struct afs_dir_iteration_ctx *ctx = priv2;
+ struct afs_vnode *dvnode = priv;
int ret;
- _enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos);
-
- if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) {
- _leave(" = -ESTALE");
- return -ESTALE;
+ if (WARN_ON_ONCE(progress % AFS_DIR_BLOCK_SIZE ||
+ len % AFS_DIR_BLOCK_SIZE)) {
+ pr_err("Mis-iteration prog=%zx len=%zx\n",
+ progress % AFS_DIR_BLOCK_SIZE,
+ len % AFS_DIR_BLOCK_SIZE);
+ return len;
}
- req = afs_read_dir(dvnode, key);
- if (IS_ERR(req))
- return PTR_ERR(req);
- *_dir_version = req->data_version;
+ do {
+ ret = afs_dir_iterate_block(dvnode, ctx->dir_ctx, iter_base);
+ if (ret != 1)
+ break;
- /* round the file position up to the next entry boundary */
- ctx->pos += sizeof(union afs_xdr_dirent) - 1;
- ctx->pos &= ~(sizeof(union afs_xdr_dirent) - 1);
+ ctx->dir_ctx->pos = round_up(ctx->dir_ctx->pos, AFS_DIR_BLOCK_SIZE);
+ iter_base += AFS_DIR_BLOCK_SIZE;
+ len -= AFS_DIR_BLOCK_SIZE;
+ } while (len > 0);
- /* walk through the blocks in sequence */
- ret = 0;
- while (ctx->pos < req->actual_len) {
- /* Fetch the appropriate folio from the directory and re-add it
- * to the LRU. We have all the pages pinned with an extra ref.
- */
- folio = __filemap_get_folio(dir->i_mapping, ctx->pos / PAGE_SIZE,
- FGP_ACCESSED, 0);
- if (IS_ERR(folio)) {
- ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
- break;
- }
+ return len;
+}
- offset = round_down(ctx->pos, sizeof(*dblock)) - folio_pos(folio);
- size = min_t(loff_t, folio_size(folio),
- req->actual_len - folio_pos(folio));
+/*
+ * Iterate through the directory folios.
+ */
+static int afs_dir_iterate_contents(struct inode *dir, struct dir_context *dir_ctx)
+{
+ struct afs_dir_iteration_ctx ctx = { .dir_ctx = dir_ctx };
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
+ struct iov_iter iter;
+ unsigned long long i_size = i_size_read(dir);
- do {
- dblock = kmap_local_folio(folio, offset);
- ret = afs_dir_iterate_block(dvnode, ctx, dblock,
- folio_pos(folio) + offset);
- kunmap_local(dblock);
- if (ret != 1)
- goto out;
+ /* Round the file position up to the next entry boundary */
+ dir_ctx->pos = round_up(dir_ctx->pos, sizeof(union afs_xdr_dirent));
- } while (offset += sizeof(*dblock), offset < size);
+ if (i_size <= 0 || dir_ctx->pos >= i_size)
+ return 0;
- ret = 0;
- }
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size);
+ iov_iter_advance(&iter, round_down(dir_ctx->pos, AFS_DIR_BLOCK_SIZE));
+
+ iterate_folioq(&iter, iov_iter_count(&iter), dvnode, &ctx,
+ afs_dir_iterate_step);
+
+ if (ctx.error == -ESTALE)
+ afs_invalidate_dir(dvnode, afs_dir_invalid_iter_stale);
+ return ctx.error;
+}
+
+/*
+ * iterate through the data blob that lists the contents of an AFS directory
+ */
+static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
+ struct file *file, afs_dataversion_t *_dir_version)
+{
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
+ int retry_limit = 100;
+ int ret;
+
+ _enter("{%lu},%llx,,", dir->i_ino, ctx->pos);
+
+ do {
+ if (--retry_limit < 0) {
+ pr_warn("afs_read_dir(): Too many retries\n");
+ ret = -ESTALE;
+ break;
+ }
+ ret = afs_read_dir(dvnode, file);
+ if (ret < 0) {
+ if (ret != -ESTALE)
+ break;
+ if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) {
+ ret = -ESTALE;
+ break;
+ }
+ continue;
+ }
+ *_dir_version = inode_peek_iversion_raw(dir);
+
+ ret = afs_dir_iterate_contents(dir, ctx);
+ up_read(&dvnode->validate_lock);
+ } while (ret == -ESTALE);
-out:
- up_read(&dvnode->validate_lock);
- afs_put_read(req);
_leave(" = %d", ret);
return ret;
}
@@ -565,8 +557,7 @@ static int afs_readdir(struct file *file, struct dir_context *ctx)
{
afs_dataversion_t dir_version;
- return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
- &dir_version);
+ return afs_dir_iterate(file_inode(file), ctx, file, &dir_version);
}
/*
@@ -607,7 +598,7 @@ static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
* - just returns the FID the dentry name maps to if found
*/
static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
- struct afs_fid *fid, struct key *key,
+ struct afs_fid *fid,
afs_dataversion_t *_dir_version)
{
struct afs_super_info *as = dir->i_sb->s_fs_info;
@@ -621,7 +612,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
+ ret = afs_dir_iterate(dir, &cookie.ctx, NULL, _dir_version);
if (ret < 0) {
_leave(" = %d [iter]", ret);
return ret;
@@ -656,19 +647,10 @@ static bool afs_lookup_filldir(struct dir_context *ctx, const char *name,
BUILD_BUG_ON(sizeof(union afs_xdr_dir_block) != 2048);
BUILD_BUG_ON(sizeof(union afs_xdr_dirent) != 32);
- if (cookie->found) {
- if (cookie->nr_fids < 50) {
- cookie->fids[cookie->nr_fids].vnode = ino;
- cookie->fids[cookie->nr_fids].unique = dtype;
- cookie->nr_fids++;
- }
- } else if (cookie->name.len == nlen &&
- memcmp(cookie->name.name, name, nlen) == 0) {
- cookie->fids[1].vnode = ino;
- cookie->fids[1].unique = dtype;
- cookie->found = 1;
- if (cookie->one_only)
- return false;
+ if (cookie->nr_fids < 50) {
+ cookie->fids[cookie->nr_fids].vnode = ino;
+ cookie->fids[cookie->nr_fids].unique = dtype;
+ cookie->nr_fids++;
}
return cookie->nr_fids < 50;
@@ -788,8 +770,7 @@ static bool afs_server_supports_ibulk(struct afs_vnode *dvnode)
* files in one go and create inodes for them. The inode of the file we were
* asked for is returned.
*/
-static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
- struct key *key)
+static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry)
{
struct afs_lookup_cookie *cookie;
struct afs_vnode_param *vp;
@@ -797,6 +778,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct inode *inode = NULL, *ti;
afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
+ bool supports_ibulk;
long ret;
int i;
@@ -813,19 +795,19 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
cookie->nr_fids = 2; /* slot 1 is saved for the fid we actually want
* and slot 0 for the directory */
- if (!afs_server_supports_ibulk(dvnode))
- cookie->one_only = true;
-
- /* search the directory */
- ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
+ /* Search the directory for the named entry using the hash table... */
+ ret = afs_dir_search(dvnode, &dentry->d_name, &cookie->fids[1], &data_version);
if (ret < 0)
goto out;
- dentry->d_fsdata = (void *)(unsigned long)data_version;
+ supports_ibulk = afs_server_supports_ibulk(dvnode);
+ if (supports_ibulk) {
+ /* ...then scan linearly from that point for entries to lookup-ahead. */
+ cookie->ctx.pos = (ret + 1) * AFS_DIR_DIRENT_SIZE;
+ afs_dir_iterate(dir, &cookie->ctx, NULL, &data_version);
+ }
- ret = -ENOENT;
- if (!cookie->found)
- goto out;
+ dentry->d_fsdata = (void *)(unsigned long)data_version;
/* Check to see if we already have an inode for the primary fid. */
inode = ilookup5(dir->i_sb, cookie->fids[1].vnode,
@@ -884,7 +866,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
* the whole operation.
*/
afs_op_set_error(op, -ENOTSUPP);
- if (!cookie->one_only) {
+ if (supports_ibulk) {
op->ops = &afs_inline_bulk_status_operation;
afs_begin_vnode_operation(op);
afs_wait_for_operation(op);
@@ -926,8 +908,7 @@ out:
/*
* Look up an entry in a directory with @sys substitution.
*/
-static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry,
- struct key *key)
+static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry)
{
struct afs_sysnames *subs;
struct afs_net *net = afs_i2net(dir);
@@ -975,7 +956,6 @@ out_s:
afs_put_sysnames(subs);
kfree(buf);
out_p:
- key_put(key);
return ret;
}
@@ -989,7 +969,6 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
struct afs_fid fid = {};
struct inode *inode;
struct dentry *d;
- struct key *key;
int ret;
_enter("{%llx:%llu},%p{%pd},",
@@ -1007,15 +986,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(-ESTALE);
}
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- _leave(" = %ld [key]", PTR_ERR(key));
- return ERR_CAST(key);
- }
-
- ret = afs_validate(dvnode, key);
+ ret = afs_validate(dvnode, NULL);
if (ret < 0) {
- key_put(key);
+ afs_dir_unuse_cookie(dvnode, ret);
_leave(" = %d [val]", ret);
return ERR_PTR(ret);
}
@@ -1025,11 +998,10 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
dentry->d_name.name[dentry->d_name.len - 3] == 's' &&
dentry->d_name.name[dentry->d_name.len - 2] == 'y' &&
dentry->d_name.name[dentry->d_name.len - 1] == 's')
- return afs_lookup_atsys(dir, dentry, key);
+ return afs_lookup_atsys(dir, dentry);
afs_stat_v(dvnode, n_lookup);
- inode = afs_do_lookup(dir, dentry, key);
- key_put(key);
+ inode = afs_do_lookup(dir, dentry);
if (inode == ERR_PTR(-ENOENT))
inode = afs_try_auto_mntpt(dentry, dir);
@@ -1155,7 +1127,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
afs_stat_v(dir, n_reval);
/* search the directory for this vnode */
- ret = afs_do_lookup_one(&dir->netfs.inode, dentry, &fid, key, &dir_version);
+ ret = afs_do_lookup_one(&dir->netfs.inode, dentry, &fid, &dir_version);
switch (ret) {
case 0:
/* the filename maps to something */
@@ -1282,6 +1254,7 @@ void afs_check_for_remote_deletion(struct afs_operation *op)
*/
static void afs_vnode_new_inode(struct afs_operation *op)
{
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode_param *vp = &op->file[1];
struct afs_vnode *vnode;
struct inode *inode;
@@ -1301,6 +1274,10 @@ static void afs_vnode_new_inode(struct afs_operation *op)
vnode = AFS_FS_I(inode);
set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+ if (S_ISDIR(inode->i_mode))
+ afs_mkdir_init_dir(vnode, dvp->vnode);
+ else if (S_ISLNK(inode->i_mode))
+ afs_init_new_symlink(vnode, op);
if (!afs_op_error(op))
afs_cache_permit(vnode, op->key, vnode->cb_break, &vp->scb);
d_instantiate(op->dentry, inode);
@@ -1317,18 +1294,21 @@ static void afs_create_success(struct afs_operation *op)
static void afs_create_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources cres = {};
struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode_param *vp = &op->file[1];
struct afs_vnode *dvnode = dvp->vnode;
_enter("op=%08x", op->debug_id);
+ fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode));
down_write(&dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
afs_edit_dir_add(dvnode, &op->dentry->d_name, &vp->fid,
op->create.reason);
up_write(&dvnode->validate_lock);
+ fscache_end_operation(&cres);
}
static void afs_create_put(struct afs_operation *op)
@@ -1356,6 +1336,7 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
{
struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
+ int ret;
_enter("{%llx:%llu},{%pd},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
@@ -1366,6 +1347,8 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
return PTR_ERR(op);
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1375,7 +1358,9 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
op->create.reason = afs_edit_dir_for_mkdir;
op->mtime = current_time(dir);
op->ops = &afs_mkdir_operation;
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
}
/*
@@ -1388,8 +1373,8 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
clear_nlink(&vnode->netfs.inode);
set_bit(AFS_VNODE_DELETED, &vnode->flags);
- atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_clear_cb_promise(vnode, afs_cb_promise_clear_rmdir);
+ afs_invalidate_dir(vnode, afs_dir_invalid_subdir_removed);
}
}
@@ -1403,18 +1388,21 @@ static void afs_rmdir_success(struct afs_operation *op)
static void afs_rmdir_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources cres = {};
struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode *dvnode = dvp->vnode;
_enter("op=%08x", op->debug_id);
afs_dir_remove_subdir(op->dentry);
+ fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode));
down_write(&dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
afs_edit_dir_remove(dvnode, &op->dentry->d_name,
afs_edit_dir_for_rmdir);
up_write(&dvnode->validate_lock);
+ fscache_end_operation(&cres);
}
static void afs_rmdir_put(struct afs_operation *op)
@@ -1449,6 +1437,8 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
if (IS_ERR(op))
return PTR_ERR(op);
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1472,10 +1462,18 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
op->file[1].vnode = vnode;
}
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+
+ /* Not all systems that can host afs servers have ENOTEMPTY. */
+ if (ret == -EEXIST)
+ ret = -ENOTEMPTY;
+out:
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error:
- return afs_put_operation(op);
+ ret = afs_put_operation(op);
+ goto out;
}
/*
@@ -1538,16 +1536,19 @@ static void afs_unlink_success(struct afs_operation *op)
static void afs_unlink_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources cres = {};
struct afs_vnode_param *dvp = &op->file[0];
struct afs_vnode *dvnode = dvp->vnode;
_enter("op=%08x", op->debug_id);
+ fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode));
down_write(&dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
afs_edit_dir_remove(dvnode, &op->dentry->d_name,
afs_edit_dir_for_unlink);
up_write(&dvnode->validate_lock);
+ fscache_end_operation(&cres);
}
static void afs_unlink_put(struct afs_operation *op)
@@ -1586,6 +1587,8 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
if (IS_ERR(op))
return PTR_ERR(op);
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1632,10 +1635,10 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
afs_wait_for_operation(op);
}
- return afs_put_operation(op);
-
error:
- return afs_put_operation(op);
+ ret = afs_put_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
}
static const struct afs_operation_ops afs_create_operation = {
@@ -1669,6 +1672,8 @@ static int afs_create(struct mnt_idmap *idmap, struct inode *dir,
goto error;
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
op->file[0].modification = true;
@@ -1679,7 +1684,9 @@ static int afs_create(struct mnt_idmap *idmap, struct inode *dir,
op->create.reason = afs_edit_dir_for_create;
op->mtime = current_time(dir);
op->ops = &afs_create_operation;
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error:
d_drop(dentry);
@@ -1744,6 +1751,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
goto error;
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
ret = afs_validate(vnode, op->key);
if (ret < 0)
goto error_op;
@@ -1759,10 +1768,13 @@ static int afs_link(struct dentry *from, struct inode *dir,
op->dentry_2 = from;
op->ops = &afs_link_operation;
op->create.reason = afs_edit_dir_for_link;
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error_op:
afs_put_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
error:
d_drop(dentry);
_leave(" = %d", ret);
@@ -1806,6 +1818,8 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
goto error;
}
+ fscache_use_cookie(afs_vnode_cache(dvnode), true);
+
afs_op_set_vnode(op, 0, dvnode);
op->file[0].dv_delta = 1;
@@ -1814,7 +1828,9 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
op->create.reason = afs_edit_dir_for_symlink;
op->create.symlink = content;
op->mtime = current_time(dir);
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+ afs_dir_unuse_cookie(dvnode, ret);
+ return ret;
error:
d_drop(dentry);
@@ -1846,6 +1862,7 @@ static void afs_rename_success(struct afs_operation *op)
write_seqlock(&vnode->cb_lock);
new_dv = vnode->status.data_version + 1;
+ trace_afs_set_dv(vnode, new_dv);
vnode->status.data_version = new_dv;
inode_set_iversion_raw(&vnode->netfs.inode, new_dv);
@@ -1855,6 +1872,7 @@ static void afs_rename_success(struct afs_operation *op)
static void afs_rename_edit_dir(struct afs_operation *op)
{
+ struct netfs_cache_resources orig_cres = {}, new_cres = {};
struct afs_vnode_param *orig_dvp = &op->file[0];
struct afs_vnode_param *new_dvp = &op->file[1];
struct afs_vnode *orig_dvnode = orig_dvp->vnode;
@@ -1871,6 +1889,10 @@ static void afs_rename_edit_dir(struct afs_operation *op)
op->rename.rehash = NULL;
}
+ fscache_begin_write_operation(&orig_cres, afs_vnode_cache(orig_dvnode));
+ if (new_dvnode != orig_dvnode)
+ fscache_begin_write_operation(&new_cres, afs_vnode_cache(new_dvnode));
+
down_write(&orig_dvnode->validate_lock);
if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta)
@@ -1920,6 +1942,9 @@ static void afs_rename_edit_dir(struct afs_operation *op)
d_move(old_dentry, new_dentry);
up_write(&new_dvnode->validate_lock);
+ fscache_end_operation(&orig_cres);
+ if (new_dvnode != orig_dvnode)
+ fscache_end_operation(&new_cres);
}
static void afs_rename_put(struct afs_operation *op)
@@ -1972,6 +1997,10 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (IS_ERR(op))
return PTR_ERR(op);
+ fscache_use_cookie(afs_vnode_cache(orig_dvnode), true);
+ if (new_dvnode != orig_dvnode)
+ fscache_use_cookie(afs_vnode_cache(new_dvnode), true);
+
ret = afs_validate(vnode, op->key);
afs_op_set_error(op, ret);
if (ret < 0)
@@ -2039,47 +2068,43 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
*/
d_drop(old_dentry);
- return afs_do_sync_operation(op);
+ ret = afs_do_sync_operation(op);
+out:
+ afs_dir_unuse_cookie(orig_dvnode, ret);
+ if (new_dvnode != orig_dvnode)
+ afs_dir_unuse_cookie(new_dvnode, ret);
+ return ret;
error:
- return afs_put_operation(op);
-}
-
-/*
- * Release a directory folio and clean up its private state if it's not busy
- * - return true if the folio can now be released, false if not
- */
-static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags)
-{
- struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
-
- _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio->index);
-
- folio_detach_private(folio);
-
- /* The directory will need reloading. */
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_stat_v(dvnode, n_relpg);
- return true;
+ ret = afs_put_operation(op);
+ goto out;
}
/*
- * Invalidate part or all of a folio.
+ * Write the file contents to the cache as a single blob.
*/
-static void afs_dir_invalidate_folio(struct folio *folio, size_t offset,
- size_t length)
+int afs_single_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
-
- _enter("{%lu},%zu,%zu", folio->index, offset, length);
-
- BUG_ON(!folio_test_locked(folio));
+ struct afs_vnode *dvnode = AFS_FS_I(mapping->host);
+ struct iov_iter iter;
+ bool is_dir = (S_ISDIR(dvnode->netfs.inode.i_mode) &&
+ !test_bit(AFS_VNODE_MOUNTPOINT, &dvnode->flags));
+ int ret = 0;
- /* The directory will need reloading. */
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
- afs_stat_v(dvnode, n_inval);
+ /* Need to lock to prevent the folio queue and folios from being thrown
+ * away.
+ */
+ down_read(&dvnode->validate_lock);
+
+ if (is_dir ?
+ test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) :
+ atomic64_read(&dvnode->cb_expires_at) != AFS_NO_CB_PROMISE) {
+ iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0,
+ i_size_read(&dvnode->netfs.inode));
+ ret = netfs_writeback_single(mapping, wbc, &iter);
+ }
- /* we clean up only if the entire folio is being invalidated */
- if (offset == 0 && length == folio_size(folio))
- folio_detach_private(folio);
+ up_read(&dvnode->validate_lock);
+ return ret;
}
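With the rewrite above, the directory blob lives in a folio_queue and is walked in AFS_DIR_BLOCK_SIZE (2048-byte) units by step callbacks handed to iterate_folioq(); each step consumes whole blocks and returns how much of its span it left unconsumed, which afs_dir_check() compares against i_size to detect a failed block. A simplified, self-contained analogue of that step-callback walk over a contiguous buffer (userspace only; the folio_queue plumbing and real block layout are omitted, and DIR_MAGIC is a made-up value):

/* Simplified analogue of the block-by-block directory check (illustrative). */
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 2048
#define DIR_MAGIC  0x1234

/* Returns the number of bytes it did NOT consume, like the kernel step fn. */
static size_t check_step(const char *base, size_t progress, size_t len)
{
	while (len > 0) {
		unsigned short magic;

		memcpy(&magic, base, sizeof(magic));
		if (magic != DIR_MAGIC) {
			fprintf(stderr, "bad magic at %zx\n", progress);
			break;
		}
		base += BLOCK_SIZE;
		progress += BLOCK_SIZE;
		len -= BLOCK_SIZE;
	}
	return len;
}

int main(void)
{
	static char dir[4 * BLOCK_SIZE];
	unsigned short magic = DIR_MAGIC;
	size_t left;

	for (int i = 0; i < 4; i++)
		memcpy(dir + i * BLOCK_SIZE, &magic, sizeof(magic));

	left = check_step(dir, 0, sizeof(dir));
	if (left)
		printf("check stopped with %zu bytes unchecked\n", left);
	else
		printf("all blocks valid\n");
	return 0;
}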
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index fe223fb78111..60a549f1d9c5 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -10,6 +10,7 @@
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/iversion.h>
+#include <linux/folio_queue.h>
#include "internal.h"
#include "xdr_fs.h"
@@ -105,23 +106,57 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
}
/*
- * Get a new directory folio.
+ * Get a specific block, extending the directory storage to cover it as needed.
*/
-static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
+static union afs_xdr_dir_block *afs_dir_get_block(struct afs_dir_iter *iter, size_t block)
{
- struct address_space *mapping = vnode->netfs.inode.i_mapping;
+ struct folio_queue *fq;
+ struct afs_vnode *dvnode = iter->dvnode;
struct folio *folio;
+ size_t blpos = block * AFS_DIR_BLOCK_SIZE;
+ size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos;
+ int ret;
+
+ if (dvnode->directory_size < blend) {
+ size_t cur_size = dvnode->directory_size;
+
+ ret = netfs_alloc_folioq_buffer(
+ NULL, &dvnode->directory, &cur_size, blend,
+ mapping_gfp_mask(dvnode->netfs.inode.i_mapping));
+ dvnode->directory_size = cur_size;
+ if (ret < 0)
+ goto fail;
+ }
- folio = __filemap_get_folio(mapping, index,
- FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
- mapping->gfp_mask);
- if (IS_ERR(folio)) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- return NULL;
+ fq = iter->fq;
+ if (!fq)
+ fq = dvnode->directory;
+
+ /* Search the folio queue for the folio containing the block... */
+ for (; fq; fq = fq->next) {
+ for (int s = iter->fq_slot; s < folioq_count(fq); s++) {
+ size_t fsize = folioq_folio_size(fq, s);
+
+ if (blend <= fpos + fsize) {
+ /* ... and then return the mapped block. */
+ folio = folioq_folio(fq, s);
+ if (WARN_ON_ONCE(folio_pos(folio) != fpos))
+ goto fail;
+ iter->fq = fq;
+ iter->fq_slot = s;
+ iter->fpos = fpos;
+ return kmap_local_folio(folio, blpos - fpos);
+ }
+ fpos += fsize;
+ }
+ iter->fq_slot = 0;
}
- if (!folio_test_private(folio))
- folio_attach_private(folio, (void *)1);
- return folio;
+
+fail:
+ iter->fq = NULL;
+ iter->fq_slot = 0;
+ afs_invalidate_dir(dvnode, afs_dir_invalid_edit_get_block);
+ return NULL;
}
/*
@@ -209,9 +244,8 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
{
union afs_xdr_dir_block *meta, *block;
union afs_xdr_dirent *de;
- struct folio *folio0, *folio;
- unsigned int need_slots, nr_blocks, b;
- pgoff_t index;
+ struct afs_dir_iter iter = { .dvnode = vnode };
+ unsigned int nr_blocks, b, entry;
loff_t i_size;
int slot;
@@ -220,20 +254,17 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
i_size = i_size_read(&vnode->netfs.inode);
if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
(i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_bad_size);
return;
}
- folio0 = afs_dir_get_folio(vnode, 0);
- if (!folio0) {
- _leave(" [fgp]");
+ meta = afs_dir_get_block(&iter, 0);
+ if (!meta)
return;
- }
/* Work out how many slots we're going to need. */
- need_slots = afs_dir_calc_slots(name->len);
+ iter.nr_slots = afs_dir_calc_slots(name->len);
- meta = kmap_local_folio(folio0, 0);
if (i_size == 0)
goto new_directory;
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
@@ -245,22 +276,21 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
/* If the directory extended into a new folio, then we need to
* tack a new folio on the end.
*/
- index = b / AFS_DIR_BLOCKS_PER_PAGE;
if (nr_blocks >= AFS_DIR_MAX_BLOCKS)
- goto error;
- if (index >= folio_nr_pages(folio0)) {
- folio = afs_dir_get_folio(vnode, index);
- if (!folio)
- goto error;
- } else {
- folio = folio0;
- }
+ goto error_too_many_blocks;
- block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio));
+ /* Lower dir blocks have a counter in the header we can check. */
+ if (b < AFS_DIR_BLOCKS_WITH_CTR &&
+ meta->meta.alloc_ctrs[b] < iter.nr_slots)
+ continue;
+
+ block = afs_dir_get_block(&iter, b);
+ if (!block)
+ goto error;
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- goto invalidated;
+ goto already_invalidated;
_debug("block %u: %2u %3u %u",
b,
@@ -275,31 +305,23 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
}
- /* Only lower dir blocks have a counter in the header. */
- if (b >= AFS_DIR_BLOCKS_WITH_CTR ||
- meta->meta.alloc_ctrs[b] >= need_slots) {
- /* We need to try and find one or more consecutive
- * slots to hold the entry.
- */
- slot = afs_find_contig_bits(block, need_slots);
- if (slot >= 0) {
- _debug("slot %u", slot);
- goto found_space;
- }
+ /* We need to try and find one or more consecutive slots to
+ * hold the entry.
+ */
+ slot = afs_find_contig_bits(block, iter.nr_slots);
+ if (slot >= 0) {
+ _debug("slot %u", slot);
+ goto found_space;
}
kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
}
/* There are no spare slots of sufficient size, yet the operation
* succeeded. Download the directory again.
*/
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_nospc, 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_no_slots);
goto out_unmap;
new_directory:
@@ -307,8 +329,7 @@ new_directory:
i_size = AFS_DIR_BLOCK_SIZE;
afs_set_i_size(vnode, i_size);
slot = AFS_DIR_RESV_BLOCKS0;
- folio = folio0;
- block = kmap_local_folio(folio, 0);
+ block = afs_dir_get_block(&iter, 0);
nr_blocks = 1;
b = 0;
@@ -326,41 +347,39 @@ found_space:
de->u.name[name->len] = 0;
/* Adjust the bitmap. */
- afs_set_contig_bits(block, slot, need_slots);
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
+ afs_set_contig_bits(block, slot, iter.nr_slots);
/* Adjust the allocation counter. */
if (b < AFS_DIR_BLOCKS_WITH_CTR)
- meta->meta.alloc_ctrs[b] -= need_slots;
+ meta->meta.alloc_ctrs[b] -= iter.nr_slots;
+
+ /* Adjust the hash chain. */
+ entry = b * AFS_DIR_SLOTS_PER_BLOCK + slot;
+ iter.bucket = afs_dir_hash_name(name);
+ de->u.hash_next = meta->meta.hashtable[iter.bucket];
+ meta->meta.hashtable[iter.bucket] = htons(entry);
+ kunmap_local(block);
inode_inc_iversion_raw(&vnode->netfs.inode);
afs_stat_v(vnode, n_dir_cr);
_debug("Insert %s in %u[%u]", name->name, b, slot);
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
+
out_unmap:
kunmap_local(meta);
- folio_unlock(folio0);
- folio_put(folio0);
_leave("");
return;
-invalidated:
+already_invalidated:
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_inval, 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
goto out_unmap;
+error_too_many_blocks:
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_too_many_blocks);
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_error, 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out_unmap;
}
@@ -374,13 +393,14 @@ error:
void afs_edit_dir_remove(struct afs_vnode *vnode,
struct qstr *name, enum afs_edit_dir_reason why)
{
- union afs_xdr_dir_block *meta, *block;
- union afs_xdr_dirent *de;
- struct folio *folio0, *folio;
- unsigned int need_slots, nr_blocks, b;
- pgoff_t index;
+ union afs_xdr_dir_block *meta, *block, *pblock;
+ union afs_xdr_dirent *de, *pde;
+ struct afs_dir_iter iter = { .dvnode = vnode };
+ struct afs_fid fid;
+ unsigned int b, slot, entry;
loff_t i_size;
- int slot;
+ __be16 next;
+ int found;
_enter(",,{%d,%s},", name->len, name->name);
@@ -388,81 +408,95 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
if (i_size < AFS_DIR_BLOCK_SIZE ||
i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS ||
(i_size & (AFS_DIR_BLOCK_SIZE - 1))) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_rem_bad_size);
return;
}
- nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
- folio0 = afs_dir_get_folio(vnode, 0);
- if (!folio0) {
- _leave(" [fgp]");
+ if (!afs_dir_init_iter(&iter, name))
return;
- }
-
- /* Work out how many slots we're going to discard. */
- need_slots = afs_dir_calc_slots(name->len);
-
- meta = kmap_local_folio(folio0, 0);
-
- /* Find a block that has sufficient slots available. Each folio
- * contains two or more directory blocks.
- */
- for (b = 0; b < nr_blocks; b++) {
- index = b / AFS_DIR_BLOCKS_PER_PAGE;
- if (index >= folio_nr_pages(folio0)) {
- folio = afs_dir_get_folio(vnode, index);
- if (!folio)
- goto error;
- } else {
- folio = folio0;
- }
- block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio));
-
- /* Abandon the edit if we got a callback break. */
- if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- goto invalidated;
-
- if (b > AFS_DIR_BLOCKS_WITH_CTR ||
- meta->meta.alloc_ctrs[b] <= AFS_DIR_SLOTS_PER_BLOCK - 1 - need_slots) {
- slot = afs_dir_scan_block(block, name, b);
- if (slot >= 0)
- goto found_dirent;
- }
+ meta = afs_dir_find_block(&iter, 0);
+ if (!meta)
+ return;
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
+ /* Find the entry in the blob. */
+ found = afs_dir_search_bucket(&iter, name, &fid);
+ if (found < 0) {
+ /* Didn't find the dirent to clobber. Re-download. */
+ trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent,
+ 0, 0, 0, 0, name->name);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_rem_wrong_name);
+ goto out_unmap;
}
- /* Didn't find the dirent to clobber. Download the directory again. */
- trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent,
- 0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- goto out_unmap;
+ entry = found;
+ b = entry / AFS_DIR_SLOTS_PER_BLOCK;
+ slot = entry % AFS_DIR_SLOTS_PER_BLOCK;
-found_dirent:
+ block = afs_dir_find_block(&iter, b);
+ if (!block)
+ goto error;
+ if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
+ goto already_invalidated;
+
+ /* Check and clear the entry. */
de = &block->dirents[slot];
+ if (de->u.valid != 1)
+ goto error_unmap;
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete, b, slot,
ntohl(de->u.vnode), ntohl(de->u.unique),
name->name);
- memset(de, 0, sizeof(*de) * need_slots);
-
/* Adjust the bitmap. */
- afs_clear_contig_bits(block, slot, need_slots);
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
+ afs_clear_contig_bits(block, slot, iter.nr_slots);
/* Adjust the allocation counter. */
if (b < AFS_DIR_BLOCKS_WITH_CTR)
- meta->meta.alloc_ctrs[b] += need_slots;
+ meta->meta.alloc_ctrs[b] += iter.nr_slots;
+
+ /* Clear the constituent entries. */
+ next = de->u.hash_next;
+ memset(de, 0, sizeof(*de) * iter.nr_slots);
+ kunmap_local(block);
+
+ /* Adjust the hash chain: if iter->prev_entry is 0, the previous link is
+ * the hashtable head; otherwise it's the slot number of the previous entry.
+ */
+ if (!iter.prev_entry) {
+ __be16 prev_next = meta->meta.hashtable[iter.bucket];
+
+ if (unlikely(prev_next != htons(entry))) {
+ pr_warn("%llx:%llx:%x: not head of chain b=%x p=%x,%x e=%x %*s",
+ vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
+ iter.bucket, iter.prev_entry, prev_next, entry,
+ name->len, name->name);
+ goto error;
+ }
+ meta->meta.hashtable[iter.bucket] = next;
+ } else {
+ unsigned int pb = iter.prev_entry / AFS_DIR_SLOTS_PER_BLOCK;
+ unsigned int ps = iter.prev_entry % AFS_DIR_SLOTS_PER_BLOCK;
+ __be16 prev_next;
+
+ pblock = afs_dir_find_block(&iter, pb);
+ if (!pblock)
+ goto error;
+ pde = &pblock->dirents[ps];
+ prev_next = pde->u.hash_next;
+ if (prev_next != htons(entry)) {
+ kunmap_local(pblock);
+ pr_warn("%llx:%llx:%x: not prev in chain b=%x p=%x,%x e=%x %*s",
+ vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
+ iter.bucket, iter.prev_entry, prev_next, entry,
+ name->len, name->name);
+ goto error;
+ }
+ pde->u.hash_next = next;
+ kunmap_local(pblock);
+ }
+
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version);
afs_stat_v(vnode, n_dir_rm);
@@ -470,26 +504,20 @@ found_dirent:
out_unmap:
kunmap_local(meta);
- folio_unlock(folio0);
- folio_put(folio0);
_leave("");
return;
-invalidated:
+already_invalidated:
+ kunmap_local(block);
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_inval,
0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- kunmap_local(block);
- if (folio != folio0) {
- folio_unlock(folio);
- folio_put(folio);
- }
goto out_unmap;
+error_unmap:
+ kunmap_local(block);
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_error,
0, 0, 0, 0, name->name);
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out_unmap;
}
@@ -502,9 +530,8 @@ void afs_edit_dir_update_dotdot(struct afs_vnode *vnode, struct afs_vnode *new_d
{
union afs_xdr_dir_block *block;
union afs_xdr_dirent *de;
- struct folio *folio;
+ struct afs_dir_iter iter = { .dvnode = vnode };
unsigned int nr_blocks, b;
- pgoff_t index;
loff_t i_size;
int slot;
@@ -512,39 +539,35 @@ void afs_edit_dir_update_dotdot(struct afs_vnode *vnode, struct afs_vnode *new_d
i_size = i_size_read(&vnode->netfs.inode);
if (i_size < AFS_DIR_BLOCK_SIZE) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_upd_bad_size);
return;
}
+
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
/* Scan the directory blocks for the ".." entry. */
for (b = 0; b < nr_blocks; b++) {
- index = b / AFS_DIR_BLOCKS_PER_PAGE;
- folio = afs_dir_get_folio(vnode, index);
- if (!folio)
+ block = afs_dir_get_block(&iter, b);
+ if (!block)
goto error;
- block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio));
-
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- goto invalidated;
+ goto already_invalidated;
slot = afs_dir_scan_block(block, &dotdot_name, b);
if (slot >= 0)
goto found_dirent;
kunmap_local(block);
- folio_unlock(folio);
- folio_put(folio);
}
/* Didn't find the dirent to clobber. Download the directory again. */
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_nodd,
0, 0, 0, 0, "..");
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ afs_invalidate_dir(vnode, afs_dir_invalid_edit_upd_no_dd);
goto out;
found_dirent:
@@ -556,26 +579,70 @@ found_dirent:
ntohl(de->u.vnode), ntohl(de->u.unique), "..");
kunmap_local(block);
- folio_unlock(folio);
- folio_put(folio);
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version);
out:
_leave("");
return;
-invalidated:
+already_invalidated:
kunmap_local(block);
- folio_unlock(folio);
- folio_put(folio);
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_inval,
0, 0, 0, 0, "..");
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out;
error:
trace_afs_edit_dir(vnode, why, afs_edit_dir_update_error,
0, 0, 0, 0, "..");
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
goto out;
}
+
+/*
+ * Initialise a new directory. We need to fill in the "." and ".." entries.
+ */
+void afs_mkdir_init_dir(struct afs_vnode *dvnode, struct afs_vnode *parent_dvnode)
+{
+ union afs_xdr_dir_block *meta;
+ struct afs_dir_iter iter = { .dvnode = dvnode };
+ union afs_xdr_dirent *de;
+ unsigned int slot = AFS_DIR_RESV_BLOCKS0;
+ loff_t i_size;
+
+ i_size = i_size_read(&dvnode->netfs.inode);
+ if (i_size != AFS_DIR_BLOCK_SIZE) {
+ afs_invalidate_dir(dvnode, afs_dir_invalid_edit_add_bad_size);
+ return;
+ }
+
+ meta = afs_dir_get_block(&iter, 0);
+ if (!meta)
+ return;
+
+ afs_edit_init_block(meta, meta, 0);
+
+ de = &meta->dirents[slot];
+ de->u.valid = 1;
+ de->u.vnode = htonl(dvnode->fid.vnode);
+ de->u.unique = htonl(dvnode->fid.unique);
+ memcpy(de->u.name, ".", 2);
+ trace_afs_edit_dir(dvnode, afs_edit_dir_for_mkdir, afs_edit_dir_mkdir, 0, slot,
+ dvnode->fid.vnode, dvnode->fid.unique, ".");
+ slot++;
+
+ de = &meta->dirents[slot];
+ de->u.valid = 1;
+ de->u.vnode = htonl(parent_dvnode->fid.vnode);
+ de->u.unique = htonl(parent_dvnode->fid.unique);
+ memcpy(de->u.name, "..", 3);
+ trace_afs_edit_dir(dvnode, afs_edit_dir_for_mkdir, afs_edit_dir_mkdir, 0, slot,
+ parent_dvnode->fid.vnode, parent_dvnode->fid.unique, "..");
+
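+ /* Mark the "." and ".." slots as allocated in block 0's bitmap and
+ * charge them to the allocation counter.
+ */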
+ afs_set_contig_bits(meta, AFS_DIR_RESV_BLOCKS0, 2);
+ meta->meta.alloc_ctrs[0] -= 2;
+ kunmap_local(meta);
+
+ netfs_single_mark_inode_dirty(&dvnode->netfs.inode);
+ set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
+ set_bit(AFS_VNODE_DIR_READ, &dvnode->flags);
+}
diff --git a/fs/afs/dir_search.c b/fs/afs/dir_search.c
new file mode 100644
index 000000000000..b25bd892db4d
--- /dev/null
+++ b/fs/afs/dir_search.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Search a directory's hash table.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * https://tools.ietf.org/html/draft-keiser-afs3-directory-object-00
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/iversion.h>
+#include "internal.h"
+#include "afs_fs.h"
+#include "xdr_fs.h"
+
+/*
+ * Calculate the name hash.
+ */
+unsigned int afs_dir_hash_name(const struct qstr *name)
+{
+ const unsigned char *p = name->name;
+ unsigned int hash = 0, i;
+ int bucket;
+
+ for (i = 0; i < name->len; i++)
+ hash = (hash * 173) + p[i];
+ bucket = hash & (AFS_DIR_HASHTBL_SIZE - 1);
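+ /* The AFS3 dir format treats the hash as a signed int: a hash with the
+ * top bit set is reflected back into the table range.
+ */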
+ if (hash > INT_MAX) {
+ bucket = AFS_DIR_HASHTBL_SIZE - bucket;
+ bucket &= (AFS_DIR_HASHTBL_SIZE - 1);
+ }
+ return bucket;
+}
+
+/*
+ * Reset a directory iterator.
+ */
+static bool afs_dir_reset_iter(struct afs_dir_iter *iter)
+{
+ unsigned long long i_size = i_size_read(&iter->dvnode->netfs.inode);
+ unsigned int nblocks;
+
+ /* Work out the maximum number of steps we can take. */
+ nblocks = umin(i_size / AFS_DIR_BLOCK_SIZE, AFS_DIR_MAX_BLOCKS);
+ if (!nblocks)
+ return false;
+ iter->loop_check = nblocks * (AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS);
+ iter->prev_entry = 0; /* Hash head is previous */
+ return true;
+}
+
+/*
+ * Initialise a directory iterator for looking up a name.
+ */
+bool afs_dir_init_iter(struct afs_dir_iter *iter, const struct qstr *name)
+{
+ iter->nr_slots = afs_dir_calc_slots(name->len);
+ iter->bucket = afs_dir_hash_name(name);
+ return afs_dir_reset_iter(iter);
+}
+
+/*
+ * Get a specific block.
+ */
+union afs_xdr_dir_block *afs_dir_find_block(struct afs_dir_iter *iter, size_t block)
+{
+ struct folio_queue *fq = iter->fq;
+ struct afs_vnode *dvnode = iter->dvnode;
+ struct folio *folio;
+ size_t blpos = block * AFS_DIR_BLOCK_SIZE;
+ size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos;
+ int slot = iter->fq_slot;
+
+ _enter("%zx,%d", block, slot);
+
+ if (iter->block) {
+ kunmap_local(iter->block);
+ iter->block = NULL;
+ }
+
+ if (dvnode->directory_size < blend)
+ goto fail;
+
+ if (!fq || blpos < fpos) {
+ fq = dvnode->directory;
+ slot = 0;
+ fpos = 0;
+ }
+
+ /* Search the folio queue for the folio containing the block... */
+ for (; fq; fq = fq->next) {
+ for (; slot < folioq_count(fq); slot++) {
+ size_t fsize = folioq_folio_size(fq, slot);
+
+ if (blend <= fpos + fsize) {
+ /* ... and then return the mapped block. */
+ folio = folioq_folio(fq, slot);
+ if (WARN_ON_ONCE(folio_pos(folio) != fpos))
+ goto fail;
+ iter->fq = fq;
+ iter->fq_slot = slot;
+ iter->fpos = fpos;
+ iter->block = kmap_local_folio(folio, blpos - fpos);
+ return iter->block;
+ }
+ fpos += fsize;
+ }
+ slot = 0;
+ }
+
+fail:
+ iter->fq = NULL;
+ iter->fq_slot = 0;
+ afs_invalidate_dir(dvnode, afs_dir_invalid_edit_get_block);
+ return NULL;
+}
+
+/*
+ * Search through a directory bucket.
+ */
+int afs_dir_search_bucket(struct afs_dir_iter *iter, const struct qstr *name,
+ struct afs_fid *_fid)
+{
+ const union afs_xdr_dir_block *meta;
+ unsigned int entry;
+ int ret = -ESTALE;
+
+ meta = afs_dir_find_block(iter, 0);
+ if (!meta)
+ return -ESTALE;
+
+ entry = ntohs(meta->meta.hashtable[iter->bucket & (AFS_DIR_HASHTBL_SIZE - 1)]);
+ _enter("%x,%x", iter->bucket, entry);
+
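+ /* Walk the hash chain from the bucket head, following ->hash_next until
+ * the name matches or the chain ends; loop_check bounds the walk in case
+ * the chain is circular.
+ */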
+ while (entry) {
+ const union afs_xdr_dir_block *block;
+ const union afs_xdr_dirent *dire;
+ unsigned int blnum = entry / AFS_DIR_SLOTS_PER_BLOCK;
+ unsigned int slot = entry % AFS_DIR_SLOTS_PER_BLOCK;
+ unsigned int resv = (blnum == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS);
+
+ _debug("search %x", entry);
+
+ if (slot < resv) {
+ kdebug("slot out of range h=%x rs=%2x sl=%2x-%2x",
+ iter->bucket, resv, slot, slot + iter->nr_slots - 1);
+ goto bad;
+ }
+
+ block = afs_dir_find_block(iter, blnum);
+ if (!block)
+ goto bad;
+ dire = &block->dirents[slot];
+
+ if (slot + iter->nr_slots <= AFS_DIR_SLOTS_PER_BLOCK &&
+ memcmp(dire->u.name, name->name, name->len) == 0 &&
+ dire->u.name[name->len] == '\0') {
+ _fid->vnode = ntohl(dire->u.vnode);
+ _fid->unique = ntohl(dire->u.unique);
+ ret = entry;
+ goto found;
+ }
+
+ iter->prev_entry = entry;
+ entry = ntohs(dire->u.hash_next);
+ if (!--iter->loop_check) {
+ kdebug("dir chain loop h=%x", iter->bucket);
+ goto bad;
+ }
+ }
+
+ ret = -ENOENT;
+found:
+ if (iter->block) {
+ kunmap_local(iter->block);
+ iter->block = NULL;
+ }
+
+bad:
+ if (ret == -ESTALE)
+ afs_invalidate_dir(iter->dvnode, afs_dir_invalid_iter_stale);
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Search the appropriate hash chain in the contents of an AFS directory.
+ */
+int afs_dir_search(struct afs_vnode *dvnode, struct qstr *name,
+ struct afs_fid *_fid, afs_dataversion_t *_dir_version)
+{
+ struct afs_dir_iter iter = { .dvnode = dvnode, };
+ int ret, retry_limit = 3;
+
+ _enter("{%lu},,,", dvnode->netfs.inode.i_ino);
+
+ if (!afs_dir_init_iter(&iter, name))
+ return -ENOENT;
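+ /* Search the downloaded directory contents, re-downloading and retrying
+ * a bounded number of times if the contents turn out to be stale.
+ */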
+ do {
+ if (--retry_limit < 0) {
+ pr_warn("afs_read_dir(): Too many retries\n");
+ ret = -ESTALE;
+ break;
+ }
+ ret = afs_read_dir(dvnode, NULL);
+ if (ret < 0) {
+ if (ret != -ESTALE)
+ break;
+ if (test_bit(AFS_VNODE_DELETED, &dvnode->flags)) {
+ ret = -ESTALE;
+ break;
+ }
+ continue;
+ }
+ *_dir_version = inode_peek_iversion_raw(&dvnode->netfs.inode);
+
+ ret = afs_dir_search_bucket(&iter, name, _fid);
+ up_read(&dvnode->validate_lock);
+ if (ret == -ESTALE)
+ afs_dir_reset_iter(&iter);
+ } while (ret == -ESTALE);
+
+ _leave(" = %d", ret);
+ return ret;
+}
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index c4d2711e20ad..d8bf52f77d93 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -186,50 +186,6 @@ out:
}
/*
- * Look up @cell in a dynroot directory. This is a substitution for the
- * local cell name for the net namespace.
- */
-static struct dentry *afs_lookup_atcell(struct dentry *dentry)
-{
- struct afs_cell *cell;
- struct afs_net *net = afs_d2net(dentry);
- struct dentry *ret;
- char *name;
- int len;
-
- if (!net->ws_cell)
- return ERR_PTR(-ENOENT);
-
- ret = ERR_PTR(-ENOMEM);
- name = kmalloc(AFS_MAXCELLNAME + 1, GFP_KERNEL);
- if (!name)
- goto out_p;
-
- down_read(&net->cells_lock);
- cell = net->ws_cell;
- if (cell) {
- len = cell->name_len;
- memcpy(name, cell->name, len + 1);
- }
- up_read(&net->cells_lock);
-
- ret = ERR_PTR(-ENOENT);
- if (!cell)
- goto out_n;
-
- ret = lookup_one_len(name, dentry->d_parent, len);
-
- /* We don't want to d_add() the @cell dentry here as we don't want to
- * the cached dentry to hide changes to the local cell name.
- */
-
-out_n:
- kfree(name);
-out_p:
- return ret;
-}
-
-/*
* Look up an entry in a dynroot directory.
*/
static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
@@ -247,10 +203,6 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
return ERR_PTR(-ENAMETOOLONG);
}
- if (dentry->d_name.len == 5 &&
- memcmp(dentry->d_name.name, "@cell", 5) == 0)
- return afs_lookup_atcell(dentry);
-
return d_splice_alias(afs_try_auto_mntpt(dentry, dir), dentry);
}
@@ -271,7 +223,8 @@ const struct dentry_operations afs_dynroot_dentry_operations = {
int afs_dynroot_mkdir(struct afs_net *net, struct afs_cell *cell)
{
struct super_block *sb = net->dynroot_sb;
- struct dentry *root, *subdir;
+ struct dentry *root, *subdir, *dsubdir;
+ char *dotname = cell->name - 1;
int ret;
if (!sb || atomic_read(&sb->s_active) == 0)
@@ -286,14 +239,43 @@ int afs_dynroot_mkdir(struct afs_net *net, struct afs_cell *cell)
goto unlock;
}
- /* Note that we're retaining an extra ref on the dentry */
+ dsubdir = lookup_one_len(dotname, root, cell->name_len + 1);
+ if (IS_ERR(dsubdir)) {
+ ret = PTR_ERR(dsubdir);
+ dput(subdir);
+ goto unlock;
+ }
+
+ /* Note that we're retaining extra refs on the dentries. */
subdir->d_fsdata = (void *)1UL;
+ dsubdir->d_fsdata = (void *)1UL;
ret = 0;
unlock:
inode_unlock(root->d_inode);
return ret;
}
+static void afs_dynroot_rm_one_dir(struct dentry *root, const char *name, size_t name_len)
+{
+ struct dentry *subdir;
+
+ /* Don't want to trigger a lookup call, which will re-add the cell */
+ subdir = try_lookup_one_len(name, root, name_len);
+ if (IS_ERR_OR_NULL(subdir)) {
+ _debug("lookup %ld", PTR_ERR(subdir));
+ return;
+ }
+
+ _debug("rmdir %pd %u", subdir, d_count(subdir));
+
+ if (subdir->d_fsdata) {
+ _debug("unpin %u", d_count(subdir));
+ subdir->d_fsdata = NULL;
+ dput(subdir);
+ }
+ dput(subdir);
+}
+
/*
* Remove a manually added cell mount directory.
* - The caller must hold net->proc_cells_lock
@@ -301,32 +283,141 @@ unlock:
void afs_dynroot_rmdir(struct afs_net *net, struct afs_cell *cell)
{
struct super_block *sb = net->dynroot_sb;
- struct dentry *root, *subdir;
+ char *dotname = cell->name - 1;
if (!sb || atomic_read(&sb->s_active) == 0)
return;
- root = sb->s_root;
- inode_lock(root->d_inode);
+ inode_lock(sb->s_root->d_inode);
+ afs_dynroot_rm_one_dir(sb->s_root, cell->name, cell->name_len);
+ afs_dynroot_rm_one_dir(sb->s_root, dotname, cell->name_len + 1);
+ inode_unlock(sb->s_root->d_inode);
+ _leave("");
+}
- /* Don't want to trigger a lookup call, which will re-add the cell */
- subdir = try_lookup_one_len(cell->name, root, cell->name_len);
- if (IS_ERR_OR_NULL(subdir)) {
- _debug("lookup %ld", PTR_ERR(subdir));
- goto no_dentry;
+static void afs_atcell_delayed_put_cell(void *arg)
+{
+ struct afs_cell *cell = arg;
+
+ afs_put_cell(cell, afs_cell_trace_put_atcell);
+}
+
+/*
+ * Read @cell or .@cell symlinks.
+ */
+static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *done)
+{
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_cell *cell;
+ struct afs_net *net = afs_i2net(inode);
+ const char *name;
+ bool dotted = vnode->fid.vnode == 3;
+
+ if (!net->ws_cell)
+ return ERR_PTR(-ENOENT);
+
+ down_read(&net->cells_lock);
+
+ cell = net->ws_cell;
+ if (dotted)
+ name = cell->name - 1;
+ else
+ name = cell->name;
+ afs_get_cell(cell, afs_cell_trace_get_atcell);
+ set_delayed_call(done, afs_atcell_delayed_put_cell, cell);
+
+ up_read(&net->cells_lock);
+ return name;
+}
+
+static const struct inode_operations afs_atcell_inode_operations = {
+ .get_link = afs_atcell_get_link,
+};
+
+/*
+ * Create an @cell or .@cell symlink in a dynroot directory as a substitution
+ * for the local cell name for the net namespace.
+ */
+static struct dentry *afs_dynroot_create_symlink(struct dentry *root, const char *name)
+{
+ struct afs_vnode *vnode;
+ struct afs_fid fid = { .vnode = 2, .unique = 1, };
+ struct dentry *dentry;
+ struct inode *inode;
+
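+ /* Vnode ID 2 is used for @cell and 3 for .@cell so that the get_link op
+ * can tell which variant it is reading (see afs_atcell_get_link()).
+ */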
+ if (name[0] == '.')
+ fid.vnode = 3;
+
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ return ERR_PTR(-ENOMEM);
+
+ inode = iget5_locked(dentry->d_sb, fid.vnode,
+ afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
+ if (!inode) {
+ dput(dentry);
+ return ERR_PTR(-ENOMEM);
}
- _debug("rmdir %pd %u", subdir, d_count(subdir));
+ vnode = AFS_FS_I(inode);
- if (subdir->d_fsdata) {
- _debug("unpin %u", d_count(subdir));
- subdir->d_fsdata = NULL;
- dput(subdir);
+ /* there shouldn't be an existing inode */
+ if (WARN_ON_ONCE(!(inode->i_state & I_NEW))) {
+ iput(inode);
+ dput(dentry);
+ return ERR_PTR(-EIO);
}
- dput(subdir);
-no_dentry:
+
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 1);
+ inode->i_size = 0;
+ inode->i_mode = S_IFLNK | 0555;
+ inode->i_op = &afs_atcell_inode_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_NOATIME;
+
+ unlock_new_inode(inode);
+ d_splice_alias(inode, dentry);
+ return dentry;
+}
+
+/*
+ * Create @cell and .@cell symlinks.
+ */
+static int afs_dynroot_symlink(struct afs_net *net)
+{
+ struct super_block *sb = net->dynroot_sb;
+ struct dentry *root, *symlink, *dsymlink;
+ int ret;
+
+ /* Create the symlinks directly under the dynroot's root dentry. */
+ root = sb->s_root;
+ inode_lock(root->d_inode);
+ symlink = afs_dynroot_create_symlink(root, "@cell");
+ if (IS_ERR(symlink)) {
+ ret = PTR_ERR(symlink);
+ goto unlock;
+ }
+
+ dsymlink = afs_dynroot_create_symlink(root, ".@cell");
+ if (IS_ERR(dsymlink)) {
+ ret = PTR_ERR(dsymlink);
+ dput(symlink);
+ goto unlock;
+ }
+
+ /* Note that we're retaining extra refs on the dentries. */
+ symlink->d_fsdata = (void *)1UL;
+ dsymlink->d_fsdata = (void *)1UL;
+ ret = 0;
+unlock:
inode_unlock(root->d_inode);
- _leave("");
+ return ret;
}
/*
@@ -341,6 +432,10 @@ int afs_dynroot_populate(struct super_block *sb)
mutex_lock(&net->proc_cells_lock);
net->dynroot_sb = sb;
+ ret = afs_dynroot_symlink(net);
+ if (ret < 0)
+ goto error;
+
hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
ret = afs_dynroot_mkdir(net, cell);
if (ret < 0)
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 6762eff97517..fc15497608c6 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -20,7 +20,6 @@
#include "internal.h"
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
-static int afs_symlink_read_folio(struct file *file, struct folio *folio);
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
@@ -61,13 +60,6 @@ const struct address_space_operations afs_file_aops = {
.writepages = afs_writepages,
};
-const struct address_space_operations afs_symlink_aops = {
- .read_folio = afs_symlink_read_folio,
- .release_folio = netfs_release_folio,
- .invalidate_folio = netfs_invalidate_folio,
- .migrate_folio = filemap_migrate_folio,
-};
-
static const struct vm_operations_struct afs_vm_ops = {
.open = afs_vm_open,
.close = afs_vm_close,
@@ -208,49 +200,12 @@ int afs_release(struct inode *inode, struct file *file)
return ret;
}
-/*
- * Allocate a new read record.
- */
-struct afs_read *afs_alloc_read(gfp_t gfp)
-{
- struct afs_read *req;
-
- req = kzalloc(sizeof(struct afs_read), gfp);
- if (req)
- refcount_set(&req->usage, 1);
-
- return req;
-}
-
-/*
- * Dispose of a ref to a read record.
- */
-void afs_put_read(struct afs_read *req)
-{
- if (refcount_dec_and_test(&req->usage)) {
- if (req->cleanup)
- req->cleanup(req);
- key_put(req->key);
- kfree(req);
- }
-}
-
static void afs_fetch_data_notify(struct afs_operation *op)
{
- struct afs_read *req = op->fetch.req;
- struct netfs_io_subrequest *subreq = req->subreq;
- int error = afs_op_error(op);
-
- req->error = error;
- if (subreq) {
- subreq->rreq->i_size = req->file_size;
- if (req->pos + req->actual_len >= req->file_size)
- __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
- netfs_read_subreq_terminated(subreq, error, false);
- req->subreq = NULL;
- } else if (req->done) {
- req->done(req);
- }
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
+
+ subreq->error = afs_op_error(op);
+ netfs_read_subreq_terminated(subreq);
}
static void afs_fetch_data_success(struct afs_operation *op)
@@ -260,7 +215,7 @@ static void afs_fetch_data_success(struct afs_operation *op)
_enter("op=%08x", op->debug_id);
afs_vnode_commit_status(op, &op->file[0]);
afs_stat_v(vnode, n_fetches);
- atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
+ atomic_long_add(op->fetch.subreq->transferred, &op->net->n_fetch_bytes);
afs_fetch_data_notify(op);
}
@@ -270,107 +225,188 @@ static void afs_fetch_data_aborted(struct afs_operation *op)
afs_fetch_data_notify(op);
}
-static void afs_fetch_data_put(struct afs_operation *op)
-{
- op->fetch.req->error = afs_op_error(op);
- afs_put_read(op->fetch.req);
-}
-
-static const struct afs_operation_ops afs_fetch_data_operation = {
+const struct afs_operation_ops afs_fetch_data_operation = {
.issue_afs_rpc = afs_fs_fetch_data,
.issue_yfs_rpc = yfs_fs_fetch_data,
.success = afs_fetch_data_success,
.aborted = afs_fetch_data_aborted,
.failed = afs_fetch_data_notify,
- .put = afs_fetch_data_put,
};
+static void afs_issue_read_call(struct afs_operation *op)
+{
+ op->call_responded = false;
+ op->call_error = 0;
+ op->call_abort_code = 0;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags))
+ yfs_fs_fetch_data(op);
+ else
+ afs_fs_fetch_data(op);
+}
+
+static void afs_end_read(struct afs_operation *op)
+{
+ if (op->call_responded && op->server)
+ set_bit(AFS_SERVER_FL_RESPONDING, &op->server->flags);
+
+ if (!afs_op_error(op))
+ afs_fetch_data_success(op);
+ else if (op->cumul_error.aborted)
+ afs_fetch_data_aborted(op);
+ else
+ afs_fetch_data_notify(op);
+
+ afs_end_vnode_operation(op);
+ afs_put_operation(op);
+}
+
+/*
+ * Perform I/O processing on an asynchronous call. The work item carries a ref
+ * to the call struct that we either need to release or to pass on.
+ */
+static void afs_read_receive(struct afs_call *call)
+{
+ struct afs_operation *op = call->op;
+ enum afs_call_state state;
+
+ _enter("");
+
+ state = READ_ONCE(call->state);
+ if (state == AFS_CALL_COMPLETE)
+ return;
+ trace_afs_read_recv(op, call);
+
+ while (state < AFS_CALL_COMPLETE && READ_ONCE(call->need_attention)) {
+ WRITE_ONCE(call->need_attention, false);
+ afs_deliver_to_call(call);
+ state = READ_ONCE(call->state);
+ }
+
+ if (state < AFS_CALL_COMPLETE) {
+ netfs_read_subreq_progress(op->fetch.subreq);
+ if (rxrpc_kernel_check_life(call->net->socket, call->rxcall))
+ return;
+ /* rxrpc terminated the call. */
+ afs_set_call_complete(call, call->error, call->abort_code);
+ }
+
+ op->call_abort_code = call->abort_code;
+ op->call_error = call->error;
+ op->call_responded = call->responded;
+ op->call = NULL;
+ call->op = NULL;
+ afs_put_call(call);
+
+ /* If the call failed, then we need to crank the server rotation
+ * handle and try the next.
+ */
+ if (afs_select_fileserver(op)) {
+ afs_issue_read_call(op);
+ return;
+ }
+
+ afs_end_read(op);
+}
+
+void afs_fetch_data_async_rx(struct work_struct *work)
+{
+ struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+ afs_read_receive(call);
+ afs_put_call(call);
+}
+
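+/*
+ * Handle a fetch call being cancelled before it could be issued: for an async
+ * call, queue and flush the async receive work so that any deferred
+ * processing completes before the caller cleans up.
+ */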
+void afs_fetch_data_immediate_cancel(struct afs_call *call)
+{
+ if (call->async) {
+ afs_get_call(call, afs_call_trace_wake);
+ if (!queue_work(afs_async_calls, &call->async_work))
+ afs_deferred_put_call(call);
+ flush_work(&call->async_work);
+ }
+}
+
/*
* Fetch file data from the volume.
*/
-int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
+static void afs_issue_read(struct netfs_io_subrequest *subreq)
{
struct afs_operation *op;
+ struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
+ struct key *key = subreq->rreq->netfs_priv;
_enter("%s{%llx:%llu.%u},%x,,,",
vnode->volume->name,
vnode->fid.vid,
vnode->fid.vnode,
vnode->fid.unique,
- key_serial(req->key));
+ key_serial(key));
- op = afs_alloc_operation(req->key, vnode->volume);
+ op = afs_alloc_operation(key, vnode->volume);
if (IS_ERR(op)) {
- if (req->subreq)
- netfs_read_subreq_terminated(req->subreq, PTR_ERR(op), false);
- return PTR_ERR(op);
+ subreq->error = PTR_ERR(op);
+ netfs_read_subreq_terminated(subreq);
+ return;
}
afs_op_set_vnode(op, 0, vnode);
- op->fetch.req = afs_get_read(req);
+ op->fetch.subreq = subreq;
op->ops = &afs_fetch_data_operation;
- return afs_do_sync_operation(op);
-}
-
-static void afs_read_worker(struct work_struct *work)
-{
- struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
- struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
- struct afs_read *fsreq;
-
- fsreq = afs_alloc_read(GFP_NOFS);
- if (!fsreq)
- return netfs_read_subreq_terminated(subreq, -ENOMEM, false);
-
- fsreq->subreq = subreq;
- fsreq->pos = subreq->start + subreq->transferred;
- fsreq->len = subreq->len - subreq->transferred;
- fsreq->key = key_get(subreq->rreq->netfs_priv);
- fsreq->vnode = vnode;
- fsreq->iter = &subreq->io_iter;
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
- afs_fetch_data(fsreq->vnode, fsreq);
- afs_put_read(fsreq);
-}
-
-static void afs_issue_read(struct netfs_io_subrequest *subreq)
-{
- INIT_WORK(&subreq->work, afs_read_worker);
- queue_work(system_long_wq, &subreq->work);
-}
-static int afs_symlink_read_folio(struct file *file, struct folio *folio)
-{
- struct afs_vnode *vnode = AFS_FS_I(folio->mapping->host);
- struct afs_read *fsreq;
- int ret;
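+ /* Readahead and iocb-driven reads are run asynchronously: the fetch is
+ * issued here and completion is driven from the rxrpc receive side
+ * rather than by waiting in this thread.
+ */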
+ if (subreq->rreq->origin == NETFS_READAHEAD ||
+ subreq->rreq->iocb) {
+ op->flags |= AFS_OPERATION_ASYNC;
- fsreq = afs_alloc_read(GFP_NOFS);
- if (!fsreq)
- return -ENOMEM;
+ if (!afs_begin_vnode_operation(op)) {
+ subreq->error = afs_put_operation(op);
+ netfs_read_subreq_terminated(subreq);
+ return;
+ }
- fsreq->pos = folio_pos(folio);
- fsreq->len = folio_size(folio);
- fsreq->vnode = vnode;
- fsreq->iter = &fsreq->def_iter;
- iov_iter_xarray(&fsreq->def_iter, ITER_DEST, &folio->mapping->i_pages,
- fsreq->pos, fsreq->len);
+ if (!afs_select_fileserver(op)) {
+ afs_end_read(op);
+ return;
+ }
- ret = afs_fetch_data(fsreq->vnode, fsreq);
- if (ret == 0)
- folio_mark_uptodate(folio);
- folio_unlock(folio);
- return ret;
+ afs_issue_read_call(op);
+ } else {
+ afs_do_sync_operation(op);
+ }
}
static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
{
+ struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
+
if (file)
rreq->netfs_priv = key_get(afs_file_key(file));
rreq->rsize = 256 * 1024;
rreq->wsize = 256 * 1024 * 1024;
+
+ switch (rreq->origin) {
+ case NETFS_READ_SINGLE:
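+ /* Single-blob reads (directory or symlink content) may be issued
+ * without a file, in which case a key is obtained from the cell.
+ */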
+ if (!file) {
+ struct key *key = afs_request_key(vnode->volume->cell);
+
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+ rreq->netfs_priv = key;
+ }
+ break;
+ case NETFS_WRITEBACK:
+ case NETFS_WRITETHROUGH:
+ case NETFS_UNBUFFERED_WRITE:
+ case NETFS_DIO_WRITE:
+ if (S_ISREG(rreq->inode->i_mode))
+ rreq->io_streams[0].avail = true;
+ break;
+ case NETFS_WRITEBACK_SINGLE:
+ default:
+ break;
+ }
return 0;
}
diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
index 428721bbe4f6..8418813ee043 100644
--- a/fs/afs/fs_operation.c
+++ b/fs/afs/fs_operation.c
@@ -49,6 +49,105 @@ struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *vo
return op;
}
+struct afs_io_locker {
+ struct list_head link;
+ struct task_struct *task;
+ unsigned long have_lock;
+};
+
+/*
+ * Unlock the I/O lock on a vnode.
+ */
+static void afs_unlock_for_io(struct afs_vnode *vnode)
+{
+ struct afs_io_locker *locker;
+
+ spin_lock(&vnode->lock);
+ locker = list_first_entry_or_null(&vnode->io_lock_waiters,
+ struct afs_io_locker, link);
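+ /* Hand the lock straight to the first waiter, if there is one; the
+ * AFS_VNODE_IO_LOCK bit is left set so the lock passes on without a gap.
+ */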
+ if (locker) {
+ list_del(&locker->link);
+ smp_store_release(&locker->have_lock, 1); /* The unlock barrier. */
+ smp_mb__after_atomic(); /* Store have_lock before task state */
+ wake_up_process(locker->task);
+ } else {
+ clear_bit(AFS_VNODE_IO_LOCK, &vnode->flags);
+ }
+ spin_unlock(&vnode->lock);
+}
+
+/*
+ * Lock the I/O lock on a vnode uninterruptibly. We can't use an ordinary
+ * mutex as lockdep will complain if we unlock it in the wrong thread.
+ */
+static void afs_lock_for_io(struct afs_vnode *vnode)
+{
+ struct afs_io_locker myself = { .task = current, };
+
+ spin_lock(&vnode->lock);
+
+ if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) {
+ spin_unlock(&vnode->lock);
+ return;
+ }
+
+ list_add_tail(&myself.link, &vnode->io_lock_waiters);
+ spin_unlock(&vnode->lock);
+
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (smp_load_acquire(&myself.have_lock)) /* The lock barrier */
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+}
+
+/*
+ * Lock the I/O lock on a vnode interruptibly. We can't use an ordinary mutex
+ * as lockdep will complain if we unlock it in the wrong thread.
+ */
+static int afs_lock_for_io_interruptible(struct afs_vnode *vnode)
+{
+ struct afs_io_locker myself = { .task = current, };
+ int ret = 0;
+
+ spin_lock(&vnode->lock);
+
+ if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) {
+ spin_unlock(&vnode->lock);
+ return 0;
+ }
+
+ list_add_tail(&myself.link, &vnode->io_lock_waiters);
+ spin_unlock(&vnode->lock);
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (smp_load_acquire(&myself.have_lock) || /* The lock barrier */
+ signal_pending(current))
+ break;
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ /* If we got a signal, try to transfer the lock onto the next
+ * waiter.
+ */
+ if (unlikely(signal_pending(current))) {
+ spin_lock(&vnode->lock);
+ if (myself.have_lock) {
+ spin_unlock(&vnode->lock);
+ afs_unlock_for_io(vnode);
+ } else {
+ list_del(&myself.link);
+ spin_unlock(&vnode->lock);
+ }
+ ret = -ERESTARTSYS;
+ }
+ return ret;
+}
+
/*
* Lock the vnode(s) being operated upon.
*/
@@ -60,7 +159,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
_enter("");
if (op->flags & AFS_OPERATION_UNINTR) {
- mutex_lock(&vnode->io_lock);
+ afs_lock_for_io(vnode);
op->flags |= AFS_OPERATION_LOCK_0;
_leave(" = t [1]");
return true;
@@ -72,7 +171,7 @@ static bool afs_get_io_locks(struct afs_operation *op)
if (vnode2 > vnode)
swap(vnode, vnode2);
- if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
+ if (afs_lock_for_io_interruptible(vnode) < 0) {
afs_op_set_error(op, -ERESTARTSYS);
op->flags |= AFS_OPERATION_STOP;
_leave(" = f [I 0]");
@@ -81,10 +180,10 @@ static bool afs_get_io_locks(struct afs_operation *op)
op->flags |= AFS_OPERATION_LOCK_0;
if (vnode2) {
- if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) {
+ if (afs_lock_for_io_interruptible(vnode2) < 0) {
afs_op_set_error(op, -ERESTARTSYS);
op->flags |= AFS_OPERATION_STOP;
- mutex_unlock(&vnode->io_lock);
+ afs_unlock_for_io(vnode);
op->flags &= ~AFS_OPERATION_LOCK_0;
_leave(" = f [I 1]");
return false;
@@ -104,9 +203,9 @@ static void afs_drop_io_locks(struct afs_operation *op)
_enter("");
if (op->flags & AFS_OPERATION_LOCK_1)
- mutex_unlock(&vnode2->io_lock);
+ afs_unlock_for_io(vnode2);
if (op->flags & AFS_OPERATION_LOCK_0)
- mutex_unlock(&vnode->io_lock);
+ afs_unlock_for_io(vnode);
}
static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp,
@@ -157,7 +256,7 @@ bool afs_begin_vnode_operation(struct afs_operation *op)
/*
* Tidy up a filesystem cursor and unlock the vnode.
*/
-static void afs_end_vnode_operation(struct afs_operation *op)
+void afs_end_vnode_operation(struct afs_operation *op)
{
_enter("");
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 098fa034a1cc..1d9ecd5418d8 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -301,19 +301,19 @@ void afs_fs_fetch_status(struct afs_operation *op)
static int afs_deliver_fs_fetch_data(struct afs_call *call)
{
struct afs_operation *op = call->op;
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
const __be32 *bp;
size_t count_before;
int ret;
_enter("{%u,%zu,%zu/%llu}",
call->unmarshall, call->iov_len, iov_iter_count(call->iter),
- req->actual_len);
+ call->remaining);
switch (call->unmarshall) {
case 0:
- req->actual_len = 0;
+ call->remaining = 0;
call->unmarshall++;
if (call->operation_ID == FSFETCHDATA64) {
afs_extract_to_tmp64(call);
@@ -323,8 +323,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
}
fallthrough;
- /* Extract the returned data length into
- * ->actual_len. This may indicate more or less data than was
+ /* Extract the returned data length into ->remaining.
+ * This may indicate more or less data than was
* requested will be returned.
*/
case 1:
@@ -333,42 +333,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
if (ret < 0)
return ret;
- req->actual_len = be64_to_cpu(call->tmp64);
- _debug("DATA length: %llu", req->actual_len);
+ call->remaining = be64_to_cpu(call->tmp64);
+ _debug("DATA length: %llu", call->remaining);
- if (req->actual_len == 0)
+ if (call->remaining == 0)
goto no_more_data;
- call->iter = req->iter;
- call->iov_len = min(req->actual_len, req->len);
+ call->iter = &subreq->io_iter;
+ call->iov_len = umin(call->remaining, subreq->len - subreq->transferred);
call->unmarshall++;
fallthrough;
/* extract the returned data */
case 2:
count_before = call->iov_len;
- _debug("extract data %zu/%llu", count_before, req->actual_len);
+ _debug("extract data %zu/%llu", count_before, call->remaining);
ret = afs_extract_data(call, true);
- if (req->subreq) {
- req->subreq->transferred += count_before - call->iov_len;
- netfs_read_subreq_progress(req->subreq, false);
- }
+ subreq->transferred += count_before - call->iov_len;
+ call->remaining -= count_before - call->iov_len;
if (ret < 0)
return ret;
call->iter = &call->def_iter;
- if (req->actual_len <= req->len)
+ if (!call->remaining)
goto no_more_data;
/* Discard any excess data the server gave us */
- afs_extract_discard(call, req->actual_len - req->len);
+ afs_extract_discard(call, call->remaining);
call->unmarshall = 3;
fallthrough;
case 3:
_debug("extract discard %zu/%llu",
- iov_iter_count(call->iter), req->actual_len - req->len);
+ iov_iter_count(call->iter), call->remaining);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -390,8 +388,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
xdr_decode_AFSCallBack(&bp, call, &vp->scb);
xdr_decode_AFSVolSync(&bp, &op->volsync);
- req->data_version = vp->scb.status.data_version;
- req->file_size = vp->scb.status.size;
+ if (subreq->start + subreq->transferred >= vp->scb.status.size)
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
call->unmarshall++;
fallthrough;
@@ -410,14 +408,18 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
static const struct afs_call_type afs_RXFSFetchData = {
.name = "FS.FetchData",
.op = afs_FS_FetchData,
+ .async_rx = afs_fetch_data_async_rx,
.deliver = afs_deliver_fs_fetch_data,
+ .immediate_cancel = afs_fetch_data_immediate_cancel,
.destructor = afs_flat_call_destructor,
};
static const struct afs_call_type afs_RXFSFetchData64 = {
.name = "FS.FetchData64",
.op = afs_FS_FetchData64,
+ .async_rx = afs_fetch_data_async_rx,
.deliver = afs_deliver_fs_fetch_data,
+ .immediate_cancel = afs_fetch_data_immediate_cancel,
.destructor = afs_flat_call_destructor,
};
@@ -426,8 +428,8 @@ static const struct afs_call_type afs_RXFSFetchData64 = {
*/
static void afs_fs_fetch_data64(struct afs_operation *op)
{
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
struct afs_call *call;
__be32 *bp;
@@ -437,16 +439,19 @@ static void afs_fs_fetch_data64(struct afs_operation *op)
if (!call)
return afs_op_nomem(op);
+ if (op->flags & AFS_OPERATION_ASYNC)
+ call->async = true;
+
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHDATA64);
bp[1] = htonl(vp->fid.vid);
bp[2] = htonl(vp->fid.vnode);
bp[3] = htonl(vp->fid.unique);
- bp[4] = htonl(upper_32_bits(req->pos));
- bp[5] = htonl(lower_32_bits(req->pos));
+ bp[4] = htonl(upper_32_bits(subreq->start + subreq->transferred));
+ bp[5] = htonl(lower_32_bits(subreq->start + subreq->transferred));
bp[6] = 0;
- bp[7] = htonl(lower_32_bits(req->len));
+ bp[7] = htonl(lower_32_bits(subreq->len - subreq->transferred));
call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
@@ -458,9 +463,9 @@ static void afs_fs_fetch_data64(struct afs_operation *op)
*/
void afs_fs_fetch_data(struct afs_operation *op)
{
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_read *req = op->fetch.req;
__be32 *bp;
if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
@@ -472,16 +477,14 @@ void afs_fs_fetch_data(struct afs_operation *op)
if (!call)
return afs_op_nomem(op);
- req->call_debug_id = call->debug_id;
-
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHDATA);
bp[1] = htonl(vp->fid.vid);
bp[2] = htonl(vp->fid.vnode);
bp[3] = htonl(vp->fid.unique);
- bp[4] = htonl(lower_32_bits(req->pos));
- bp[5] = htonl(lower_32_bits(req->len));
+ bp[4] = htonl(lower_32_bits(subreq->start + subreq->transferred));
+ bp[5] = htonl(lower_32_bits(subreq->len - subreq->transferred));
call->fid = vp->fid;
trace_afs_make_fs_call(call, &vp->fid);
@@ -1733,6 +1736,7 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
.op = afs_FS_GetCapabilities,
.deliver = afs_deliver_fs_get_capabilities,
.done = afs_fileserver_probe_result,
+ .immediate_cancel = afs_fileserver_probe_result,
.destructor = afs_fs_get_capabilities_destructor,
};
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index a95e77670b49..e9538e91f848 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -25,8 +25,94 @@
#include "internal.h"
#include "afs_fs.h"
+void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op)
+{
+ size_t size = strlen(op->create.symlink) + 1;
+ size_t dsize = 0;
+ char *p;
+
+ if (netfs_alloc_folioq_buffer(NULL, &vnode->directory, &dsize, size,
+ mapping_gfp_mask(vnode->netfs.inode.i_mapping)) < 0)
+ return;
+
+ vnode->directory_size = dsize;
+ p = kmap_local_folio(folioq_folio(vnode->directory, 0), 0);
+ memcpy(p, op->create.symlink, size);
+ kunmap_local(p);
+ set_bit(AFS_VNODE_DIR_READ, &vnode->flags);
+ netfs_single_mark_inode_dirty(&vnode->netfs.inode);
+}
+
+static void afs_put_link(void *arg)
+{
+ struct folio *folio = virt_to_folio(arg);
+
+ kunmap_local(arg);
+ folio_put(folio);
+}
+
+const char *afs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback)
+{
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct folio *folio;
+ char *content;
+ ssize_t ret;
+
+ if (!dentry) {
+ /* RCU pathwalk. */
+ if (!test_bit(AFS_VNODE_DIR_READ, &vnode->flags) || !afs_check_validity(vnode))
+ return ERR_PTR(-ECHILD);
+ goto good;
+ }
+
+ if (test_bit(AFS_VNODE_DIR_READ, &vnode->flags))
+ goto fetch;
+
+ ret = afs_validate(vnode, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (!test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
+ test_bit(AFS_VNODE_DIR_READ, &vnode->flags))
+ goto good;
+
+fetch:
+ ret = afs_read_single(vnode, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ set_bit(AFS_VNODE_DIR_READ, &vnode->flags);
+
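+ /* The symlink content lives in the first folio of the ->directory
+ * buffer; pin it and hand it back with a delayed put.
+ */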
+good:
+ folio = folioq_folio(vnode->directory, 0);
+ folio_get(folio);
+ content = kmap_local_folio(folio, 0);
+ set_delayed_call(callback, afs_put_link, content);
+ return content;
+}
+
+int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
+{
+ DEFINE_DELAYED_CALL(done);
+ const char *content;
+ int len;
+
+ content = afs_get_link(dentry, d_inode(dentry), &done);
+ if (IS_ERR(content)) {
+ do_delayed_call(&done);
+ return PTR_ERR(content);
+ }
+
+ len = umin(strlen(content), buflen);
+ if (copy_to_user(buffer, content, len))
+ len = -EFAULT;
+ do_delayed_call(&done);
+ return len;
+}
+
static const struct inode_operations afs_symlink_inode_operations = {
- .get_link = page_get_link,
+ .get_link = afs_get_link,
+ .readlink = afs_readlink,
};
static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode)
@@ -110,7 +196,9 @@ static int afs_inode_init_from_status(struct afs_operation *op,
inode->i_op = &afs_dir_inode_operations;
inode->i_fop = &afs_dir_file_operations;
inode->i_mapping->a_ops = &afs_dir_aops;
- mapping_set_large_folios(inode->i_mapping);
+ __set_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &vnode->netfs.flags);
+ /* Assume locally cached directory data will be valid. */
+ __set_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
break;
case AFS_FTYPE_SYMLINK:
/* Symlinks with a mode of 0644 are actually mountpoints. */
@@ -122,13 +210,13 @@ static int afs_inode_init_from_status(struct afs_operation *op,
inode->i_mode = S_IFDIR | 0555;
inode->i_op = &afs_mntpt_inode_operations;
inode->i_fop = &afs_mntpt_file_operations;
- inode->i_mapping->a_ops = &afs_symlink_aops;
} else {
inode->i_mode = S_IFLNK | status->mode;
inode->i_op = &afs_symlink_inode_operations;
- inode->i_mapping->a_ops = &afs_symlink_aops;
}
+ inode->i_mapping->a_ops = &afs_dir_aops;
inode_nohighmem(inode);
+ mapping_set_release_always(inode->i_mapping);
break;
default:
dump_vnode(vnode, op->file[0].vnode != vnode ? op->file[0].vnode : NULL);
@@ -140,15 +228,17 @@ static int afs_inode_init_from_status(struct afs_operation *op,
afs_set_netfs_context(vnode);
vnode->invalid_before = status->data_version;
+ trace_afs_set_dv(vnode, status->data_version);
inode_set_iversion_raw(&vnode->netfs.inode, status->data_version);
if (!vp->scb.have_cb) {
/* it's a symlink we just created (the fileserver
* didn't give us a callback) */
- atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE);
+ afs_clear_cb_promise(vnode, afs_cb_promise_set_new_symlink);
} else {
vnode->cb_server = op->server;
- atomic64_set(&vnode->cb_expires_at, vp->scb.callback.expires_at);
+ afs_set_cb_promise(vnode, vp->scb.callback.expires_at,
+ afs_cb_promise_set_new_inode);
}
write_sequnlock(&vnode->cb_lock);
@@ -207,12 +297,17 @@ static void afs_apply_status(struct afs_operation *op,
if (vp->update_ctime)
inode_set_ctime_to_ts(inode, op->ctime);
- if (vnode->status.data_version != status->data_version)
+ if (vnode->status.data_version != status->data_version) {
+ trace_afs_set_dv(vnode, status->data_version);
data_changed = true;
+ }
vnode->status = *status;
if (vp->dv_before + vp->dv_delta != status->data_version) {
+ trace_afs_dv_mismatch(vnode, vp->dv_before, vp->dv_delta,
+ status->data_version);
+
if (vnode->cb_ro_snapshot == atomic_read(&vnode->volume->cb_ro_snapshot) &&
atomic64_read(&vnode->cb_expires_at) != AFS_NO_CB_PROMISE)
pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s (op=%x)\n",
@@ -223,12 +318,10 @@ static void afs_apply_status(struct afs_operation *op,
op->debug_id);
vnode->invalid_before = status->data_version;
- if (vnode->status.type == AFS_FTYPE_DIR) {
- if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
- afs_stat_v(vnode, n_inval);
- } else {
+ if (vnode->status.type == AFS_FTYPE_DIR)
+ afs_invalidate_dir(vnode, afs_dir_invalid_dv_mismatch);
+ else
set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
- }
change_size = true;
data_changed = true;
unexpected_jump = true;
@@ -258,6 +351,8 @@ static void afs_apply_status(struct afs_operation *op,
inode_set_ctime_to_ts(inode, t);
inode_set_atime_to_ts(inode, t);
}
+ if (op->ops == &afs_fetch_data_operation)
+ op->fetch.subreq->rreq->i_size = status->size;
}
}
@@ -273,7 +368,7 @@ static void afs_apply_callback(struct afs_operation *op,
if (!afs_cb_is_broken(vp->cb_break_before, vnode)) {
if (op->volume->type == AFSVL_RWVOL)
vnode->cb_server = op->server;
- atomic64_set(&vnode->cb_expires_at, cb->expires_at);
+ afs_set_cb_promise(vnode, cb->expires_at, afs_cb_promise_set_apply_cb);
}
}
@@ -435,7 +530,9 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
} __packed key;
struct afs_vnode_cache_aux aux;
- if (vnode->status.type != AFS_FTYPE_FILE) {
+ if (vnode->status.type != AFS_FTYPE_FILE &&
+ vnode->status.type != AFS_FTYPE_DIR &&
+ vnode->status.type != AFS_FTYPE_SYMLINK) {
vnode->netfs.cache = NULL;
return;
}
@@ -637,6 +734,7 @@ int afs_drop_inode(struct inode *inode)
void afs_evict_inode(struct inode *inode)
{
struct afs_vnode_cache_aux aux;
+ struct afs_super_info *sbi = AFS_FS_S(inode->i_sb);
struct afs_vnode *vnode = AFS_FS_I(inode);
_enter("{%llx:%llu.%d}",
@@ -648,8 +746,22 @@ void afs_evict_inode(struct inode *inode)
ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
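+ /* Dirty, locally modified directory or symlink content is flushed
+ * through the single-object writeback path before the pages are
+ * discarded.
+ */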
+ if ((S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode)) &&
+ (inode->i_state & I_DIRTY) &&
+ !sbi->dyn_root) {
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .for_sync = true,
+ .range_end = LLONG_MAX,
+ };
+
+ afs_single_writepages(inode->i_mapping, &wbc);
+ }
+
netfs_wait_for_outstanding_io(inode);
truncate_inode_pages_final(&inode->i_data);
+ netfs_free_folioq_buffer(vnode->directory);
afs_set_cache_aux(vnode, &aux);
netfs_clear_inode_writeback(inode, &aux);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index c9d620175e80..90f407774a9a 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -163,6 +163,7 @@ struct afs_call {
spinlock_t state_lock;
int error; /* error code */
u32 abort_code; /* Remote abort ID or 0 */
+ unsigned long long remaining; /* How much is left to receive */
unsigned int max_lifespan; /* Maximum lifespan in secs to set if not 0 */
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
@@ -201,11 +202,17 @@ struct afs_call_type {
/* clean up a call */
void (*destructor)(struct afs_call *call);
+ /* Async receive processing function */
+ void (*async_rx)(struct work_struct *work);
+
/* Work function */
void (*work)(struct work_struct *work);
/* Call done function (gets called immediately on success or failure) */
void (*done)(struct afs_call *call);
+
+ /* Handle a call being immediately cancelled. */
+ void (*immediate_cancel)(struct afs_call *call);
};
/*
@@ -233,28 +240,6 @@ static inline struct key *afs_file_key(struct file *file)
}
/*
- * Record of an outstanding read operation on a vnode.
- */
-struct afs_read {
- loff_t pos; /* Where to start reading */
- loff_t len; /* How much we're asking for */
- loff_t actual_len; /* How much we're actually getting */
- loff_t file_size; /* File size returned by server */
- struct key *key; /* The key to use to reissue the read */
- struct afs_vnode *vnode; /* The file being read into. */
- struct netfs_io_subrequest *subreq; /* Fscache helper read request this belongs to */
- afs_dataversion_t data_version; /* Version number returned by server */
- refcount_t usage;
- unsigned int call_debug_id;
- unsigned int nr_pages;
- int error;
- void (*done)(struct afs_read *);
- void (*cleanup)(struct afs_read *);
- struct iov_iter *iter; /* Iterator representing the buffer */
- struct iov_iter def_iter; /* Default iterator */
-};
-
-/*
* AFS superblock private data
* - there's one superblock per volume
*/
@@ -702,13 +687,14 @@ struct afs_vnode {
struct afs_file_status status; /* AFS status info for this file */
afs_dataversion_t invalid_before; /* Child dentries are invalid before this */
struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */
- struct mutex io_lock; /* Lock for serialising I/O on this mutex */
+ struct list_head io_lock_waiters; /* Threads waiting for the I/O lock */
struct rw_semaphore validate_lock; /* lock for validating this vnode */
struct rw_semaphore rmdir_lock; /* Lock for rmdir vs sillyrename */
struct key *silly_key; /* Silly rename key */
spinlock_t wb_lock; /* lock for wb_keys */
spinlock_t lock; /* waitqueue/flags lock */
unsigned long flags;
+#define AFS_VNODE_IO_LOCK 0 /* Set if the I/O serialisation lock is held */
#define AFS_VNODE_UNSET 1 /* set if vnode attributes not yet set */
#define AFS_VNODE_DIR_VALID 2 /* Set if dir contents are valid */
#define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */
@@ -719,7 +705,9 @@ struct afs_vnode {
#define AFS_VNODE_NEW_CONTENT 8 /* Set if file has new content (create/trunc-0) */
#define AFS_VNODE_SILLY_DELETED 9 /* Set if file has been silly-deleted */
#define AFS_VNODE_MODIFYING 10 /* Set if we're performing a modification op */
+#define AFS_VNODE_DIR_READ 11 /* Set if we've read a dir's contents */
+ struct folio_queue *directory; /* Directory contents */
struct list_head wb_keys; /* List of keys available for writeback */
struct list_head pending_locks; /* locks waiting to be granted */
struct list_head granted_locks; /* locks granted on this file */
@@ -728,6 +716,7 @@ struct afs_vnode {
ktime_t locked_at; /* Time at which lock obtained */
enum afs_lock_state lock_state : 8;
afs_lock_type_t lock_type : 8;
+ unsigned int directory_size; /* Amount of space in ->directory */
/* outstanding callback notification on this file */
struct work_struct cb_work; /* Work for mmap'd files */
@@ -907,7 +896,7 @@ struct afs_operation {
bool new_negative;
} rename;
struct {
- struct afs_read *req;
+ struct netfs_io_subrequest *subreq;
} fetch;
struct {
afs_lock_type_t type;
@@ -959,6 +948,7 @@ struct afs_operation {
#define AFS_OPERATION_TRIED_ALL 0x0400 /* Set if we've tried all the fileservers */
#define AFS_OPERATION_RETRY_SERVER 0x0800 /* Set if we should retry the current server */
#define AFS_OPERATION_DIR_CONFLICT 0x1000 /* Set if we detected a 3rd-party dir change */
+#define AFS_OPERATION_ASYNC 0x2000 /* Set if should run asynchronously */
};
/*
@@ -983,6 +973,21 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl
i_size_read(&vnode->netfs.inode), flags);
}
+/*
+ * Directory iteration management.
+ */
+struct afs_dir_iter {
+ struct afs_vnode *dvnode;
+ union afs_xdr_dir_block *block;
+ struct folio_queue *fq;
+ unsigned int fpos;
+ int fq_slot;
+ unsigned int loop_check;
+ u8 nr_slots;
+ u8 bucket;
+ unsigned int prev_entry;
+};
+
#include <trace/events/afs.h>
/*****************************************************************************/
@@ -1064,8 +1069,13 @@ extern const struct inode_operations afs_dir_inode_operations;
extern const struct address_space_operations afs_dir_aops;
extern const struct dentry_operations afs_fs_dentry_operations;
+ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file);
+ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file)
+ __acquires(&dvnode->validate_lock);
extern void afs_d_release(struct dentry *);
extern void afs_check_for_remote_deletion(struct afs_operation *);
+int afs_single_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
/*
* dir_edit.c
@@ -1075,6 +1085,18 @@ extern void afs_edit_dir_add(struct afs_vnode *, struct qstr *, struct afs_fid *
extern void afs_edit_dir_remove(struct afs_vnode *, struct qstr *, enum afs_edit_dir_reason);
void afs_edit_dir_update_dotdot(struct afs_vnode *vnode, struct afs_vnode *new_dvnode,
enum afs_edit_dir_reason why);
+void afs_mkdir_init_dir(struct afs_vnode *dvnode, struct afs_vnode *parent_vnode);
+
+/*
+ * dir_search.c
+ */
+unsigned int afs_dir_hash_name(const struct qstr *name);
+bool afs_dir_init_iter(struct afs_dir_iter *iter, const struct qstr *name);
+union afs_xdr_dir_block *afs_dir_find_block(struct afs_dir_iter *iter, size_t block);
+int afs_dir_search_bucket(struct afs_dir_iter *iter, const struct qstr *name,
+ struct afs_fid *_fid);
+int afs_dir_search(struct afs_vnode *dvnode, struct qstr *name,
+ struct afs_fid *_fid, afs_dataversion_t *_dir_version);
/*
* dir_silly.c
@@ -1099,24 +1121,17 @@ extern void afs_dynroot_depopulate(struct super_block *);
* file.c
*/
extern const struct address_space_operations afs_file_aops;
-extern const struct address_space_operations afs_symlink_aops;
extern const struct inode_operations afs_file_inode_operations;
extern const struct file_operations afs_file_operations;
+extern const struct afs_operation_ops afs_fetch_data_operation;
extern const struct netfs_request_ops afs_req_ops;
extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *);
extern void afs_put_wb_key(struct afs_wb_key *);
extern int afs_open(struct inode *, struct file *);
extern int afs_release(struct inode *, struct file *);
-extern int afs_fetch_data(struct afs_vnode *, struct afs_read *);
-extern struct afs_read *afs_alloc_read(gfp_t);
-extern void afs_put_read(struct afs_read *);
-
-static inline struct afs_read *afs_get_read(struct afs_read *req)
-{
- refcount_inc(&req->usage);
- return req;
-}
+void afs_fetch_data_async_rx(struct work_struct *work);
+void afs_fetch_data_immediate_cancel(struct afs_call *call);
/*
* flock.c
@@ -1168,6 +1183,7 @@ extern void afs_fs_store_acl(struct afs_operation *);
extern struct afs_operation *afs_alloc_operation(struct key *, struct afs_volume *);
extern int afs_put_operation(struct afs_operation *);
extern bool afs_begin_vnode_operation(struct afs_operation *);
+extern void afs_end_vnode_operation(struct afs_operation *op);
extern void afs_wait_for_operation(struct afs_operation *);
extern int afs_do_sync_operation(struct afs_operation *);
@@ -1205,6 +1221,10 @@ extern void afs_fs_probe_cleanup(struct afs_net *);
*/
extern const struct afs_operation_ops afs_fetch_status_operation;
+void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op);
+const char *afs_get_link(struct dentry *dentry, struct inode *inode,
+ struct delayed_call *callback);
+int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen);
extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *);
extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *);
extern int afs_ilookup5_test_by_fid(struct inode *, void *);
@@ -1336,6 +1356,7 @@ extern void afs_charge_preallocation(struct work_struct *);
extern void afs_put_call(struct afs_call *);
void afs_deferred_put_call(struct afs_call *call);
void afs_make_call(struct afs_call *call, gfp_t gfp);
+void afs_deliver_to_call(struct afs_call *call);
void afs_wait_for_call_to_complete(struct afs_call *call);
extern struct afs_call *afs_alloc_flat_call(struct afs_net *,
const struct afs_call_type *,
@@ -1346,6 +1367,28 @@ extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
extern int afs_extract_data(struct afs_call *, bool);
extern int afs_protocol_error(struct afs_call *, enum afs_eproto_cause);
+static inline struct afs_call *afs_get_call(struct afs_call *call,
+ enum afs_call_trace why)
+{
+ int r;
+
+ __refcount_inc(&call->ref, &r);
+
+ trace_afs_call(call->debug_id, why, r + 1,
+ atomic_read(&call->net->nr_outstanding_calls),
+ __builtin_return_address(0));
+ return call;
+}
+
+static inline void afs_see_call(struct afs_call *call, enum afs_call_trace why)
+{
+ int r = refcount_read(&call->ref);
+
+ trace_afs_call(call->debug_id, why, r,
+ atomic_read(&call->net->nr_outstanding_calls),
+ __builtin_return_address(0));
+}
+
static inline void afs_make_op_call(struct afs_operation *op, struct afs_call *call,
gfp_t gfp)
{
@@ -1712,6 +1755,38 @@ static inline int afs_bad(struct afs_vnode *vnode, enum afs_file_error where)
return -EIO;
}
+/*
+ * Set the callback promise on a vnode.
+ */
+static inline void afs_set_cb_promise(struct afs_vnode *vnode, time64_t expires_at,
+ enum afs_cb_promise_trace trace)
+{
+ atomic64_set(&vnode->cb_expires_at, expires_at);
+ trace_afs_cb_promise(vnode, trace);
+}
+
+/*
+ * Clear the callback promise on a vnode, returning true if it was promised.
+ */
+static inline bool afs_clear_cb_promise(struct afs_vnode *vnode,
+ enum afs_cb_promise_trace trace)
+{
+ trace_afs_cb_promise(vnode, trace);
+ return atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE;
+}
+
+/*
+ * Mark a directory as being invalid.
+ */
+static inline void afs_invalidate_dir(struct afs_vnode *dvnode,
+ enum afs_dir_invalid_trace trace)
+{
+ if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
+ trace_afs_dir_invalid(dvnode, trace);
+ afs_stat_v(dvnode, n_inval);
+ }
+}
+
/*****************************************************************************/
/*
* debug tracing
diff --git a/fs/afs/main.c b/fs/afs/main.c
index a14f6013e316..1ae0067f772d 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -177,7 +177,7 @@ static int __init afs_init(void)
afs_wq = alloc_workqueue("afs", 0, 0);
if (!afs_wq)
goto error_afs_wq;
- afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0);
+ afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!afs_async_calls)
goto error_async;
afs_lock_manager = alloc_workqueue("kafs_lockd", WQ_MEM_RECLAIM, 0);
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 297487ee8323..507c25a5b2cb 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -30,7 +30,7 @@ const struct file_operations afs_mntpt_file_operations = {
const struct inode_operations afs_mntpt_inode_operations = {
.lookup = afs_mntpt_lookup,
- .readlink = page_readlink,
+ .readlink = afs_readlink,
.getattr = afs_getattr,
};
@@ -118,9 +118,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
ctx->volnamesz = sizeof(afs_root_volume) - 1;
} else {
/* read the contents of the AFS special symlink */
- struct page *page;
+ DEFINE_DELAYED_CALL(cleanup);
+ const char *content;
loff_t size = i_size_read(d_inode(mntpt));
- char *buf;
if (src_as->cell)
ctx->cell = afs_use_cell(src_as->cell, afs_cell_trace_use_mntpt);
@@ -128,16 +128,16 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
if (size < 2 || size > PAGE_SIZE - 1)
return -EINVAL;
- page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ content = afs_get_link(mntpt, d_inode(mntpt), &cleanup);
+ if (IS_ERR(content)) {
+ do_delayed_call(&cleanup);
+ return PTR_ERR(content);
+ }
- buf = kmap(page);
ret = -EINVAL;
- if (buf[size - 1] == '.')
- ret = vfs_parse_fs_string(fc, "source", buf, size - 1);
- kunmap(page);
- put_page(page);
+ if (content[size - 1] == '.')
+ ret = vfs_parse_fs_string(fc, "source", content, size - 1);
+ do_delayed_call(&cleanup);
if (ret < 0)
return ret;
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 15eab053af6d..e7614f4f30c2 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -240,7 +240,13 @@ static int afs_proc_rootcell_write(struct file *file, char *buf, size_t size)
/* determine command to perform */
_debug("rootcell=%s", buf);
- ret = afs_cell_init(net, buf);
+ ret = -EEXIST;
+ inode_lock(file_inode(file));
+ if (!net->ws_cell)
+ ret = afs_cell_init(net, buf);
+ else
+ printk("busy\n");
+ inode_unlock(file_inode(file));
out:
_leave(" = %d", ret);
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index d612983d6f38..a1c24f589d9e 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -99,7 +99,7 @@ static bool afs_start_fs_iteration(struct afs_operation *op,
write_seqlock(&vnode->cb_lock);
ASSERTCMP(cb_server, ==, vnode->cb_server);
vnode->cb_server = NULL;
- if (atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE)
+ if (afs_clear_cb_promise(vnode, afs_cb_promise_clear_rotate_server))
vnode->cb_break++;
write_sequnlock(&vnode->cb_lock);
}
@@ -583,7 +583,7 @@ selected_server:
if (vnode->cb_server != server) {
vnode->cb_server = server;
vnode->cb_v_check = atomic_read(&vnode->volume->cb_v_break);
- atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE);
+ afs_clear_cb_promise(vnode, afs_cb_promise_clear_server_change);
}
retry_server:
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 9f2a3bb56ec6..886416ea1d96 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -149,7 +149,8 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
call->net = net;
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
refcount_set(&call->ref, 1);
- INIT_WORK(&call->async_work, afs_process_async_call);
+ INIT_WORK(&call->async_work, type->async_rx ?: afs_process_async_call);
+ INIT_WORK(&call->work, call->type->work);
INIT_WORK(&call->free_work, afs_deferred_free_worker);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->state_lock);
@@ -235,27 +236,12 @@ void afs_deferred_put_call(struct afs_call *call)
schedule_work(&call->free_work);
}
-static struct afs_call *afs_get_call(struct afs_call *call,
- enum afs_call_trace why)
-{
- int r;
-
- __refcount_inc(&call->ref, &r);
-
- trace_afs_call(call->debug_id, why, r + 1,
- atomic_read(&call->net->nr_outstanding_calls),
- __builtin_return_address(0));
- return call;
-}
-
/*
* Queue the call for actual work.
*/
static void afs_queue_call_work(struct afs_call *call)
{
if (call->type->work) {
- INIT_WORK(&call->work, call->type->work);
-
afs_get_call(call, afs_call_trace_work);
if (!queue_work(afs_wq, &call->work))
afs_put_call(call);
@@ -430,11 +416,16 @@ void afs_make_call(struct afs_call *call, gfp_t gfp)
return;
error_do_abort:
- if (ret != -ECONNABORTED) {
+ if (ret != -ECONNABORTED)
rxrpc_kernel_abort_call(call->net->socket, rxcall,
RX_USER_ABORT, ret,
afs_abort_send_data_error);
- } else {
+ if (call->async) {
+ afs_see_call(call, afs_call_trace_async_abort);
+ return;
+ }
+
+ if (ret == -ECONNABORTED) {
len = 0;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);
rxrpc_kernel_recv_data(call->net->socket, rxcall,
@@ -445,8 +436,10 @@ error_do_abort:
call->error = ret;
trace_afs_call_done(call);
error_kill_call:
- if (call->type->done)
- call->type->done(call);
+ if (call->async)
+ afs_see_call(call, afs_call_trace_async_kill);
+ if (call->type->immediate_cancel)
+ call->type->immediate_cancel(call);
/* We need to dispose of the extra ref we grabbed for an async call.
* The call, however, might be queued on afs_async_calls and we need to
@@ -501,7 +494,7 @@ static void afs_log_error(struct afs_call *call, s32 remote_abort)
/*
* deliver messages to a call
*/
-static void afs_deliver_to_call(struct afs_call *call)
+void afs_deliver_to_call(struct afs_call *call)
{
enum afs_call_state state;
size_t len;
@@ -602,7 +595,6 @@ local_abort:
abort_code = 0;
call_complete:
afs_set_call_complete(call, ret, remote_abort);
- state = AFS_CALL_COMPLETE;
goto done;
}
@@ -803,6 +795,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
return -ENOTSUPP;
trace_afs_cb_call(call);
+ call->work.func = call->type->work;
/* pass responsibility for the remainder of this message off to the
* cache manager op */
diff --git a/fs/afs/super.c b/fs/afs/super.c
index f3ba1c3e72f5..a9bee610674e 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -663,7 +663,7 @@ static void afs_i_init_once(void *_vnode)
memset(vnode, 0, sizeof(*vnode));
inode_init_once(&vnode->netfs.inode);
- mutex_init(&vnode->io_lock);
+ INIT_LIST_HEAD(&vnode->io_lock_waiters);
init_rwsem(&vnode->validate_lock);
spin_lock_init(&vnode->wb_lock);
spin_lock_init(&vnode->lock);
@@ -696,6 +696,8 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
vnode->volume = NULL;
vnode->lock_key = NULL;
vnode->permit_cache = NULL;
+ vnode->directory = NULL;
+ vnode->directory_size = 0;
vnode->flags = 1 << AFS_VNODE_UNSET;
vnode->lock_state = AFS_VNODE_LOCK_NONE;
diff --git a/fs/afs/validation.c b/fs/afs/validation.c
index bef8af12ebe2..0ba8336c9025 100644
--- a/fs/afs/validation.c
+++ b/fs/afs/validation.c
@@ -120,22 +120,31 @@
bool afs_check_validity(const struct afs_vnode *vnode)
{
const struct afs_volume *volume = vnode->volume;
+ enum afs_vnode_invalid_trace trace = afs_vnode_valid_trace;
+ time64_t cb_expires_at = atomic64_read(&vnode->cb_expires_at);
time64_t deadline = ktime_get_real_seconds() + 10;
if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
return true;
- if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) ||
- atomic64_read(&vnode->cb_expires_at) <= deadline ||
- volume->cb_expires_at <= deadline ||
- vnode->cb_ro_snapshot != atomic_read(&volume->cb_ro_snapshot) ||
- vnode->cb_scrub != atomic_read(&volume->cb_scrub) ||
- test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
- _debug("inval");
- return false;
- }
-
- return true;
+ if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break))
+ trace = afs_vnode_invalid_trace_cb_v_break;
+ else if (cb_expires_at == AFS_NO_CB_PROMISE)
+ trace = afs_vnode_invalid_trace_no_cb_promise;
+ else if (cb_expires_at <= deadline)
+ trace = afs_vnode_invalid_trace_expired;
+ else if (volume->cb_expires_at <= deadline)
+ trace = afs_vnode_invalid_trace_vol_expired;
+ else if (vnode->cb_ro_snapshot != atomic_read(&volume->cb_ro_snapshot))
+ trace = afs_vnode_invalid_trace_cb_ro_snapshot;
+ else if (vnode->cb_scrub != atomic_read(&volume->cb_scrub))
+ trace = afs_vnode_invalid_trace_cb_scrub;
+ else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
+ trace = afs_vnode_invalid_trace_zap_data;
+ else
+ return true;
+ trace_afs_vnode_invalid(vnode, trace);
+ return false;
}
/*
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index 55dd0fc5aad7..3a23c0b08eb6 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -370,6 +370,7 @@ static const struct afs_call_type afs_RXVLGetCapabilities = {
.name = "VL.GetCapabilities",
.op = afs_VL_GetCapabilities,
.deliver = afs_deliver_vl_get_capabilities,
+ .immediate_cancel = afs_vlserver_probe_result,
.done = afs_vlserver_probe_result,
.destructor = afs_destroy_vl_get_capabilities,
};
diff --git a/fs/afs/write.c b/fs/afs/write.c
index ccb6aa8027c5..18b0a9f1615e 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -182,8 +182,8 @@ void afs_issue_write(struct netfs_io_subrequest *subreq)
*/
void afs_begin_writeback(struct netfs_io_request *wreq)
{
- afs_get_writeback_key(wreq);
- wreq->io_streams[0].avail = true;
+ if (S_ISREG(wreq->inode->i_mode))
+ afs_get_writeback_key(wreq);
}
/*
@@ -196,6 +196,18 @@ void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *st
list_first_entry(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
+ switch (wreq->origin) {
+ case NETFS_READAHEAD:
+ case NETFS_READPAGE:
+ case NETFS_READ_GAPS:
+ case NETFS_READ_SINGLE:
+ case NETFS_READ_FOR_WRITE:
+ case NETFS_DIO_READ:
+ return;
+ default:
+ break;
+ }
+
switch (subreq->error) {
case -EACCES:
case -EPERM:
diff --git a/fs/afs/xdr_fs.h b/fs/afs/xdr_fs.h
index 8ca868164507..cc5f143d21a3 100644
--- a/fs/afs/xdr_fs.h
+++ b/fs/afs/xdr_fs.h
@@ -88,7 +88,7 @@ union afs_xdr_dir_block {
struct {
struct afs_xdr_dir_hdr hdr;
- u8 alloc_ctrs[AFS_DIR_MAX_BLOCKS];
+ u8 alloc_ctrs[AFS_DIR_BLOCKS_WITH_CTR];
__be16 hashtable[AFS_DIR_HASHTBL_SIZE];
} meta;
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 024227aba4cd..257af259c04a 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -352,19 +352,19 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call)
static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
{
struct afs_operation *op = call->op;
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
const __be32 *bp;
size_t count_before;
int ret;
_enter("{%u,%zu, %zu/%llu}",
call->unmarshall, call->iov_len, iov_iter_count(call->iter),
- req->actual_len);
+ call->remaining);
switch (call->unmarshall) {
case 0:
- req->actual_len = 0;
+ call->remaining = 0;
afs_extract_to_tmp64(call);
call->unmarshall++;
fallthrough;
@@ -379,42 +379,39 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
if (ret < 0)
return ret;
- req->actual_len = be64_to_cpu(call->tmp64);
- _debug("DATA length: %llu", req->actual_len);
+ call->remaining = be64_to_cpu(call->tmp64);
+ _debug("DATA length: %llu", call->remaining);
- if (req->actual_len == 0)
+ if (call->remaining == 0)
goto no_more_data;
- call->iter = req->iter;
- call->iov_len = min(req->actual_len, req->len);
+ call->iter = &subreq->io_iter;
+ call->iov_len = min(call->remaining, subreq->len - subreq->transferred);
call->unmarshall++;
fallthrough;
/* extract the returned data */
case 2:
count_before = call->iov_len;
- _debug("extract data %zu/%llu", count_before, req->actual_len);
+ _debug("extract data %zu/%llu", count_before, call->remaining);
ret = afs_extract_data(call, true);
- if (req->subreq) {
- req->subreq->transferred += count_before - call->iov_len;
- netfs_read_subreq_progress(req->subreq, false);
- }
+ subreq->transferred += count_before - call->iov_len;
if (ret < 0)
return ret;
call->iter = &call->def_iter;
- if (req->actual_len <= req->len)
+ if (call->remaining)
goto no_more_data;
/* Discard any excess data the server gave us */
- afs_extract_discard(call, req->actual_len - req->len);
+ afs_extract_discard(call, call->remaining);
call->unmarshall = 3;
fallthrough;
case 3:
_debug("extract discard %zu/%llu",
- iov_iter_count(call->iter), req->actual_len - req->len);
+ iov_iter_count(call->iter), call->remaining);
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -439,8 +436,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
xdr_decode_YFSCallBack(&bp, call, &vp->scb);
xdr_decode_YFSVolSync(&bp, &op->volsync);
- req->data_version = vp->scb.status.data_version;
- req->file_size = vp->scb.status.size;
+ if (subreq->start + subreq->transferred >= vp->scb.status.size)
+ __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
call->unmarshall++;
fallthrough;
@@ -459,7 +456,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
static const struct afs_call_type yfs_RXYFSFetchData64 = {
.name = "YFS.FetchData64",
.op = yfs_FS_FetchData64,
+ .async_rx = afs_fetch_data_async_rx,
.deliver = yfs_deliver_fs_fetch_data64,
+ .immediate_cancel = afs_fetch_data_immediate_cancel,
.destructor = afs_flat_call_destructor,
};
@@ -468,14 +467,15 @@ static const struct afs_call_type yfs_RXYFSFetchData64 = {
*/
void yfs_fs_fetch_data(struct afs_operation *op)
{
+ struct netfs_io_subrequest *subreq = op->fetch.subreq;
struct afs_vnode_param *vp = &op->file[0];
- struct afs_read *req = op->fetch.req;
struct afs_call *call;
__be32 *bp;
- _enter(",%x,{%llx:%llu},%llx,%llx",
+ _enter(",%x,{%llx:%llu},%llx,%zx",
key_serial(op->key), vp->fid.vid, vp->fid.vnode,
- req->pos, req->len);
+ subreq->start + subreq->transferred,
+ subreq->len - subreq->transferred);
call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchData64,
sizeof(__be32) * 2 +
@@ -487,15 +487,16 @@ void yfs_fs_fetch_data(struct afs_operation *op)
if (!call)
return afs_op_nomem(op);
- req->call_debug_id = call->debug_id;
+ if (op->flags & AFS_OPERATION_ASYNC)
+ call->async = true;
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHDATA64);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
bp = xdr_encode_YFSFid(bp, &vp->fid);
- bp = xdr_encode_u64(bp, req->pos);
- bp = xdr_encode_u64(bp, req->len);
+ bp = xdr_encode_u64(bp, subreq->start + subreq->transferred);
+ bp = xdr_encode_u64(bp, subreq->len - subreq->transferred);
yfs_check_req(call, bp);
call->fid = vp->fid;
@@ -666,8 +667,9 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call)
static void yfs_done_fs_remove_file2(struct afs_call *call)
{
if (call->error == -ECONNABORTED &&
- call->abort_code == RX_INVALID_OPERATION) {
- set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags);
+ (call->abort_code == RX_INVALID_OPERATION ||
+ call->abort_code == RXGEN_OPCODE)) {
+ set_bit(AFS_SERVER_FL_NO_RM2, &call->op->server->flags);
call->op->flags |= AFS_OPERATION_DOWNGRADE;
}
}
diff --git a/fs/backing-file.c b/fs/backing-file.c
index cbdad8b68474..763fbe9b72b2 100644
--- a/fs/backing-file.c
+++ b/fs/backing-file.c
@@ -176,7 +176,7 @@ ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
!(file->f_mode & FMODE_CAN_ODIRECT))
return -EINVAL;
- old_cred = override_creds_light(ctx->cred);
+ old_cred = override_creds(ctx->cred);
if (is_sync_kiocb(iocb)) {
rwf_t rwf = iocb_to_rw_flags(flags);
@@ -197,7 +197,7 @@ ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
backing_aio_cleanup(aio, ret);
}
out:
- revert_creds_light(old_cred);
+ revert_creds(old_cred);
if (ctx->accessed)
ctx->accessed(iocb->ki_filp);
@@ -233,7 +233,7 @@ ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
*/
flags &= ~IOCB_DIO_CALLER_COMP;
- old_cred = override_creds_light(ctx->cred);
+ old_cred = override_creds(ctx->cred);
if (is_sync_kiocb(iocb)) {
rwf_t rwf = iocb_to_rw_flags(flags);
@@ -264,7 +264,7 @@ ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
backing_aio_cleanup(aio, ret);
}
out:
- revert_creds_light(old_cred);
+ revert_creds(old_cred);
return ret;
}
@@ -281,9 +281,9 @@ ssize_t backing_file_splice_read(struct file *in, struct kiocb *iocb,
if (WARN_ON_ONCE(!(in->f_mode & FMODE_BACKING)))
return -EIO;
- old_cred = override_creds_light(ctx->cred);
+ old_cred = override_creds(ctx->cred);
ret = vfs_splice_read(in, &iocb->ki_pos, pipe, len, flags);
- revert_creds_light(old_cred);
+ revert_creds(old_cred);
if (ctx->accessed)
ctx->accessed(iocb->ki_filp);
@@ -310,11 +310,11 @@ ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
if (ret)
return ret;
- old_cred = override_creds_light(ctx->cred);
+ old_cred = override_creds(ctx->cred);
file_start_write(out);
ret = out->f_op->splice_write(pipe, out, &iocb->ki_pos, len, flags);
file_end_write(out);
- revert_creds_light(old_cred);
+ revert_creds(old_cred);
if (ctx->end_write)
ctx->end_write(iocb, ret);
@@ -338,9 +338,9 @@ int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
vma_set_file(vma, file);
- old_cred = override_creds_light(ctx->cred);
+ old_cred = override_creds(ctx->cred);
ret = call_mmap(vma->vm_file, vma);
- revert_creds_light(old_cred);
+ revert_creds(old_cred);
if (ctx->accessed)
ctx->accessed(user_file);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a594f66daedf..0a0776489055 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -798,6 +798,10 @@ static int get_canonical_dev_path(const char *dev_path, char *canonical)
if (ret)
goto out;
resolved_path = d_path(&path, path_buf, PATH_MAX);
+ if (IS_ERR(resolved_path)) {
+ ret = PTR_ERR(resolved_path);
+ goto out;
+ }
ret = strscpy(canonical, resolved_path, PATH_MAX);
out:
kfree(path_buf);
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 6a821a959b59..92058ae43488 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -13,6 +13,7 @@
#include <linux/falloc.h>
#include <linux/sched/mm.h>
#include <trace/events/fscache.h>
+#include <trace/events/netfs.h>
#include "internal.h"
struct cachefiles_kiocb {
@@ -366,6 +367,7 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
if (term_func)
term_func(term_func_priv, -ENOBUFS, false);
+ trace_netfs_sreq(term_func_priv, netfs_sreq_trace_cache_nowrite);
return -ENOBUFS;
}
@@ -695,6 +697,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
iov_iter_truncate(&subreq->io_iter, len);
}
+ trace_netfs_sreq(subreq, netfs_sreq_trace_cache_prepare);
cachefiles_begin_secure(cache, &saved_cred);
ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
&start, &len, len, true);
@@ -704,6 +707,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
return;
}
+ trace_netfs_sreq(subreq, netfs_sreq_trace_cache_write);
cachefiles_write(&subreq->rreq->cache_resources,
subreq->start, &subreq->io_iter,
netfs_write_subrequest_terminated, subreq);
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 7c6f260a3be5..52383b1d0ba6 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -77,6 +77,7 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object)
trace_cachefiles_vfs_error(object, file_inode(file), ret,
cachefiles_trace_setxattr_error);
trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ be64_to_cpup((__be64 *)buf->data),
buf->content,
cachefiles_coherency_set_fail);
if (ret != -ENOMEM)
@@ -85,6 +86,7 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object)
"Failed to set xattr with error %d", ret);
} else {
trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ be64_to_cpup((__be64 *)buf->data),
buf->content,
cachefiles_coherency_set_ok);
}
@@ -126,7 +128,10 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file
object,
"Failed to read aux with error %zd", xlen);
why = cachefiles_coherency_check_xattr;
- } else if (buf->type != CACHEFILES_COOKIE_TYPE_DATA) {
+ goto out;
+ }
+
+ if (buf->type != CACHEFILES_COOKIE_TYPE_DATA) {
why = cachefiles_coherency_check_type;
} else if (memcmp(buf->data, p, len) != 0) {
why = cachefiles_coherency_check_aux;
@@ -141,7 +146,9 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file
ret = 0;
}
+out:
trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ be64_to_cpup((__be64 *)buf->data),
buf->content, why);
kfree(buf);
return ret;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 85936f6d2bf7..f5224a566b69 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -223,10 +223,13 @@ static void finish_netfs_read(struct ceph_osd_request *req)
subreq->len, i_size_read(req->r_inode));
/* no object means success but no data */
- if (err == -ENOENT)
+ if (err == -ENOENT) {
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
err = 0;
- else if (err == -EBLOCKLISTED)
+ } else if (err == -EBLOCKLISTED) {
fsc->blocklisted = true;
+ }
if (err >= 0) {
if (sparse && err > 0)
@@ -242,6 +245,8 @@ static void finish_netfs_read(struct ceph_osd_request *req)
if (err > subreq->len)
err = subreq->len;
}
+ if (err > 0)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
}
if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
@@ -253,8 +258,9 @@ static void finish_netfs_read(struct ceph_osd_request *req)
subreq->transferred = err;
err = 0;
}
+ subreq->error = err;
trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
- netfs_read_subreq_terminated(subreq, err, false);
+ netfs_read_subreq_terminated(subreq);
iput(req->r_inode);
ceph_dec_osd_stopping_blocker(fsc->mdsc);
}
@@ -314,7 +320,9 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
ceph_mdsc_put_request(req);
out:
- netfs_read_subreq_terminated(subreq, err, false);
+ subreq->error = err;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
+ netfs_read_subreq_terminated(subreq);
return true;
}
@@ -426,8 +434,10 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
ceph_osdc_start_request(req->r_osdc, req);
out:
ceph_osdc_put_request(req);
- if (err)
- netfs_read_subreq_terminated(subreq, err, false);
+ if (err) {
+ subreq->error = err;
+ netfs_read_subreq_terminated(subreq);
+ }
doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}
diff --git a/fs/dcache.c b/fs/dcache.c
index b4d5e9e1e43d..1a01d7a6a7a9 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1681,9 +1681,8 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
/* Make sure we always see the terminating NUL character */
smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
- dentry->d_lockref.count = 1;
dentry->d_flags = 0;
- spin_lock_init(&dentry->d_lock);
+ lockref_init(&dentry->d_lockref, 1);
seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
dentry->d_inode = NULL;
dentry->d_parent = dentry;
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 19ef4ff2a134..254f6ad2c336 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -747,8 +747,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
if (IS_ERR(pcl))
return PTR_ERR(pcl);
- spin_lock_init(&pcl->lockref.lock);
- pcl->lockref.count = 1; /* one ref for this request */
+ lockref_init(&pcl->lockref, 1); /* one ref for this request */
pcl->algorithmformat = map->m_algorithmformat;
pcl->length = 0;
pcl->partial = true;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 89aade6f45f6..7c54ae5fcbd4 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5006,10 +5006,11 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
if (IS_ENCRYPTED(inode)) {
inode->i_op = &ext4_encrypted_symlink_inode_operations;
} else if (ext4_inode_is_fast_symlink(inode)) {
- inode->i_link = (char *)ei->i_data;
inode->i_op = &ext4_fast_symlink_inode_operations;
nd_terminate_link(ei->i_data, inode->i_size,
sizeof(ei->i_data) - 1);
+ inode_set_cached_link(inode, (char *)ei->i_data,
+ inode->i_size);
} else {
inode->i_op = &ext4_symlink_inode_operations;
}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index bcf2737078b8..536d56d15072 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3418,7 +3418,6 @@ retry:
inode->i_op = &ext4_symlink_inode_operations;
} else {
inode->i_op = &ext4_fast_symlink_inode_operations;
- inode->i_link = (char *)&EXT4_I(inode)->i_data;
}
}
@@ -3434,6 +3433,9 @@ retry:
disk_link.len);
inode->i_size = disk_link.len - 1;
EXT4_I(inode)->i_disksize = inode->i_size;
+ if (!IS_ENCRYPTED(inode))
+ inode_set_cached_link(inode, (char *)&EXT4_I(inode)->i_data,
+ inode->i_size);
}
err = ext4_add_nondir(handle, dentry, &inode);
if (handle)
diff --git a/fs/fhandle.c b/fs/fhandle.c
index ec9145047dfc..3e092ae6d142 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -187,17 +187,6 @@ static int get_path_from_fd(int fd, struct path *root)
return 0;
}
-enum handle_to_path_flags {
- HANDLE_CHECK_PERMS = (1 << 0),
- HANDLE_CHECK_SUBTREE = (1 << 1),
-};
-
-struct handle_to_path_ctx {
- struct path root;
- enum handle_to_path_flags flags;
- unsigned int fh_flags;
-};
-
static int vfs_dentry_acceptable(void *context, struct dentry *dentry)
{
struct handle_to_path_ctx *ctx = context;
@@ -261,50 +250,55 @@ static int do_handle_to_path(struct file_handle *handle, struct path *path,
{
int handle_dwords;
struct vfsmount *mnt = ctx->root.mnt;
+ struct dentry *dentry;
/* change the handle size to multiple of sizeof(u32) */
handle_dwords = handle->handle_bytes >> 2;
- path->dentry = exportfs_decode_fh_raw(mnt,
- (struct fid *)handle->f_handle,
- handle_dwords, handle->handle_type,
- ctx->fh_flags,
- vfs_dentry_acceptable, ctx);
- if (IS_ERR_OR_NULL(path->dentry)) {
- if (path->dentry == ERR_PTR(-ENOMEM))
+ dentry = exportfs_decode_fh_raw(mnt, (struct fid *)handle->f_handle,
+ handle_dwords, handle->handle_type,
+ ctx->fh_flags, vfs_dentry_acceptable,
+ ctx);
+ if (IS_ERR_OR_NULL(dentry)) {
+ if (dentry == ERR_PTR(-ENOMEM))
return -ENOMEM;
return -ESTALE;
}
+ path->dentry = dentry;
path->mnt = mntget(mnt);
return 0;
}
-/*
- * Allow relaxed permissions of file handles if the caller has the
- * ability to mount the filesystem or create a bind-mount of the
- * provided @mountdirfd.
- *
- * In both cases the caller may be able to get an unobstructed way to
- * the encoded file handle. If the caller is only able to create a
- * bind-mount we need to verify that there are no locked mounts on top
- * of it that could prevent us from getting to the encoded file.
- *
- * In principle, locked mounts can prevent the caller from mounting the
- * filesystem but that only applies to procfs and sysfs neither of which
- * support decoding file handles.
- */
-static inline bool may_decode_fh(struct handle_to_path_ctx *ctx,
- unsigned int o_flags)
+static inline int may_decode_fh(struct handle_to_path_ctx *ctx,
+ unsigned int o_flags)
{
struct path *root = &ctx->root;
+ if (capable(CAP_DAC_READ_SEARCH))
+ return 0;
+
/*
- * Restrict to O_DIRECTORY to provide a deterministic API that avoids a
- * confusing api in the face of disconnected non-dir dentries.
+ * Allow relaxed permissions of file handles if the caller has
+ * the ability to mount the filesystem or create a bind-mount of
+ * the provided @mountdirfd.
+ *
+ * In both cases the caller may be able to get an unobstructed
+ * way to the encoded file handle. If the caller is only able to
+ * create a bind-mount we need to verify that there are no
+ * locked mounts on top of it that could prevent us from getting
+ * to the encoded file.
+ *
+ * In principle, locked mounts can prevent the caller from
+ * mounting the filesystem but that only applies to procfs and
+ * sysfs neither of which support decoding file handles.
+ *
+ * Restrict to O_DIRECTORY to provide a deterministic API that
+ * avoids a confusing api in the face of disconnected non-dir
+ * dentries.
*
* There's only one dentry for each directory inode (VFS rule)...
*/
if (!(o_flags & O_DIRECTORY))
- return false;
+ return -EPERM;
if (ns_capable(root->mnt->mnt_sb->s_user_ns, CAP_SYS_ADMIN))
ctx->flags = HANDLE_CHECK_PERMS;
@@ -314,14 +308,14 @@ static inline bool may_decode_fh(struct handle_to_path_ctx *ctx,
!has_locked_children(real_mount(root->mnt), root->dentry))
ctx->flags = HANDLE_CHECK_PERMS | HANDLE_CHECK_SUBTREE;
else
- return false;
+ return -EPERM;
/* Are we able to override DAC permissions? */
if (!ns_capable(current_user_ns(), CAP_DAC_READ_SEARCH))
- return false;
+ return -EPERM;
ctx->fh_flags = EXPORT_FH_DIR_ONLY;
- return true;
+ return 0;
}
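For illustration only (not part of the patch), the caller-side consequence of the relaxed-permission path described in the comment above is that a process lacking CAP_DAC_READ_SEARCH must ask for a directory; a minimal userspace sketch:

#define _GNU_SOURCE
#include <fcntl.h>

/* Hypothetical sketch: decode a previously exported file handle. On the
 * relaxed path (no CAP_DAC_READ_SEARCH), only directory handles are
 * accepted, so O_DIRECTORY is mandatory.
 */
static int decode_dir_handle(int mount_fd, struct file_handle *fh)
{
	return open_by_handle_at(mount_fd, fh, O_RDONLY | O_DIRECTORY);
}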
static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
@@ -331,15 +325,19 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
struct file_handle f_handle;
struct file_handle *handle = NULL;
struct handle_to_path_ctx ctx = {};
+ const struct export_operations *eops;
retval = get_path_from_fd(mountdirfd, &ctx.root);
if (retval)
goto out_err;
- if (!capable(CAP_DAC_READ_SEARCH) && !may_decode_fh(&ctx, o_flags)) {
- retval = -EPERM;
+ eops = ctx.root.mnt->mnt_sb->s_export_op;
+ if (eops && eops->permission)
+ retval = eops->permission(&ctx, o_flags);
+ else
+ retval = may_decode_fh(&ctx, o_flags);
+ if (retval)
goto out_path;
- }
if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) {
retval = -EFAULT;
@@ -398,29 +396,28 @@ static long do_handle_open(int mountdirfd, struct file_handle __user *ufh,
int open_flag)
{
long retval = 0;
- struct path path;
+ struct path path __free(path_put) = {};
struct file *file;
- int fd;
+ const struct export_operations *eops;
retval = handle_to_path(mountdirfd, ufh, &path, open_flag);
if (retval)
return retval;
- fd = get_unused_fd_flags(open_flag);
- if (fd < 0) {
- path_put(&path);
+ CLASS(get_unused_fd, fd)(O_CLOEXEC);
+ if (fd < 0)
return fd;
- }
- file = file_open_root(&path, "", open_flag, 0);
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- retval = PTR_ERR(file);
- } else {
- retval = fd;
- fd_install(fd, file);
- }
- path_put(&path);
- return retval;
+
+ eops = path.mnt->mnt_sb->s_export_op;
+ if (eops->open)
+ file = eops->open(&path, open_flag);
+ else
+ file = file_open_root(&path, "", open_flag, 0);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ fd_install(fd, file);
+ return take_fd(fd);
}
/**
diff --git a/fs/file.c b/fs/file.c
index 25c6e53b03f8..d868cdb95d1e 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -279,10 +279,6 @@ repeat:
if (nr < fdt->max_fds)
return 0;
- /* Can we expand? */
- if (nr >= sysctl_nr_open)
- return -EMFILE;
-
if (unlikely(files->resize_in_progress)) {
spin_unlock(&files->file_lock);
wait_event(files->resize_wait, !files->resize_in_progress);
@@ -290,6 +286,10 @@ repeat:
goto repeat;
}
+ /* Can we expand? */
+ if (unlikely(nr >= sysctl_nr_open))
+ return -EMFILE;
+
/* All good, so we try */
files->resize_in_progress = true;
error = expand_fdtable(files, nr);
@@ -1231,17 +1231,9 @@ __releases(&files->file_lock)
/*
* We need to detect attempts to do dup2() over allocated but still
- * not finished descriptor. NB: OpenBSD avoids that at the price of
- * extra work in their equivalent of fget() - they insert struct
- * file immediately after grabbing descriptor, mark it larval if
- * more work (e.g. actual opening) is needed and make sure that
- * fget() treats larval files as absent. Potentially interesting,
- * but while extra work in fget() is trivial, locking implications
- * and amount of surgery on open()-related paths in VFS are not.
- * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
- * deadlocks in rather amusing ways, AFAICS. All of that is out of
- * scope of POSIX or SUS, since neither considers shared descriptor
- * tables and this condition does not arise without those.
+ * not finished descriptor.
+ *
+ * POSIX is silent on the issue; we return -EBUSY.
*/
fdt = files_fdtable(files);
fd = array_index_nospec(fd, fdt->max_fds);
diff --git a/fs/file_table.c b/fs/file_table.c
index 976736be47cb..a32171d2b83f 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -128,7 +128,7 @@ static struct ctl_table fs_stat_sysctls[] = {
.data = &sysctl_nr_open,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_douintvec_minmax,
.extra1 = &sysctl_nr_open_min,
.extra2 = &sysctl_nr_open_max,
},
@@ -478,6 +478,8 @@ static void ____fput(struct callback_head *work)
__fput(container_of(work, struct file, f_task_work));
}
+static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
+
/*
* If kernel thread really needs to have the final fput() it has done
* to complete, call this. The only user right now is the boot - we
@@ -491,11 +493,10 @@ static void ____fput(struct callback_head *work)
void flush_delayed_fput(void)
{
delayed_fput(NULL);
+ flush_delayed_work(&delayed_fput_work);
}
EXPORT_SYMBOL_GPL(flush_delayed_fput);
-static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
-
void fput(struct file *file)
{
if (file_ref_put(&file->f_ref)) {
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 98589aae5208..582d33e81117 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -493,7 +493,7 @@ static void put_fc_log(struct fs_context *fc)
if (log) {
if (refcount_dec_and_test(&log->usage)) {
fc->log.log = NULL;
- for (i = 0; i <= 7; i++)
+ for (i = 0; i < ARRAY_SIZE(log->buffer); i++)
if (log->need_free & (1 << i))
kfree(log->buffer[i]);
kfree(log);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 1e73cf87ff88..c9bb3be21d2b 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -251,6 +251,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
error = filemap_fdatawait(inode->i_mapping);
if (error)
goto out;
+ truncate_inode_pages(inode->i_mapping, 0);
if (new_flags & GFS2_DIF_JDATA)
gfs2_ordered_del_inode(ip);
}
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 72b48f6f5561..58bc5013ca49 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -236,8 +236,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
return NULL;
qd->qd_sbd = sdp;
- qd->qd_lockref.count = 0;
- spin_lock_init(&qd->qd_lockref.lock);
+ lockref_init(&qd->qd_lockref, 0);
qd->qd_id = qid;
qd->qd_slot = -1;
INIT_LIST_HEAD(&qd->qd_lru);
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index f462d9cb3087..988f38dc5b2c 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -44,8 +44,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
int ret;
ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
- if (capable(CAP_SYS_RESOURCE) ||
- sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+ if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
+ capable(CAP_SYS_RESOURCE))
return 0;
ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (ret)
diff --git a/fs/libfs.c b/fs/libfs.c
index 748ac5923154..5b6120b19e99 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -245,9 +245,16 @@ const struct inode_operations simple_dir_inode_operations = {
};
EXPORT_SYMBOL(simple_dir_inode_operations);
-/* 0 is '.', 1 is '..', so always start with offset 2 or more */
+/* simple_offset_add() never assigns these to a dentry */
enum {
- DIR_OFFSET_MIN = 2,
+ DIR_OFFSET_FIRST = 2, /* Find first real entry */
+ DIR_OFFSET_EOD = S32_MAX,
+};
+
+/* simple_offset_add() allocation range */
+enum {
+ DIR_OFFSET_MIN = DIR_OFFSET_FIRST + 1,
+ DIR_OFFSET_MAX = DIR_OFFSET_EOD - 1,
};
static void offset_set(struct dentry *dentry, long offset)
@@ -291,9 +298,10 @@ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
return -EBUSY;
ret = mtree_alloc_cyclic(&octx->mt, &offset, dentry, DIR_OFFSET_MIN,
- LONG_MAX, &octx->next_offset, GFP_KERNEL);
- if (ret < 0)
- return ret;
+ DIR_OFFSET_MAX, &octx->next_offset,
+ GFP_KERNEL);
+ if (unlikely(ret < 0))
+ return ret == -EBUSY ? -ENOSPC : ret;
offset_set(dentry, offset);
return 0;
@@ -330,38 +338,6 @@ void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry)
}
/**
- * simple_offset_empty - Check if a dentry can be unlinked
- * @dentry: dentry to be tested
- *
- * Returns 0 if @dentry is a non-empty directory; otherwise returns 1.
- */
-int simple_offset_empty(struct dentry *dentry)
-{
- struct inode *inode = d_inode(dentry);
- struct offset_ctx *octx;
- struct dentry *child;
- unsigned long index;
- int ret = 1;
-
- if (!inode || !S_ISDIR(inode->i_mode))
- return ret;
-
- index = DIR_OFFSET_MIN;
- octx = inode->i_op->get_offset_ctx(inode);
- mt_for_each(&octx->mt, child, index, LONG_MAX) {
- spin_lock(&child->d_lock);
- if (simple_positive(child)) {
- spin_unlock(&child->d_lock);
- ret = 0;
- break;
- }
- spin_unlock(&child->d_lock);
- }
-
- return ret;
-}
-
-/**
* simple_offset_rename - handle directory offsets for rename
* @old_dir: parent directory of source entry
* @old_dentry: dentry of source entry
@@ -454,14 +430,6 @@ void simple_offset_destroy(struct offset_ctx *octx)
mtree_destroy(&octx->mt);
}
-static int offset_dir_open(struct inode *inode, struct file *file)
-{
- struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
-
- file->private_data = (void *)ctx->next_offset;
- return 0;
-}
-
/**
* offset_dir_llseek - Advance the read position of a directory descriptor
* @file: an open directory whose position is to be updated
@@ -475,9 +443,6 @@ static int offset_dir_open(struct inode *inode, struct file *file)
*/
static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
{
- struct inode *inode = file->f_inode;
- struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
-
switch (whence) {
case SEEK_CUR:
offset += file->f_pos;
@@ -490,62 +455,89 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
return -EINVAL;
}
- /* In this case, ->private_data is protected by f_pos_lock */
- if (!offset)
- file->private_data = (void *)ctx->next_offset;
return vfs_setpos(file, offset, LONG_MAX);
}
-static struct dentry *offset_find_next(struct offset_ctx *octx, loff_t offset)
+static struct dentry *find_positive_dentry(struct dentry *parent,
+ struct dentry *dentry,
+ bool next)
{
- MA_STATE(mas, &octx->mt, offset, offset);
+ struct dentry *found = NULL;
+
+ spin_lock(&parent->d_lock);
+ if (next)
+ dentry = d_next_sibling(dentry);
+ else if (!dentry)
+ dentry = d_first_child(parent);
+ hlist_for_each_entry_from(dentry, d_sib) {
+ if (!simple_positive(dentry))
+ continue;
+ spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ if (simple_positive(dentry))
+ found = dget_dlock(dentry);
+ spin_unlock(&dentry->d_lock);
+ if (likely(found))
+ break;
+ }
+ spin_unlock(&parent->d_lock);
+ return found;
+}
+
+static noinline_for_stack struct dentry *
+offset_dir_lookup(struct dentry *parent, loff_t offset)
+{
+ struct inode *inode = d_inode(parent);
+ struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
struct dentry *child, *found = NULL;
- rcu_read_lock();
- child = mas_find(&mas, LONG_MAX);
- if (!child)
- goto out;
- spin_lock(&child->d_lock);
- if (simple_positive(child))
- found = dget_dlock(child);
- spin_unlock(&child->d_lock);
-out:
- rcu_read_unlock();
+ MA_STATE(mas, &octx->mt, offset, offset);
+
+ if (offset == DIR_OFFSET_FIRST)
+ found = find_positive_dentry(parent, NULL, false);
+ else {
+ rcu_read_lock();
+ child = mas_find(&mas, DIR_OFFSET_MAX);
+ found = find_positive_dentry(parent, child, false);
+ rcu_read_unlock();
+ }
return found;
}
static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
- long offset = dentry2offset(dentry);
- return ctx->actor(ctx, dentry->d_name.name, dentry->d_name.len, offset,
- inode->i_ino, fs_umode_to_dtype(inode->i_mode));
+ return dir_emit(ctx, dentry->d_name.name, dentry->d_name.len,
+ inode->i_ino, fs_umode_to_dtype(inode->i_mode));
}
-static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, long last_index)
+static void offset_iterate_dir(struct file *file, struct dir_context *ctx)
{
- struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
+ struct dentry *dir = file->f_path.dentry;
struct dentry *dentry;
+ dentry = offset_dir_lookup(dir, ctx->pos);
+ if (!dentry)
+ goto out_eod;
while (true) {
- dentry = offset_find_next(octx, ctx->pos);
- if (!dentry)
- return;
-
- if (dentry2offset(dentry) >= last_index) {
- dput(dentry);
- return;
- }
+ struct dentry *next;
- if (!offset_dir_emit(ctx, dentry)) {
- dput(dentry);
- return;
- }
+ ctx->pos = dentry2offset(dentry);
+ if (!offset_dir_emit(ctx, dentry))
+ break;
- ctx->pos = dentry2offset(dentry) + 1;
+ next = find_positive_dentry(dir, dentry, true);
dput(dentry);
+
+ if (!next)
+ goto out_eod;
+ dentry = next;
}
+ dput(dentry);
+ return;
+
+out_eod:
+ ctx->pos = DIR_OFFSET_EOD;
}
/**
@@ -565,6 +557,8 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, lon
*
* On return, @ctx->pos contains an offset that will read the next entry
* in this directory when offset_readdir() is called again with @ctx.
+ * Caller places this value in the d_off field of the last entry in the
+ * user's buffer.
*
* Return values:
* %0 - Complete
@@ -572,19 +566,17 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, lon
static int offset_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dir = file->f_path.dentry;
- long last_index = (long)file->private_data;
lockdep_assert_held(&d_inode(dir)->i_rwsem);
if (!dir_emit_dots(file, ctx))
return 0;
-
- offset_iterate_dir(d_inode(dir), ctx, last_index);
+ if (ctx->pos != DIR_OFFSET_EOD)
+ offset_iterate_dir(file, ctx);
return 0;
}
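A rough caller-side sketch (hypothetical buffer handling, not part of the patch) of how the position maintained above is consumed: the d_off of the last entry returned by getdents64 is what a reader seeks to in order to resume, and a resumed read at end-of-directory simply returns no entries:

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

/* Illustrative only: resume a directory scan from the d_off recorded
 * for the last entry of the previous batch.
 */
static long read_more_entries(int dfd, char *buf, size_t len, long resume_pos)
{
	if (lseek(dfd, resume_pos, SEEK_SET) < 0)	/* resume_pos came from d_off */
		return -1;
	return syscall(SYS_getdents64, dfd, buf, len);	/* 0 means end of directory */
}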
const struct file_operations simple_offset_dir_operations = {
- .open = offset_dir_open,
.llseek = offset_dir_llseek,
.iterate_shared = offset_readdir,
.read = generic_read_dir,
@@ -673,6 +665,7 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
s->s_blocksize_bits = PAGE_SHIFT;
s->s_magic = ctx->magic;
s->s_op = ctx->ops ?: &simple_super_operations;
+ s->s_export_op = ctx->eops;
s->s_xattr = ctx->xattr;
s->s_time_gran = 1;
root = new_inode(s);
diff --git a/fs/mount.h b/fs/mount.h
index 179f690a0c72..ffb613cdfeee 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -8,15 +8,23 @@
struct mnt_namespace {
struct ns_common ns;
struct mount * root;
- struct rb_root mounts; /* Protected by namespace_sem */
+ struct {
+ struct rb_root mounts; /* Protected by namespace_sem */
+ struct rb_node *mnt_last_node; /* last (rightmost) mount in the rbtree */
+ struct rb_node *mnt_first_node; /* first (leftmost) mount in the rbtree */
+ };
struct user_namespace *user_ns;
struct ucounts *ucounts;
u64 seq; /* Sequence number to prevent loops */
- wait_queue_head_t poll;
+ union {
+ wait_queue_head_t poll;
+ struct rcu_head mnt_ns_rcu;
+ };
u64 event;
unsigned int nr_mounts; /* # of mounts in the namespace */
unsigned int pending_mounts;
struct rb_node mnt_ns_tree_node; /* node in the mnt_ns_tree */
+ struct list_head mnt_ns_list; /* entry in the sequential list of mount namespaces */
refcount_t passive; /* number references not pinning @mounts */
} __randomize_layout;
@@ -150,22 +158,21 @@ static inline bool mnt_ns_attached(const struct mount *mnt)
static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list)
{
+ struct mnt_namespace *ns = mnt->mnt_ns;
WARN_ON(!mnt_ns_attached(mnt));
- rb_erase(&mnt->mnt_node, &mnt->mnt_ns->mounts);
+ if (ns->mnt_last_node == &mnt->mnt_node)
+ ns->mnt_last_node = rb_prev(&mnt->mnt_node);
+ if (ns->mnt_first_node == &mnt->mnt_node)
+ ns->mnt_first_node = rb_next(&mnt->mnt_node);
+ rb_erase(&mnt->mnt_node, &ns->mounts);
RB_CLEAR_NODE(&mnt->mnt_node);
list_add_tail(&mnt->mnt_list, dt_list);
}
bool has_locked_children(struct mount *mnt, struct dentry *dentry);
-struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mnt_ns, bool previous);
-static inline struct mnt_namespace *lookup_next_mnt_ns(struct mnt_namespace *mntns)
-{
- return __lookup_next_mnt_ns(mntns, false);
-}
-static inline struct mnt_namespace *lookup_prev_mnt_ns(struct mnt_namespace *mntns)
-{
- return __lookup_next_mnt_ns(mntns, true);
-}
+struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mnt_ns,
+ bool previous);
+
static inline struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
return container_of(ns, struct mnt_namespace, ns);
diff --git a/fs/namei.c b/fs/namei.c
index 9d30c7aa9aa6..e56c29a22d26 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -5272,19 +5272,16 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
getname(newname), 0);
}
-int readlink_copy(char __user *buffer, int buflen, const char *link)
+int readlink_copy(char __user *buffer, int buflen, const char *link, int linklen)
{
- int len = PTR_ERR(link);
- if (IS_ERR(link))
- goto out;
+ int copylen;
- len = strlen(link);
- if (len > (unsigned) buflen)
- len = buflen;
- if (copy_to_user(buffer, link, len))
- len = -EFAULT;
-out:
- return len;
+ copylen = linklen;
+ if (unlikely(copylen > (unsigned) buflen))
+ copylen = buflen;
+ if (copy_to_user(buffer, link, copylen))
+ copylen = -EFAULT;
+ return copylen;
}
/**
@@ -5304,6 +5301,9 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
const char *link;
int res;
+ if (inode->i_opflags & IOP_CACHED_LINK)
+ return readlink_copy(buffer, buflen, inode->i_link, inode->i_linklen);
+
if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
if (unlikely(inode->i_op->readlink))
return inode->i_op->readlink(dentry, buffer, buflen);
@@ -5322,7 +5322,7 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
if (IS_ERR(link))
return PTR_ERR(link);
}
- res = readlink_copy(buffer, buflen, link);
+ res = readlink_copy(buffer, buflen, link, strlen(link));
do_delayed_call(&done);
return res;
}
@@ -5391,10 +5391,14 @@ EXPORT_SYMBOL(page_put_link);
int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
+ const char *link;
+ int res;
+
DEFINE_DELAYED_CALL(done);
- int res = readlink_copy(buffer, buflen,
- page_get_link(dentry, d_inode(dentry),
- &done));
+ link = page_get_link(dentry, d_inode(dentry), &done);
+ res = PTR_ERR(link);
+ if (!IS_ERR(link))
+ res = readlink_copy(buffer, buflen, link, strlen(link));
do_delayed_call(&done);
return res;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index eac057e56948..4013fbac354a 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -32,7 +32,7 @@
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>
-#include <linux/nospec.h>
+#include <linux/pidfs.h>
#include "pnode.h"
#include "internal.h"
@@ -66,12 +66,12 @@ static int __init set_mphash_entries(char *str)
__setup("mphash_entries=", set_mphash_entries);
static u64 event;
-static DEFINE_IDA(mnt_id_ida);
+static DEFINE_XARRAY_FLAGS(mnt_id_xa, XA_FLAGS_ALLOC);
static DEFINE_IDA(mnt_group_ida);
/* Don't allow confusion with old 32bit mount ID */
#define MNT_UNIQUE_ID_OFFSET (1ULL << 31)
-static atomic64_t mnt_id_ctr = ATOMIC64_INIT(MNT_UNIQUE_ID_OFFSET);
+static u64 mnt_id_ctr = MNT_UNIQUE_ID_OFFSET;
static struct hlist_head *mount_hashtable __ro_after_init;
static struct hlist_head *mountpoint_hashtable __ro_after_init;
@@ -79,8 +79,10 @@ static struct kmem_cache *mnt_cache __ro_after_init;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
-static DEFINE_RWLOCK(mnt_ns_tree_lock);
+static DEFINE_SEQLOCK(mnt_ns_tree_lock);
+
static struct rb_root mnt_ns_tree = RB_ROOT; /* protected by mnt_ns_tree_lock */
+static LIST_HEAD(mnt_ns_list); /* protected by mnt_ns_tree_lock */
struct mount_kattr {
unsigned int attr_set;
@@ -106,17 +108,6 @@ EXPORT_SYMBOL_GPL(fs_kobj);
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
-static int mnt_ns_cmp(u64 seq, const struct mnt_namespace *ns)
-{
- u64 seq_b = ns->seq;
-
- if (seq < seq_b)
- return -1;
- if (seq > seq_b)
- return 1;
- return 0;
-}
-
static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node)
{
if (!node)
@@ -124,25 +115,52 @@ static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node)
return rb_entry(node, struct mnt_namespace, mnt_ns_tree_node);
}
-static bool mnt_ns_less(struct rb_node *a, const struct rb_node *b)
+static int mnt_ns_cmp(struct rb_node *a, const struct rb_node *b)
{
struct mnt_namespace *ns_a = node_to_mnt_ns(a);
struct mnt_namespace *ns_b = node_to_mnt_ns(b);
u64 seq_a = ns_a->seq;
+ u64 seq_b = ns_b->seq;
+
+ if (seq_a < seq_b)
+ return -1;
+ if (seq_a > seq_b)
+ return 1;
+ return 0;
+}
- return mnt_ns_cmp(seq_a, ns_b) < 0;
+static inline void mnt_ns_tree_write_lock(void)
+{
+ write_seqlock(&mnt_ns_tree_lock);
+}
+
+static inline void mnt_ns_tree_write_unlock(void)
+{
+ write_sequnlock(&mnt_ns_tree_lock);
}
static void mnt_ns_tree_add(struct mnt_namespace *ns)
{
- guard(write_lock)(&mnt_ns_tree_lock);
- rb_add(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_less);
+ struct rb_node *node, *prev;
+
+ mnt_ns_tree_write_lock();
+ node = rb_find_add_rcu(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_cmp);
+ /*
+ * If there's no previous entry, simply add it after the
+ * head; if there is, add it after the previous entry.
+ */
+ prev = rb_prev(&ns->mnt_ns_tree_node);
+ if (!prev)
+ list_add_rcu(&ns->mnt_ns_list, &mnt_ns_list);
+ else
+ list_add_rcu(&ns->mnt_ns_list, &node_to_mnt_ns(prev)->mnt_ns_list);
+ mnt_ns_tree_write_unlock();
+
+ WARN_ON_ONCE(node);
}
static void mnt_ns_release(struct mnt_namespace *ns)
{
- lockdep_assert_not_held(&mnt_ns_tree_lock);
-
/* keep alive for {list,stat}mount() */
if (refcount_dec_and_test(&ns->passive)) {
put_user_ns(ns->user_ns);
@@ -151,41 +169,34 @@ static void mnt_ns_release(struct mnt_namespace *ns)
}
DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T))
+static void mnt_ns_release_rcu(struct rcu_head *rcu)
+{
+ mnt_ns_release(container_of(rcu, struct mnt_namespace, mnt_ns_rcu));
+}
+
static void mnt_ns_tree_remove(struct mnt_namespace *ns)
{
/* remove from global mount namespace list */
if (!is_anon_ns(ns)) {
- guard(write_lock)(&mnt_ns_tree_lock);
+ mnt_ns_tree_write_lock();
rb_erase(&ns->mnt_ns_tree_node, &mnt_ns_tree);
+ list_bidir_del_rcu(&ns->mnt_ns_list);
+ mnt_ns_tree_write_unlock();
}
- mnt_ns_release(ns);
+ call_rcu(&ns->mnt_ns_rcu, mnt_ns_release_rcu);
}
-/*
- * Returns the mount namespace which either has the specified id, or has the
- * next smallest id afer the specified one.
- */
-static struct mnt_namespace *mnt_ns_find_id_at(u64 mnt_ns_id)
+static int mnt_ns_find(const void *key, const struct rb_node *node)
{
- struct rb_node *node = mnt_ns_tree.rb_node;
- struct mnt_namespace *ret = NULL;
-
- lockdep_assert_held(&mnt_ns_tree_lock);
+ const u64 mnt_ns_id = *(u64 *)key;
+ const struct mnt_namespace *ns = node_to_mnt_ns(node);
- while (node) {
- struct mnt_namespace *n = node_to_mnt_ns(node);
-
- if (mnt_ns_id <= n->seq) {
- ret = node_to_mnt_ns(node);
- if (mnt_ns_id == n->seq)
- break;
- node = node->rb_left;
- } else {
- node = node->rb_right;
- }
- }
- return ret;
+ if (mnt_ns_id < ns->seq)
+ return -1;
+ if (mnt_ns_id > ns->seq)
+ return 1;
+ return 0;
}
/*
@@ -195,18 +206,37 @@ static struct mnt_namespace *mnt_ns_find_id_at(u64 mnt_ns_id)
* namespace the @namespace_sem must first be acquired. If the namespace has
* already shut down before acquiring @namespace_sem, {list,stat}mount() will
* see that the mount rbtree of the namespace is empty.
+ *
+ * Note the lookup is lockless, protected by a sequence counter. We only
+ * need to guard against false negatives as false positives aren't
+ * possible. So if we didn't find a mount namespace and the sequence
+ * counter has changed, we need to retry. If the sequence counter is
+ * still the same, we know the search actually failed.
*/
static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id)
{
- struct mnt_namespace *ns;
+ struct mnt_namespace *ns;
+ struct rb_node *node;
+ unsigned int seq;
+
+ guard(rcu)();
+ do {
+ seq = read_seqbegin(&mnt_ns_tree_lock);
+ node = rb_find_rcu(&mnt_ns_id, &mnt_ns_tree, mnt_ns_find);
+ if (node)
+ break;
+ } while (read_seqretry(&mnt_ns_tree_lock, seq));
- guard(read_lock)(&mnt_ns_tree_lock);
- ns = mnt_ns_find_id_at(mnt_ns_id);
- if (!ns || ns->seq != mnt_ns_id)
- return NULL;
+ if (!node)
+ return NULL;
- refcount_inc(&ns->passive);
- return ns;
+ /*
+ * The last reference count is put with RCU delay so we can
+ * unconditionally acquire a reference here.
+ */
+ ns = node_to_mnt_ns(node);
+ refcount_inc(&ns->passive);
+ return ns;
}
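As a generic illustration of the retry pattern the comment above describes (hypothetical names, not part of the patch): because the lookup can only produce false negatives, a hit is returned immediately and only a miss needs to be retried when the write-side sequence has moved:

/* Sketch only: seqlock-guarded lockless lookup with retry on miss. */
static struct foo *foo_lookup_rcu(u64 key)
{
	struct foo *found;
	unsigned int seq;

	guard(rcu)();
	do {
		seq = read_seqbegin(&foo_tree_lock);	/* assumed seqlock_t */
		found = foo_tree_find_rcu(key);		/* hypothetical RCU-safe search */
		if (found)
			break;
	} while (read_seqretry(&foo_tree_lock, seq));

	return found;
}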
static inline void lock_mount_hash(void)
@@ -236,18 +266,19 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry)
static int mnt_alloc_id(struct mount *mnt)
{
- int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);
+ int res;
- if (res < 0)
- return res;
- mnt->mnt_id = res;
- mnt->mnt_id_unique = atomic64_inc_return(&mnt_id_ctr);
- return 0;
+ xa_lock(&mnt_id_xa);
+ res = __xa_alloc(&mnt_id_xa, &mnt->mnt_id, mnt, XA_LIMIT(1, INT_MAX), GFP_KERNEL);
+ if (!res)
+ mnt->mnt_id_unique = ++mnt_id_ctr;
+ xa_unlock(&mnt_id_xa);
+ return res;
}
static void mnt_free_id(struct mount *mnt)
{
- ida_free(&mnt_id_ida, mnt->mnt_id);
+ xa_erase(&mnt_id_xa, mnt->mnt_id);
}
/*
@@ -1124,16 +1155,25 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt)
{
struct rb_node **link = &ns->mounts.rb_node;
struct rb_node *parent = NULL;
+ bool mnt_first_node = true, mnt_last_node = true;
WARN_ON(mnt_ns_attached(mnt));
mnt->mnt_ns = ns;
while (*link) {
parent = *link;
- if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique)
+ if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) {
link = &parent->rb_left;
- else
+ mnt_last_node = false;
+ } else {
link = &parent->rb_right;
+ mnt_first_node = false;
+ }
}
+
+ if (mnt_last_node)
+ ns->mnt_last_node = &mnt->mnt_node;
+ if (mnt_first_node)
+ ns->mnt_first_node = &mnt->mnt_node;
rb_link_node(&mnt->mnt_node, parent, link);
rb_insert_color(&mnt->mnt_node, &ns->mounts);
}
@@ -2069,30 +2109,34 @@ struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
return &mnt->ns;
}
-struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mntns, bool previous)
+struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous)
{
- guard(read_lock)(&mnt_ns_tree_lock);
+ guard(rcu)();
+
for (;;) {
- struct rb_node *node;
+ struct list_head *list;
if (previous)
- node = rb_prev(&mntns->mnt_ns_tree_node);
+ list = rcu_dereference(list_bidir_prev_rcu(&mntns->mnt_ns_list));
else
- node = rb_next(&mntns->mnt_ns_tree_node);
- if (!node)
+ list = rcu_dereference(list_next_rcu(&mntns->mnt_ns_list));
+ if (list_is_head(list, &mnt_ns_list))
return ERR_PTR(-ENOENT);
- mntns = node_to_mnt_ns(node);
- node = &mntns->mnt_ns_tree_node;
+ mntns = list_entry_rcu(list, struct mnt_namespace, mnt_ns_list);
+ /*
+ * The last passive reference count is put with RCU
+ * delay, so accessing the mount namespace is not just
+ * safe; all relevant members are also still valid.
+ */
if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN))
continue;
/*
- * Holding mnt_ns_tree_lock prevents the mount namespace from
- * being freed but it may well be on it's deathbed. We want an
- * active reference, not just a passive one here as we're
- * persisting the mount namespace.
+ * We need an active reference count as we're persisting
+ * the mount namespace and it might already be on its
+ * deathbed.
*/
if (!refcount_inc_not_zero(&mntns->ns.count))
continue;
@@ -2736,8 +2780,13 @@ static struct mount *__do_loopback(struct path *old_path, int recurse)
if (IS_MNT_UNBINDABLE(old))
return mnt;
- if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
- return mnt;
+ if (!check_mnt(old)) {
+ const struct dentry_operations *d_op = old_path->dentry->d_op;
+
+ if (d_op != &ns_dentry_operations &&
+ d_op != &pidfs_dentry_operations)
+ return mnt;
+ }
if (!recurse && has_locked_children(old, old_path->dentry))
return mnt;
@@ -3839,7 +3888,7 @@ int path_mount(const char *dev_name, struct path *path,
data_page);
}
-long do_mount(const char *dev_name, const char __user *dir_name,
+int do_mount(const char *dev_name, const char __user *dir_name,
const char *type_page, unsigned long flags, void *data_page)
{
struct path path;
@@ -3909,6 +3958,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a
refcount_set(&new_ns->ns.count, 1);
refcount_set(&new_ns->passive, 1);
new_ns->mounts = RB_ROOT;
+ INIT_LIST_HEAD(&new_ns->mnt_ns_list);
RB_CLEAR_NODE(&new_ns->mnt_ns_tree_node);
init_waitqueue_head(&new_ns->poll);
new_ns->user_ns = get_user_ns(user_ns);
@@ -3988,7 +4038,6 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
while (p->mnt.mnt_root != q->mnt.mnt_root)
p = next_mnt(skip_mnt_tree(p), old);
}
- mnt_ns_tree_add(new_ns);
namespace_unlock();
if (rootmnt)
@@ -3996,6 +4045,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
if (pwdmnt)
mntput(pwdmnt);
+ mnt_ns_tree_add(new_ns);
return new_ns;
}
@@ -5042,6 +5092,10 @@ static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq)
if (sb->s_op->show_options) {
size_t start = seq->count;
+ err = security_sb_show_options(seq, sb);
+ if (err)
+ return err;
+
err = sb->s_op->show_options(seq, mnt->mnt_root);
if (err)
return err;
@@ -5529,9 +5583,9 @@ static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id,
if (!last_mnt_id) {
if (reverse)
- first = node_to_mount(rb_last(&ns->mounts));
+ first = node_to_mount(ns->mnt_last_node);
else
- first = node_to_mount(rb_first(&ns->mounts));
+ first = node_to_mount(ns->mnt_first_node);
} else {
if (reverse)
first = mnt_find_id_at_reverse(ns, last_mnt_id - 1);
diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile
index d08b0bfb6756..b43188d64bd8 100644
--- a/fs/netfs/Makefile
+++ b/fs/netfs/Makefile
@@ -13,8 +13,11 @@ netfs-y := \
read_collect.o \
read_pgpriv2.o \
read_retry.o \
+ read_single.o \
+ rolling_buffer.o \
write_collect.o \
- write_issue.o
+ write_issue.o \
+ write_retry.o
netfs-$(CONFIG_NETFS_STATS) += stats.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 4dc9b8286355..f761d44b3436 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -64,37 +64,6 @@ static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_in
}
/*
- * Decant the list of folios to read into a rolling buffer.
- */
-static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq,
- struct folio_queue *folioq,
- struct folio_batch *put_batch)
-{
- unsigned int order, nr;
- size_t size = 0;
-
- nr = __readahead_batch(rreq->ractl, (struct page **)folioq->vec.folios,
- ARRAY_SIZE(folioq->vec.folios));
- folioq->vec.nr = nr;
- for (int i = 0; i < nr; i++) {
- struct folio *folio = folioq_folio(folioq, i);
-
- trace_netfs_folio(folio, netfs_folio_trace_read);
- order = folio_order(folio);
- folioq->orders[i] = order;
- size += PAGE_SIZE << order;
-
- if (!folio_batch_add(put_batch, folio))
- folio_batch_release(put_batch);
- }
-
- for (int i = nr; i < folioq_nr_slots(folioq); i++)
- folioq_clear(folioq, i);
-
- return size;
-}
-
-/*
* netfs_prepare_read_iterator - Prepare the subreq iterator for I/O
* @subreq: The subrequest to be set up
*
@@ -128,19 +97,12 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
folio_batch_init(&put_batch);
while (rreq->submitted < subreq->start + rsize) {
- struct folio_queue *tail = rreq->buffer_tail, *new;
- size_t added;
-
- new = kmalloc(sizeof(*new), GFP_NOFS);
- if (!new)
- return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(new);
- new->prev = tail;
- tail->next = new;
- rreq->buffer_tail = new;
- added = netfs_load_buffer_from_ra(rreq, new, &put_batch);
- rreq->iter.count += added;
+ ssize_t added;
+
+ added = rolling_buffer_load_from_ra(&rreq->buffer, rreq->ractl,
+ &put_batch);
+ if (added < 0)
+ return added;
rreq->submitted += added;
}
folio_batch_release(&put_batch);
@@ -148,7 +110,7 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
subreq->len = rsize;
if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
- size_t limit = netfs_limit_iter(&rreq->iter, 0, rsize,
+ size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
rreq->io_streams[0].sreq_max_segs);
if (limit < rsize) {
@@ -157,20 +119,10 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq)
}
}
- subreq->io_iter = rreq->iter;
-
- if (iov_iter_is_folioq(&subreq->io_iter)) {
- if (subreq->io_iter.folioq_slot >= folioq_nr_slots(subreq->io_iter.folioq)) {
- subreq->io_iter.folioq = subreq->io_iter.folioq->next;
- subreq->io_iter.folioq_slot = 0;
- }
- subreq->curr_folioq = (struct folio_queue *)subreq->io_iter.folioq;
- subreq->curr_folioq_slot = subreq->io_iter.folioq_slot;
- subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
- }
+ subreq->io_iter = rreq->buffer.iter;
iov_iter_truncate(&subreq->io_iter, subreq->len);
- iov_iter_advance(&rreq->iter, subreq->len);
+ rolling_buffer_advance(&rreq->buffer, subreq->len);
return subreq->len;
}
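
The subrequest setup above keeps the long-standing slicing idiom even though the backing store is now a rolling_buffer: copy the request's master iterator into the subrequest, truncate the copy to the slice length, then advance the master past it (rolling_buffer_advance() wraps that advance with its own bookkeeping). A minimal sketch of the bare idiom, with a hypothetical helper name, not part of the patch:

/*
 * Illustrative sketch only: hand a fixed-size slice of a master
 * iterator to a subrequest.  Consecutive calls hand out consecutive,
 * non-overlapping windows of the master.
 */
static void demo_slice_iter(struct iov_iter *master, struct iov_iter *slice,
                            size_t len)
{
        *slice = *master;                       /* cheap struct copy */
        iov_iter_truncate(slice, len);          /* slice sees only 'len' bytes */
        iov_iter_advance(master, len);          /* next slice starts after us */
}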
@@ -179,25 +131,14 @@ static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_request *rr
loff_t i_size)
{
struct netfs_cache_resources *cres = &rreq->cache_resources;
+ enum netfs_io_source source;
if (!cres->ops)
return NETFS_DOWNLOAD_FROM_SERVER;
- return cres->ops->prepare_read(subreq, i_size);
-}
-
-static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
- bool was_async)
-{
- struct netfs_io_subrequest *subreq = priv;
-
- if (transferred_or_error < 0) {
- netfs_read_subreq_terminated(subreq, transferred_or_error, was_async);
- return;
- }
+ source = cres->ops->prepare_read(subreq, i_size);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+ return source;
- if (transferred_or_error > 0)
- subreq->transferred += transferred_or_error;
- netfs_read_subreq_terminated(subreq, 0, was_async);
}
/*
@@ -214,6 +155,47 @@ static void netfs_read_cache_to_pagecache(struct netfs_io_request *rreq,
netfs_cache_read_terminated, subreq);
}
+static void netfs_issue_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ /* We add to the end of the list whilst the collector may be walking
+ * the list. The collector only goes nextwards and uses the lock to
+ * remove entries off of the front.
+ */
+ spin_lock(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+ stream->front = subreq;
+ if (!stream->active) {
+ stream->collected_to = stream->front->start;
+ /* Store list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ }
+ }
+
+ spin_unlock(&rreq->lock);
+
+ switch (subreq->source) {
+ case NETFS_DOWNLOAD_FROM_SERVER:
+ rreq->netfs_ops->issue_read(subreq);
+ break;
+ case NETFS_READ_FROM_CACHE:
+ netfs_read_cache_to_pagecache(rreq, subreq);
+ break;
+ default:
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ subreq->error = 0;
+ iov_iter_zero(subreq->len, &subreq->io_iter);
+ subreq->transferred = subreq->len;
+ netfs_read_subreq_terminated(subreq);
+ break;
+ }
+}
+
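
The comment in netfs_issue_read() above describes the queue discipline between the issuing thread and the collector: the issuer appends at the tail under the request lock and publishes stream->active with a store-release once the first entry is linked; the collector only ever removes from the front. A stripped-down sketch of that discipline with hypothetical demo_* names (not part of the patch):

/*
 * Illustrative sketch only: one thread queues items at the tail under a
 * spinlock while a collector drains completed items from the front.
 * 'active' is published with store-release after the first item is
 * linked, so a reader that observes active == true also observes a
 * fully linked list.
 */
struct demo_stream {
        spinlock_t              lock;
        struct list_head        items;
        bool                    active;
};

static void demo_queue(struct demo_stream *s, struct list_head *item)
{
        spin_lock(&s->lock);
        list_add_tail(item, &s->items);
        if (!s->active)
                smp_store_release(&s->active, true);    /* publish list first */
        spin_unlock(&s->lock);
}

static struct list_head *demo_pop_front(struct demo_stream *s)
{
        struct list_head *item = NULL;

        spin_lock(&s->lock);
        if (!list_empty(&s->items)) {
                item = s->items.next;
                list_del_init(item);
        }
        spin_unlock(&s->lock);
        return item;
}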
/*
* Perform a read to the pagecache from a series of sources of different types,
* slicing up the region to be read according to available cache blocks and
@@ -226,11 +208,9 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
ssize_t size = rreq->len;
int ret = 0;
- atomic_inc(&rreq->nr_outstanding);
-
do {
struct netfs_io_subrequest *subreq;
- enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER;
+ enum netfs_io_source source = NETFS_SOURCE_UNKNOWN;
ssize_t slice;
subreq = netfs_alloc_subrequest(rreq);
@@ -242,20 +222,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
subreq->start = start;
subreq->len = size;
- atomic_inc(&rreq->nr_outstanding);
- spin_lock_bh(&rreq->lock);
- list_add_tail(&subreq->rreq_link, &rreq->subrequests);
- subreq->prev_donated = rreq->prev_donated;
- rreq->prev_donated = 0;
- trace_netfs_sreq(subreq, netfs_sreq_trace_added);
- spin_unlock_bh(&rreq->lock);
-
source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
subreq->source = source;
if (source == NETFS_DOWNLOAD_FROM_SERVER) {
unsigned long long zp = umin(ictx->zero_point, rreq->i_size);
size_t len = subreq->len;
+ if (unlikely(rreq->origin == NETFS_READ_SINGLE))
+ zp = rreq->i_size;
if (subreq->start >= zp) {
subreq->source = source = NETFS_FILL_WITH_ZEROES;
goto fill_with_zeroes;
@@ -275,17 +249,18 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
netfs_stat(&netfs_n_rh_download);
if (rreq->netfs_ops->prepare_read) {
ret = rreq->netfs_ops->prepare_read(subreq);
- if (ret < 0)
- goto prep_failed;
+ if (ret < 0) {
+ subreq->error = ret;
+ /* Not queued - release both refs. */
+ netfs_put_subrequest(subreq, false,
+ netfs_sreq_trace_put_cancel);
+ netfs_put_subrequest(subreq, false,
+ netfs_sreq_trace_put_cancel);
+ break;
+ }
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
}
-
- slice = netfs_prepare_read_iterator(subreq);
- if (slice < 0)
- goto prep_iter_failed;
-
- rreq->netfs_ops->issue_read(subreq);
- goto done;
+ goto issue;
}
fill_with_zeroes:
@@ -293,94 +268,50 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
subreq->source = NETFS_FILL_WITH_ZEROES;
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
netfs_stat(&netfs_n_rh_zero);
- slice = netfs_prepare_read_iterator(subreq);
- if (slice < 0)
- goto prep_iter_failed;
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
- netfs_read_subreq_terminated(subreq, 0, false);
- goto done;
+ goto issue;
}
if (source == NETFS_READ_FROM_CACHE) {
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
- slice = netfs_prepare_read_iterator(subreq);
- if (slice < 0)
- goto prep_iter_failed;
- netfs_read_cache_to_pagecache(rreq, subreq);
- goto done;
+ goto issue;
}
pr_err("Unexpected read source %u\n", source);
WARN_ON_ONCE(1);
break;
- prep_iter_failed:
- ret = slice;
- prep_failed:
- subreq->error = ret;
- atomic_dec(&rreq->nr_outstanding);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
- break;
-
- done:
+ issue:
+ slice = netfs_prepare_read_iterator(subreq);
+ if (slice < 0) {
+ ret = slice;
+ subreq->error = ret;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_cancel);
+ /* Not queued - release both refs. */
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+ break;
+ }
size -= slice;
start += slice;
+ if (size <= 0) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ }
+
+ netfs_issue_read(rreq, subreq);
cond_resched();
} while (size > 0);
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_terminated(rreq, false);
+ if (unlikely(size > 0)) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ netfs_wake_read_collector(rreq);
+ }
/* Defer error return as we may need to wait for outstanding I/O. */
cmpxchg(&rreq->error, 0, ret);
}
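
The two places above that set NETFS_RREQ_ALL_QUEUED do so only after an smp_wmb(); the collector in read_collect.c (further below) tests the flag and then issues smp_rmb() before trusting the subrequest lists. A small sketch of that barrier pairing using the real flag and structures, with hypothetical demo_* function names (not part of the patch):

/*
 * Illustrative sketch only: the issuer publishes its list updates
 * before setting ALL_QUEUED; the collector only trusts the lists after
 * it has both seen the flag and issued the read barrier.
 */
static void demo_issuer_done(struct netfs_io_request *rreq)
{
        /* ... last subrequest has been linked into the stream ... */
        smp_wmb();                      /* write lists before ALL_QUEUED */
        set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
}

static bool demo_collector_done(struct netfs_io_request *rreq,
                                struct netfs_io_stream *stream)
{
        if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
                return false;           /* issuer may still be queuing */
        smp_rmb();                      /* read ALL_QUEUED before lists */
        return list_empty(&stream->subrequests);
}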
-/*
- * Wait for the read operation to complete, successfully or otherwise.
- */
-static int netfs_wait_for_read(struct netfs_io_request *rreq)
-{
- int ret;
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
- wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
- ret = rreq->error;
- if (ret == 0 && rreq->submitted < rreq->len) {
- trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
- ret = -EIO;
- }
-
- return ret;
-}
-
-/*
- * Set up the initial folioq of buffer folios in the rolling buffer and set the
- * iterator to refer to it.
- */
-static int netfs_prime_buffer(struct netfs_io_request *rreq)
-{
- struct folio_queue *folioq;
- struct folio_batch put_batch;
- size_t added;
-
- folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
- if (!folioq)
- return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(folioq);
- rreq->buffer = folioq;
- rreq->buffer_tail = folioq;
- rreq->submitted = rreq->start;
- iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, 0);
-
- folio_batch_init(&put_batch);
- added = netfs_load_buffer_from_ra(rreq, folioq, &put_batch);
- folio_batch_release(&put_batch);
- rreq->iter.count += added;
- rreq->submitted += added;
- return 0;
-}
-
/**
* netfs_readahead - Helper to manage a read request
* @ractl: The description of the readahead request
@@ -409,6 +340,8 @@ void netfs_readahead(struct readahead_control *ractl)
if (IS_ERR(rreq))
return;
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
+
ret = netfs_begin_cache_read(rreq, ictx);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto cleanup_free;
@@ -420,7 +353,8 @@ void netfs_readahead(struct readahead_control *ractl)
netfs_rreq_expand(rreq, ractl);
rreq->ractl = ractl;
- if (netfs_prime_buffer(rreq) < 0)
+ rreq->submitted = rreq->start;
+ if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
goto cleanup_free;
netfs_read_to_pagecache(rreq);
@@ -436,23 +370,18 @@ EXPORT_SYMBOL(netfs_readahead);
/*
* Create a rolling buffer with a single occupying folio.
*/
-static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio)
+static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio,
+ unsigned int rollbuf_flags)
{
- struct folio_queue *folioq;
+ ssize_t added;
- folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
- if (!folioq)
+ if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0)
return -ENOMEM;
- netfs_stat(&netfs_n_folioq);
- folioq_init(folioq);
- folioq_append(folioq, folio);
- BUG_ON(folioq_folio(folioq, 0) != folio);
- BUG_ON(folioq_folio_order(folioq, 0) != folio_order(folio));
- rreq->buffer = folioq;
- rreq->buffer_tail = folioq;
- rreq->submitted = rreq->start + rreq->len;
- iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, rreq->len);
+ added = rolling_buffer_append(&rreq->buffer, folio, rollbuf_flags);
+ if (added < 0)
+ return added;
+ rreq->submitted = rreq->start + added;
rreq->ractl = (struct readahead_control *)1UL;
return 0;
}
@@ -520,7 +449,7 @@ static int netfs_read_gaps(struct file *file, struct folio *folio)
}
if (to < flen)
bvec_set_folio(&bvec[i++], folio, flen - to, to);
- iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len);
+ iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len);
rreq->submitted = rreq->start + flen;
netfs_read_to_pagecache(rreq);
@@ -529,7 +458,7 @@ static int netfs_read_gaps(struct file *file, struct folio *folio)
folio_put(sink);
ret = netfs_wait_for_read(rreq);
- if (ret == 0) {
+ if (ret >= 0) {
flush_dcache_folio(folio);
folio_mark_uptodate(folio);
}
@@ -588,7 +517,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
/* Set up the output buffer */
- ret = netfs_create_singular_buffer(rreq, folio);
+ ret = netfs_create_singular_buffer(rreq, folio, 0);
if (ret < 0)
goto discard;
@@ -745,7 +674,7 @@ retry:
trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
/* Set up the output buffer */
- ret = netfs_create_singular_buffer(rreq, folio);
+ ret = netfs_create_singular_buffer(rreq, folio, 0);
if (ret < 0)
goto error_put;
@@ -810,15 +739,14 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write);
/* Set up the output buffer */
- ret = netfs_create_singular_buffer(rreq, folio);
+ ret = netfs_create_singular_buffer(rreq, folio, NETFS_ROLLBUF_PAGECACHE_MARK);
if (ret < 0)
goto error_put;
- folioq_mark2(rreq->buffer, 0);
netfs_read_to_pagecache(rreq);
ret = netfs_wait_for_read(rreq);
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
- return ret;
+ return ret < 0 ? ret : 0;
error_put:
netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index b1a66a6e6bc2..0bf3c2f5a710 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -25,7 +25,7 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
subreq->len = rsize;
if (unlikely(rreq->io_streams[0].sreq_max_segs)) {
- size_t limit = netfs_limit_iter(&rreq->iter, 0, rsize,
+ size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize,
rreq->io_streams[0].sreq_max_segs);
if (limit < rsize) {
@@ -36,9 +36,9 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
- subreq->io_iter = rreq->iter;
+ subreq->io_iter = rreq->buffer.iter;
iov_iter_truncate(&subreq->io_iter, subreq->len);
- iov_iter_advance(&rreq->iter, subreq->len);
+ iov_iter_advance(&rreq->buffer.iter, subreq->len);
}
/*
@@ -47,12 +47,11 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq)
*/
static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
unsigned long long start = rreq->start;
ssize_t size = rreq->len;
int ret = 0;
- atomic_set(&rreq->nr_outstanding, 1);
-
do {
struct netfs_io_subrequest *subreq;
ssize_t slice;
@@ -67,19 +66,25 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
subreq->start = start;
subreq->len = size;
- atomic_inc(&rreq->nr_outstanding);
- spin_lock_bh(&rreq->lock);
- list_add_tail(&subreq->rreq_link, &rreq->subrequests);
- subreq->prev_donated = rreq->prev_donated;
- rreq->prev_donated = 0;
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ spin_lock(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+ stream->front = subreq;
+ if (!stream->active) {
+ stream->collected_to = stream->front->start;
+ /* Store list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ }
+ }
trace_netfs_sreq(subreq, netfs_sreq_trace_added);
- spin_unlock_bh(&rreq->lock);
+ spin_unlock(&rreq->lock);
netfs_stat(&netfs_n_rh_download);
if (rreq->netfs_ops->prepare_read) {
ret = rreq->netfs_ops->prepare_read(subreq);
if (ret < 0) {
- atomic_dec(&rreq->nr_outstanding);
netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
break;
}
@@ -87,20 +92,32 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
netfs_prepare_dio_read_iterator(subreq);
slice = subreq->len;
- rreq->netfs_ops->issue_read(subreq);
-
size -= slice;
start += slice;
rreq->submitted += slice;
+ if (size <= 0) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ }
+
+ rreq->netfs_ops->issue_read(subreq);
+ if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
+ netfs_wait_for_pause(rreq);
+ if (test_bit(NETFS_RREQ_FAILED, &rreq->flags))
+ break;
if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
break;
cond_resched();
} while (size > 0);
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_terminated(rreq, false);
+ if (unlikely(size > 0)) {
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ netfs_wake_read_collector(rreq);
+ }
+
return ret;
}
@@ -133,21 +150,10 @@ static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
goto out;
}
- if (sync) {
- trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
- wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
- TASK_UNINTERRUPTIBLE);
-
- ret = rreq->error;
- if (ret == 0 && rreq->submitted < rreq->len &&
- rreq->origin != NETFS_DIO_READ) {
- trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
- ret = -EIO;
- }
- } else {
+ if (sync)
+ ret = netfs_wait_for_read(rreq);
+ else
ret = -EIOCBQUEUED;
- }
-
out:
_leave(" = %d", ret);
return ret;
@@ -199,15 +205,15 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
* the request.
*/
if (user_backed_iter(iter)) {
- ret = netfs_extract_user_iter(iter, rreq->len, &rreq->iter, 0);
+ ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0);
if (ret < 0)
goto out;
- rreq->direct_bv = (struct bio_vec *)rreq->iter.bvec;
+ rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec;
rreq->direct_bv_count = ret;
rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
- rreq->len = iov_iter_count(&rreq->iter);
+ rreq->len = iov_iter_count(&rreq->buffer.iter);
} else {
- rreq->iter = *iter;
+ rreq->buffer.iter = *iter;
rreq->len = orig_count;
rreq->direct_bv_unpin = false;
iov_iter_advance(iter, orig_count);
@@ -215,8 +221,10 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
// TODO: Set up bounce buffer if needed
- if (!sync)
+ if (!sync) {
rreq->iocb = iocb;
+ __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags);
+ }
ret = netfs_unbuffered_read(rreq, sync);
if (ret < 0)
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index f9421f3e6d37..42ce53cc216e 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -68,12 +68,12 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
* request.
*/
if (user_backed_iter(iter)) {
- n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
+ n = netfs_extract_user_iter(iter, len, &wreq->buffer.iter, 0);
if (n < 0) {
ret = n;
goto out;
}
- wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
+ wreq->direct_bv = (struct bio_vec *)wreq->buffer.iter.bvec;
wreq->direct_bv_count = n;
wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
} else {
@@ -82,10 +82,8 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
* (eg. a bio_vec array) will persist till the end of
* the op.
*/
- wreq->iter = *iter;
+ wreq->buffer.iter = *iter;
}
-
- wreq->io_iter = wreq->iter;
}
__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
@@ -97,7 +95,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
if (async)
wreq->iocb = iocb;
- wreq->len = iov_iter_count(&wreq->io_iter);
+ wreq->len = iov_iter_count(&wreq->buffer.iter);
wreq->cleanup = netfs_cleanup_dio_write;
ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
if (ret < 0) {
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index c562aec3b483..eb76f98c894b 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -23,6 +23,7 @@
/*
* buffered_read.c
*/
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async);
int netfs_prefetch_for_write(struct file *file, struct folio *folio,
size_t offset, size_t len);
@@ -58,11 +59,8 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
/*
* misc.c
*/
-struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq);
-int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
- bool needs_put);
-struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
-void netfs_clear_buffer(struct netfs_io_request *rreq);
+struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq,
+ enum netfs_folioq_trace trace);
void netfs_reset_iter(struct netfs_io_subrequest *subreq);
/*
@@ -84,20 +82,27 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
}
+static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what)
+{
+ trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref), what);
+}
+
/*
* read_collect.c
*/
-void netfs_read_termination_worker(struct work_struct *work);
-void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async);
+void netfs_read_collection_worker(struct work_struct *work);
+void netfs_wake_read_collector(struct netfs_io_request *rreq);
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async);
+ssize_t netfs_wait_for_read(struct netfs_io_request *rreq);
+void netfs_wait_for_pause(struct netfs_io_request *rreq);
/*
* read_pgpriv2.c
*/
-void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
- struct netfs_io_request *rreq,
- struct folio_queue *folioq,
- int slot);
-void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq);
+void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio);
+void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq);
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq);
/*
@@ -113,6 +118,7 @@ void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq);
extern atomic_t netfs_n_rh_dio_read;
extern atomic_t netfs_n_rh_readahead;
extern atomic_t netfs_n_rh_read_folio;
+extern atomic_t netfs_n_rh_read_single;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
@@ -181,9 +187,9 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
struct iov_iter *source);
void netfs_issue_write(struct netfs_io_request *wreq,
struct netfs_io_stream *stream);
-int netfs_advance_write(struct netfs_io_request *wreq,
- struct netfs_io_stream *stream,
- loff_t start, size_t len, bool to_eof);
+size_t netfs_advance_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start, size_t len, bool to_eof);
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
struct folio *folio, size_t copied, bool to_page_end,
@@ -193,6 +199,11 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
/*
+ * write_retry.c
+ */
+void netfs_retry_writes(struct netfs_io_request *wreq);
+
+/*
* Miscellaneous functions.
*/
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
index 6c7be1377ee0..4e3e62040831 100644
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -37,9 +37,11 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
[NETFS_READAHEAD] = "RA",
[NETFS_READPAGE] = "RP",
[NETFS_READ_GAPS] = "RG",
+ [NETFS_READ_SINGLE] = "R1",
[NETFS_READ_FOR_WRITE] = "RW",
[NETFS_DIO_READ] = "DR",
[NETFS_WRITEBACK] = "WB",
+ [NETFS_WRITEBACK_SINGLE] = "W1",
[NETFS_WRITETHROUGH] = "WT",
[NETFS_UNBUFFERED_WRITE] = "UW",
[NETFS_DIO_WRITE] = "DW",
@@ -69,7 +71,7 @@ static int netfs_requests_seq_show(struct seq_file *m, void *v)
refcount_read(&rreq->ref),
rreq->flags,
rreq->error,
- atomic_read(&rreq->nr_outstanding),
+ 0,
rreq->start, rreq->submitted, rreq->len);
seq_putc(m, '\n');
return 0;
@@ -116,7 +118,7 @@ static int __init netfs_init(void)
goto error_reqpool;
netfs_subrequest_slab = kmem_cache_create("netfs_subrequest",
- sizeof(struct netfs_io_subrequest), 0,
+ sizeof(struct netfs_io_subrequest) + 16, 0,
SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
NULL);
if (!netfs_subrequest_slab)
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 78fe5796b2b2..7099aa07737a 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -8,113 +8,101 @@
#include <linux/swap.h>
#include "internal.h"
-/*
- * Make sure there's space in the rolling queue.
+/**
+ * netfs_alloc_folioq_buffer - Allocate buffer space into a folio queue
+ * @mapping: Address space to set on the folio (or NULL).
+ * @_buffer: Pointer to the folio queue to add to (may point to a NULL; updated).
+ * @_cur_size: Current size of the buffer (updated).
+ * @size: Target size of the buffer.
+ * @gfp: The allocation constraints.
*/
-struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq)
+int netfs_alloc_folioq_buffer(struct address_space *mapping,
+ struct folio_queue **_buffer,
+ size_t *_cur_size, ssize_t size, gfp_t gfp)
{
- struct folio_queue *tail = rreq->buffer_tail, *prev;
- unsigned int prev_nr_slots = 0;
-
- if (WARN_ON_ONCE(!rreq->buffer && tail) ||
- WARN_ON_ONCE(rreq->buffer && !tail))
- return ERR_PTR(-EIO);
-
- prev = tail;
- if (prev) {
- if (!folioq_full(tail))
- return tail;
- prev_nr_slots = folioq_nr_slots(tail);
- }
-
- tail = kmalloc(sizeof(*tail), GFP_NOFS);
- if (!tail)
- return ERR_PTR(-ENOMEM);
- netfs_stat(&netfs_n_folioq);
- folioq_init(tail);
- tail->prev = prev;
- if (prev)
- /* [!] NOTE: After we set prev->next, the consumer is entirely
- * at liberty to delete prev.
- */
- WRITE_ONCE(prev->next, tail);
-
- rreq->buffer_tail = tail;
- if (!rreq->buffer) {
- rreq->buffer = tail;
- iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
- } else {
- /* Make sure we don't leave the master iterator pointing to a
- * block that might get immediately consumed.
- */
- if (rreq->io_iter.folioq == prev &&
- rreq->io_iter.folioq_slot == prev_nr_slots) {
- rreq->io_iter.folioq = tail;
- rreq->io_iter.folioq_slot = 0;
+ struct folio_queue *tail = *_buffer, *p;
+
+ size = round_up(size, PAGE_SIZE);
+ if (*_cur_size >= size)
+ return 0;
+
+ if (tail)
+ while (tail->next)
+ tail = tail->next;
+
+ do {
+ struct folio *folio;
+ int order = 0, slot;
+
+ if (!tail || folioq_full(tail)) {
+ p = netfs_folioq_alloc(0, GFP_NOFS, netfs_trace_folioq_alloc_buffer);
+ if (!p)
+ return -ENOMEM;
+ if (tail) {
+ tail->next = p;
+ p->prev = tail;
+ } else {
+ *_buffer = p;
+ }
+ tail = p;
}
- }
- rreq->buffer_tail_slot = 0;
- return tail;
-}
-/*
- * Append a folio to the rolling queue.
- */
-int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
- bool needs_put)
-{
- struct folio_queue *tail;
- unsigned int slot, order = folio_order(folio);
+ if (size - *_cur_size > PAGE_SIZE)
+ order = umin(ilog2(size - *_cur_size) - PAGE_SHIFT,
+ MAX_PAGECACHE_ORDER);
- tail = netfs_buffer_make_space(rreq);
- if (IS_ERR(tail))
- return PTR_ERR(tail);
+ folio = folio_alloc(gfp, order);
+ if (!folio && order > 0)
+ folio = folio_alloc(gfp, 0);
+ if (!folio)
+ return -ENOMEM;
- rreq->io_iter.count += PAGE_SIZE << order;
+ folio->mapping = mapping;
+ folio->index = *_cur_size / PAGE_SIZE;
+ trace_netfs_folio(folio, netfs_folio_trace_alloc_buffer);
+ slot = folioq_append_mark(tail, folio);
+ *_cur_size += folioq_folio_size(tail, slot);
+ } while (*_cur_size < size);
- slot = folioq_append(tail, folio);
- /* Store the counter after setting the slot. */
- smp_store_release(&rreq->buffer_tail_slot, slot);
return 0;
}
+EXPORT_SYMBOL(netfs_alloc_folioq_buffer);
-/*
- * Delete the head of a rolling queue.
+/**
+ * netfs_free_folioq_buffer - Free a folio queue.
+ * @fq: The start of the folio queue to free
+ *
+ * Free up a chain of folio_queues and, if marked, the marked folios they point
+ * to.
*/
-struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
+void netfs_free_folioq_buffer(struct folio_queue *fq)
{
- struct folio_queue *head = wreq->buffer, *next = head->next;
-
- if (next)
- next->prev = NULL;
- netfs_stat_d(&netfs_n_folioq);
- kfree(head);
- wreq->buffer = next;
- return next;
-}
+ struct folio_queue *next;
+ struct folio_batch fbatch;
-/*
- * Clear out a rolling queue.
- */
-void netfs_clear_buffer(struct netfs_io_request *rreq)
-{
- struct folio_queue *p;
+ folio_batch_init(&fbatch);
+
+ for (; fq; fq = next) {
+ for (int slot = 0; slot < folioq_count(fq); slot++) {
+ struct folio *folio = folioq_folio(fq, slot);
- while ((p = rreq->buffer)) {
- rreq->buffer = p->next;
- for (int slot = 0; slot < folioq_count(p); slot++) {
- struct folio *folio = folioq_folio(p, slot);
- if (!folio)
+ if (!folio ||
+ !folioq_is_marked(fq, slot))
continue;
- if (folioq_is_marked(p, slot)) {
- trace_netfs_folio(folio, netfs_folio_trace_put);
- folio_put(folio);
- }
+
+ trace_netfs_folio(folio, netfs_folio_trace_put);
+ if (folio_batch_add(&fbatch, folio))
+ folio_batch_release(&fbatch);
}
+
netfs_stat_d(&netfs_n_folioq);
- kfree(p);
+ next = fq->next;
+ kfree(fq);
}
+
+ folio_batch_release(&fbatch);
}
+EXPORT_SYMBOL(netfs_free_folioq_buffer);
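
The two helpers exported above form a small standalone API for building and tearing down a folio_queue chain. A hedged usage sketch based only on the signatures shown here; demo_make_scratch_buffer and the 1MiB target size are invented for illustration and are not part of the patch:

/*
 * Illustrative sketch only: grow a standalone folio queue to at least
 * 1MiB and release it again, using only the two exported helpers.
 */
static int demo_make_scratch_buffer(void)
{
        struct folio_queue *buf = NULL;
        size_t cur_size = 0;
        int ret;

        ret = netfs_alloc_folioq_buffer(NULL /* no mapping */, &buf,
                                        &cur_size, SZ_1M, GFP_KERNEL);
        if (ret < 0)
                return ret;

        /* ... use 'buf' as backing store, e.g. via iov_iter_folio_queue() ... */

        netfs_free_folioq_buffer(buf);  /* also puts the marked folios */
        return 0;
}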
/*
* Reset the subrequest iterator to refer just to the region remaining to be
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 31e388ec6e48..dc6b41ef18b0 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -48,17 +48,20 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
spin_lock_init(&rreq->lock);
INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
- INIT_LIST_HEAD(&rreq->subrequests);
+ init_waitqueue_head(&rreq->waitq);
refcount_set(&rreq->ref, 1);
if (origin == NETFS_READAHEAD ||
origin == NETFS_READPAGE ||
origin == NETFS_READ_GAPS ||
+ origin == NETFS_READ_SINGLE ||
origin == NETFS_READ_FOR_WRITE ||
- origin == NETFS_DIO_READ)
- INIT_WORK(&rreq->work, netfs_read_termination_worker);
- else
+ origin == NETFS_DIO_READ) {
+ INIT_WORK(&rreq->work, netfs_read_collection_worker);
+ rreq->io_streams[0].avail = true;
+ } else {
INIT_WORK(&rreq->work, netfs_write_collection_worker);
+ }
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
if (file && file->f_flags & O_NONBLOCK)
@@ -92,14 +95,6 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
struct netfs_io_stream *stream;
int s;
- while (!list_empty(&rreq->subrequests)) {
- subreq = list_first_entry(&rreq->subrequests,
- struct netfs_io_subrequest, rreq_link);
- list_del(&subreq->rreq_link);
- netfs_put_subrequest(subreq, was_async,
- netfs_sreq_trace_put_clear);
- }
-
for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
stream = &rreq->io_streams[s];
while (!list_empty(&stream->subrequests)) {
@@ -143,7 +138,7 @@ static void netfs_free_request(struct work_struct *work)
}
kvfree(rreq->direct_bv);
}
- netfs_clear_buffer(rreq);
+ rolling_buffer_clear(&rreq->buffer);
if (atomic_dec_and_test(&ictx->io_count))
wake_up_var(&ictx->io_count);
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index e8624f5c7fcc..f65affa5a9e4 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -14,6 +14,14 @@
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
+/* Notes made in the collector */
+#define HIT_PENDING 0x01 /* A front op was still pending */
+#define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */
+#define BUFFERED 0x08 /* The pagecache needs cleaning up */
+#define NEED_RETRY 0x10 /* A front op requests retrying */
+#define COPY_TO_CACHE 0x40 /* Need to copy subrequest to cache */
+#define ABANDON_SREQ 0x80 /* Need to abandon untransferred part of subrequest */
+
/*
* Clear the unread part of an I/O request.
*/
@@ -31,14 +39,18 @@ static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
* cache the folio, we set the group to NETFS_FOLIO_COPY_TO_CACHE, mark it
* dirty and let writeback handle it.
*/
-static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
- struct netfs_io_request *rreq,
+static void netfs_unlock_read_folio(struct netfs_io_request *rreq,
struct folio_queue *folioq,
int slot)
{
struct netfs_folio *finfo;
struct folio *folio = folioq_folio(folioq, slot);
+ if (unlikely(folio_pos(folio) < rreq->abandon_to)) {
+ trace_netfs_folio(folio, netfs_folio_trace_abandon);
+ goto just_unlock;
+ }
+
flush_dcache_folio(folio);
folio_mark_uptodate(folio);
@@ -53,7 +65,7 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
kfree(finfo);
}
- if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+ if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags)) {
if (!WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
@@ -66,12 +78,11 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
folioq_clear(folioq, slot);
} else {
// TODO: Use of PG_private_2 is deprecated.
- if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
- netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
- else
- folioq_clear(folioq, slot);
+ if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags))
+ netfs_pgpriv2_copy_to_cache(rreq, folio);
}
+just_unlock:
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
if (folio->index == rreq->no_unlock_folio &&
test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
@@ -81,238 +92,249 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
folio_unlock(folio);
}
}
+
+ folioq_clear(folioq, slot);
}
/*
- * Unlock any folios that are now completely read. Returns true if the
- * subrequest is removed from the list.
+ * Unlock any folios we've finished with.
*/
-static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was_async)
+static void netfs_read_unlock_folios(struct netfs_io_request *rreq,
+ unsigned int *notes)
{
- struct netfs_io_subrequest *prev, *next;
- struct netfs_io_request *rreq = subreq->rreq;
- struct folio_queue *folioq = subreq->curr_folioq;
- size_t avail, prev_donated, next_donated, fsize, part, excess;
- loff_t fpos, start;
- loff_t fend;
- int slot = subreq->curr_folioq_slot;
-
- if (WARN(subreq->transferred > subreq->len,
- "Subreq overread: R%x[%x] %zu > %zu",
- rreq->debug_id, subreq->debug_index,
- subreq->transferred, subreq->len))
- subreq->transferred = subreq->len;
-
-next_folio:
- fsize = PAGE_SIZE << subreq->curr_folio_order;
- fpos = round_down(subreq->start + subreq->consumed, fsize);
- fend = fpos + fsize;
-
- if (WARN_ON_ONCE(!folioq) ||
- WARN_ON_ONCE(!folioq_folio(folioq, slot)) ||
- WARN_ON_ONCE(folioq_folio(folioq, slot)->index != fpos / PAGE_SIZE)) {
- pr_err("R=%08x[%x] s=%llx-%llx ctl=%zx/%zx/%zx sl=%u\n",
- rreq->debug_id, subreq->debug_index,
- subreq->start, subreq->start + subreq->transferred - 1,
- subreq->consumed, subreq->transferred, subreq->len,
- slot);
- if (folioq) {
- struct folio *folio = folioq_folio(folioq, slot);
-
- pr_err("folioq: orders=%02x%02x%02x%02x\n",
- folioq->orders[0], folioq->orders[1],
- folioq->orders[2], folioq->orders[3]);
- if (folio)
- pr_err("folio: %llx-%llx ix=%llx o=%u qo=%u\n",
- fpos, fend - 1, folio_pos(folio), folio_order(folio),
- folioq_folio_order(folioq, slot));
- }
- }
+ struct folio_queue *folioq = rreq->buffer.tail;
+ unsigned long long collected_to = rreq->collected_to;
+ unsigned int slot = rreq->buffer.first_tail_slot;
-donation_changed:
- /* Try to consume the current folio if we've hit or passed the end of
- * it. There's a possibility that this subreq doesn't start at the
- * beginning of the folio, in which case we need to donate to/from the
- * preceding subreq.
- *
- * We also need to include any potential donation back from the
- * following subreq.
- */
- prev_donated = READ_ONCE(subreq->prev_donated);
- next_donated = READ_ONCE(subreq->next_donated);
- if (prev_donated || next_donated) {
- spin_lock_bh(&rreq->lock);
- prev_donated = subreq->prev_donated;
- next_donated = subreq->next_donated;
- subreq->start -= prev_donated;
- subreq->len += prev_donated;
- subreq->transferred += prev_donated;
- prev_donated = subreq->prev_donated = 0;
- if (subreq->transferred == subreq->len) {
- subreq->len += next_donated;
- subreq->transferred += next_donated;
- next_donated = subreq->next_donated = 0;
+ if (rreq->cleaned_to >= rreq->collected_to)
+ return;
+
+ // TODO: Begin decryption
+
+ if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&rreq->buffer);
+ if (!folioq) {
+ rreq->front_folio_order = 0;
+ return;
}
- trace_netfs_sreq(subreq, netfs_sreq_trace_add_donations);
- spin_unlock_bh(&rreq->lock);
+ slot = 0;
}
- avail = subreq->transferred;
- if (avail == subreq->len)
- avail += next_donated;
- start = subreq->start;
- if (subreq->consumed == 0) {
- start -= prev_donated;
- avail += prev_donated;
- } else {
- start += subreq->consumed;
- avail -= subreq->consumed;
- }
- part = umin(avail, fsize);
-
- trace_netfs_progress(subreq, start, avail, part);
-
- if (start + avail >= fend) {
- if (fpos == start) {
- /* Flush, unlock and mark for caching any folio we've just read. */
- subreq->consumed = fend - subreq->start;
- netfs_unlock_read_folio(subreq, rreq, folioq, slot);
- folioq_mark2(folioq, slot);
- if (subreq->consumed >= subreq->len)
- goto remove_subreq;
- } else if (fpos < start) {
- excess = fend - subreq->start;
-
- spin_lock_bh(&rreq->lock);
- /* If we complete first on a folio split with the
- * preceding subreq, donate to that subreq - otherwise
- * we get the responsibility.
- */
- if (subreq->prev_donated != prev_donated) {
- spin_unlock_bh(&rreq->lock);
- goto donation_changed;
- }
+ for (;;) {
+ struct folio *folio;
+ unsigned long long fpos, fend;
+ unsigned int order;
+ size_t fsize;
- if (list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
- spin_unlock_bh(&rreq->lock);
- pr_err("Can't donate prior to front\n");
- goto bad;
- }
+ if (*notes & COPY_TO_CACHE)
+ set_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
- prev = list_prev_entry(subreq, rreq_link);
- WRITE_ONCE(prev->next_donated, prev->next_donated + excess);
- subreq->start += excess;
- subreq->len -= excess;
- subreq->transferred -= excess;
- trace_netfs_donate(rreq, subreq, prev, excess,
- netfs_trace_donate_tail_to_prev);
- trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
-
- if (subreq->consumed >= subreq->len)
- goto remove_subreq_locked;
- spin_unlock_bh(&rreq->lock);
- } else {
- pr_err("fpos > start\n");
- goto bad;
- }
+ folio = folioq_folio(folioq, slot);
+ if (WARN_ONCE(!folio_test_locked(folio),
+ "R=%08x: folio %lx is not locked\n",
+ rreq->debug_id, folio->index))
+ trace_netfs_folio(folio, netfs_folio_trace_not_locked);
+
+ order = folioq_folio_order(folioq, slot);
+ rreq->front_folio_order = order;
+ fsize = PAGE_SIZE << order;
+ fpos = folio_pos(folio);
+ fend = umin(fpos + fsize, rreq->i_size);
+
+ trace_netfs_collect_folio(rreq, folio, fend, collected_to);
- /* Advance the rolling buffer to the next folio. */
+ /* Unlock any folio we've transferred all of. */
+ if (collected_to < fend)
+ break;
+
+ netfs_unlock_read_folio(rreq, folioq, slot);
+ WRITE_ONCE(rreq->cleaned_to, fpos + fsize);
+ *notes |= MADE_PROGRESS;
+
+ clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
+
+ /* Clean up the head folioq. If we clear an entire folioq, then
+ * we can get rid of it provided it's not also the tail folioq
+ * being filled by the issuer.
+ */
+ folioq_clear(folioq, slot);
slot++;
if (slot >= folioq_nr_slots(folioq)) {
+ folioq = rolling_buffer_delete_spent(&rreq->buffer);
+ if (!folioq)
+ goto done;
slot = 0;
- folioq = folioq->next;
- subreq->curr_folioq = folioq;
+ trace_netfs_folioq(folioq, netfs_trace_folioq_read_progress);
}
- subreq->curr_folioq_slot = slot;
- if (folioq && folioq_folio(folioq, slot))
- subreq->curr_folio_order = folioq->orders[slot];
- if (!was_async)
- cond_resched();
- goto next_folio;
+
+ if (fpos + fsize >= collected_to)
+ break;
}
- /* Deal with partial progress. */
- if (subreq->transferred < subreq->len)
- return false;
+ rreq->buffer.tail = folioq;
+done:
+ rreq->buffer.first_tail_slot = slot;
+}
- /* Donate the remaining downloaded data to one of the neighbouring
- * subrequests. Note that we may race with them doing the same thing.
+/*
+ * Collect and assess the results of various read subrequests. We may need to
+ * retry some of the results.
+ *
+ * Note that we have a sequence of subrequests, which may be drawing on
+ * different sources and may or may not be the same size or starting position
+ * and may not even correspond in boundary alignment.
+ */
+static void netfs_collect_read_results(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *front, *remove;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ unsigned int notes;
+
+ _enter("%llx-%llx", rreq->start, rreq->start + rreq->len);
+ trace_netfs_rreq(rreq, netfs_rreq_trace_collect);
+ trace_netfs_collect(rreq);
+
+reassess:
+ if (rreq->origin == NETFS_READAHEAD ||
+ rreq->origin == NETFS_READPAGE ||
+ rreq->origin == NETFS_READ_FOR_WRITE)
+ notes = BUFFERED;
+ else
+ notes = 0;
+
+ /* Remove completed subrequests from the front of the stream and
+ * advance the completion point. We stop when we hit something that's
+ * in progress. The issuer thread may be adding stuff to the tail
+ * whilst we're doing this.
*/
- spin_lock_bh(&rreq->lock);
+ front = READ_ONCE(stream->front);
+ while (front) {
+ size_t transferred;
- if (subreq->prev_donated != prev_donated ||
- subreq->next_donated != next_donated) {
- spin_unlock_bh(&rreq->lock);
- cond_resched();
- goto donation_changed;
- }
+ trace_netfs_collect_sreq(rreq, front);
+ _debug("sreq [%x] %llx %zx/%zx",
+ front->debug_index, front->start, front->transferred, front->len);
- /* Deal with the trickiest case: that this subreq is in the middle of a
- * folio, not touching either edge, but finishes first. In such a
- * case, we donate to the previous subreq, if there is one and if it is
- * contiguous, so that the donation is only handled when that completes
- * - and remove this subreq from the list.
- *
- * If the previous subreq finished first, we will have acquired their
- * donation and should be able to unlock folios and/or donate nextwards.
- */
- if (!subreq->consumed &&
- !prev_donated &&
- !list_is_first(&subreq->rreq_link, &rreq->subrequests) &&
- subreq->start == prev->start + prev->len) {
- prev = list_prev_entry(subreq, rreq_link);
- WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
- subreq->start += subreq->len;
- subreq->len = 0;
- subreq->transferred = 0;
- trace_netfs_donate(rreq, subreq, prev, subreq->len,
- netfs_trace_donate_to_prev);
- trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
- goto remove_subreq_locked;
+ if (stream->collected_to < front->start) {
+ trace_netfs_collect_gap(rreq, stream, front->start, 'F');
+ stream->collected_to = front->start;
+ }
+
+ if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags))
+ notes |= HIT_PENDING;
+ smp_rmb(); /* Read counters after IN_PROGRESS flag. */
+ transferred = READ_ONCE(front->transferred);
+
+ /* If we can now collect the next folio, do so. We don't want
+ * to defer this as we have to decide whether we need to copy
+ * to the cache or not, and that may differ between adjacent
+ * subreqs.
+ */
+ if (notes & BUFFERED) {
+ size_t fsize = PAGE_SIZE << rreq->front_folio_order;
+
+ /* Clear the tail of a short read. */
+ if (!(notes & HIT_PENDING) &&
+ front->error == 0 &&
+ transferred < front->len &&
+ (test_bit(NETFS_SREQ_HIT_EOF, &front->flags) ||
+ test_bit(NETFS_SREQ_CLEAR_TAIL, &front->flags))) {
+ netfs_clear_unread(front);
+ transferred = front->transferred = front->len;
+ trace_netfs_sreq(front, netfs_sreq_trace_clear);
+ }
+
+ stream->collected_to = front->start + transferred;
+ rreq->collected_to = stream->collected_to;
+
+ if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &front->flags))
+ notes |= COPY_TO_CACHE;
+
+ if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
+ rreq->abandon_to = front->start + front->len;
+ front->transferred = front->len;
+ transferred = front->len;
+ trace_netfs_rreq(rreq, netfs_rreq_trace_set_abandon);
+ }
+ if (front->start + transferred >= rreq->cleaned_to + fsize ||
+ test_bit(NETFS_SREQ_HIT_EOF, &front->flags))
+ netfs_read_unlock_folios(rreq, &notes);
+ } else {
+ stream->collected_to = front->start + transferred;
+ rreq->collected_to = stream->collected_to;
+ }
+
+ /* Stall if the front is still undergoing I/O. */
+ if (notes & HIT_PENDING)
+ break;
+
+ if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
+ if (!stream->failed) {
+ stream->error = front->error;
+ rreq->error = front->error;
+ set_bit(NETFS_RREQ_FAILED, &rreq->flags);
+ stream->failed = true;
+ }
+ notes |= MADE_PROGRESS | ABANDON_SREQ;
+ } else if (test_bit(NETFS_SREQ_NEED_RETRY, &front->flags)) {
+ stream->need_retry = true;
+ notes |= NEED_RETRY | MADE_PROGRESS;
+ break;
+ } else {
+ if (!stream->failed)
+ stream->transferred = stream->collected_to - rreq->start;
+ notes |= MADE_PROGRESS;
+ }
+
+ /* Remove if completely consumed. */
+ stream->source = front->source;
+ spin_lock(&rreq->lock);
+
+ remove = front;
+ trace_netfs_sreq(front, netfs_sreq_trace_discard);
+ list_del_init(&front->rreq_link);
+ front = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ stream->front = front;
+ spin_unlock(&rreq->lock);
+ netfs_put_subrequest(remove, false,
+ notes & ABANDON_SREQ ?
+ netfs_sreq_trace_put_abandon :
+ netfs_sreq_trace_put_done);
}
- /* If we can't donate down the chain, donate up the chain instead. */
- excess = subreq->len - subreq->consumed + next_donated;
+ trace_netfs_collect_stream(rreq, stream);
+ trace_netfs_collect_state(rreq, rreq->collected_to, notes);
- if (!subreq->consumed)
- excess += prev_donated;
+ if (!(notes & BUFFERED))
+ rreq->cleaned_to = rreq->collected_to;
- if (list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
- rreq->prev_donated = excess;
- trace_netfs_donate(rreq, subreq, NULL, excess,
- netfs_trace_donate_to_deferred_next);
- } else {
- next = list_next_entry(subreq, rreq_link);
- WRITE_ONCE(next->prev_donated, excess);
- trace_netfs_donate(rreq, subreq, next, excess,
- netfs_trace_donate_to_next);
+ if (notes & NEED_RETRY)
+ goto need_retry;
+ if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &rreq->flags)) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_unpause);
+ clear_bit_unlock(NETFS_RREQ_PAUSE, &rreq->flags);
+ smp_mb__after_atomic(); /* Set PAUSE before task state */
+ wake_up(&rreq->waitq);
+ }
+
+ if (notes & MADE_PROGRESS) {
+ //cond_resched();
+ goto reassess;
}
- trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_next);
- subreq->len = subreq->consumed;
- subreq->transferred = subreq->consumed;
- goto remove_subreq_locked;
-
-remove_subreq:
- spin_lock_bh(&rreq->lock);
-remove_subreq_locked:
- subreq->consumed = subreq->len;
- list_del(&subreq->rreq_link);
- spin_unlock_bh(&rreq->lock);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_consumed);
- return true;
-
-bad:
- /* Errr... prev and next both donated to us, but insufficient to finish
- * the folio.
+
+out:
+ _leave(" = %x", notes);
+ return;
+
+need_retry:
+ /* Okay... We're going to have to retry parts of the stream. Note
+ * that any partially completed op will have had any wholly transferred
+ * folios removed from it.
*/
- printk("R=%08x[%x] s=%llx-%llx %zx/%zx/%zx\n",
- rreq->debug_id, subreq->debug_index,
- subreq->start, subreq->start + subreq->transferred - 1,
- subreq->consumed, subreq->transferred, subreq->len);
- printk("folio: %llx-%llx\n", fpos, fend - 1);
- printk("donated: prev=%zx next=%zx\n", prev_donated, next_donated);
- printk("s=%llx av=%zx part=%zx\n", start, avail, part);
- BUG();
+ _debug("retry");
+ netfs_retry_reads(rreq);
+ goto out;
}
/*
@@ -321,12 +343,13 @@ bad:
static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
unsigned int i;
/* Collect unbuffered reads and direct reads, adding up the transfer
* sizes until we find the first short or failed subrequest.
*/
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
rreq->transferred += subreq->transferred;
if (subreq->transferred < subreq->len ||
@@ -359,25 +382,67 @@ static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
}
/*
- * Assess the state of a read request and decide what to do next.
+ * Do processing after reading a monolithic single object.
+ */
+static void netfs_rreq_assess_single(struct netfs_io_request *rreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+
+ if (!rreq->error && stream->source == NETFS_DOWNLOAD_FROM_SERVER &&
+ fscache_resources_valid(&rreq->cache_resources)) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_dirty);
+ netfs_single_mark_inode_dirty(rreq->inode);
+ }
+
+ if (rreq->iocb) {
+ rreq->iocb->ki_pos += rreq->transferred;
+ if (rreq->iocb->ki_complete)
+ rreq->iocb->ki_complete(
+ rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
+ }
+ if (rreq->netfs_ops->done)
+ rreq->netfs_ops->done(rreq);
+}
+
+/*
+ * Perform the collection of subrequests and folios.
*
* Note that we're in normal kernel thread context at this point, possibly
* running on a workqueue.
*/
-static void netfs_rreq_assess(struct netfs_io_request *rreq)
+static void netfs_read_collection(struct netfs_io_request *rreq)
{
- trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
- //netfs_rreq_is_still_valid(rreq);
+ netfs_collect_read_results(rreq);
- if (test_and_clear_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags)) {
- netfs_retry_reads(rreq);
+ /* We're done when the app thread has finished posting subreqs and the
+ * queue is empty.
+ */
+ if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
+ return;
+ smp_rmb(); /* Read ALL_QUEUED before subreq lists. */
+
+ if (!list_empty(&stream->subrequests))
return;
- }
- if (rreq->origin == NETFS_DIO_READ ||
- rreq->origin == NETFS_READ_GAPS)
+ /* Okay, declare that all I/O is complete. */
+ rreq->transferred = stream->transferred;
+ trace_netfs_rreq(rreq, netfs_rreq_trace_complete);
+
+ //netfs_rreq_is_still_valid(rreq);
+
+ switch (rreq->origin) {
+ case NETFS_DIO_READ:
+ case NETFS_READ_GAPS:
netfs_rreq_assess_dio(rreq);
+ break;
+ case NETFS_READ_SINGLE:
+ netfs_rreq_assess_single(rreq);
+ break;
+ default:
+ break;
+ }
task_io_account_read(rreq->transferred);
trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
@@ -386,61 +451,66 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq)
trace_netfs_rreq(rreq, netfs_rreq_trace_done);
netfs_clear_subrequests(rreq, false);
netfs_unlock_abandoned_read_pages(rreq);
- if (unlikely(test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)))
- netfs_pgpriv2_write_to_the_cache(rreq);
+ if (unlikely(rreq->copy_to_cache))
+ netfs_pgpriv2_end_copy_to_cache(rreq);
}
-void netfs_read_termination_worker(struct work_struct *work)
+void netfs_read_collection_worker(struct work_struct *work)
{
- struct netfs_io_request *rreq =
- container_of(work, struct netfs_io_request, work);
+ struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);
+
netfs_see_request(rreq, netfs_rreq_trace_see_work);
- netfs_rreq_assess(rreq);
- netfs_put_request(rreq, false, netfs_rreq_trace_put_work_complete);
+ if (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+ netfs_read_collection(rreq);
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_work);
}
/*
- * Handle the completion of all outstanding I/O operations on a read request.
- * We inherit a ref from the caller.
+ * Wake the collection work item.
*/
-void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async)
+void netfs_wake_read_collector(struct netfs_io_request *rreq)
{
- if (!was_async)
- return netfs_rreq_assess(rreq);
- if (!work_pending(&rreq->work)) {
- netfs_get_request(rreq, netfs_rreq_trace_get_work);
- if (!queue_work(system_unbound_wq, &rreq->work))
- netfs_put_request(rreq, was_async, netfs_rreq_trace_put_work_nq);
+ if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+ if (!work_pending(&rreq->work)) {
+ netfs_get_request(rreq, netfs_rreq_trace_get_work);
+ if (!queue_work(system_unbound_wq, &rreq->work))
+ netfs_put_request(rreq, true, netfs_rreq_trace_put_work_nq);
+ }
+ } else {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
+ wake_up(&rreq->waitq);
}
}
/**
* netfs_read_subreq_progress - Note progress of a read operation.
* @subreq: The read request that has terminated.
- * @was_async: True if we're in an asynchronous context.
*
* This tells the read side of netfs lib that a contributory I/O operation has
* made some progress and that it may be possible to unlock some folios.
*
* Before calling, the filesystem should update subreq->transferred to track
* the amount of data copied into the output buffer.
- *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
*/
-void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
- bool was_async)
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ size_t fsize = PAGE_SIZE << rreq->front_folio_order;
trace_netfs_sreq(subreq, netfs_sreq_trace_progress);
- if (subreq->transferred > subreq->consumed &&
+ /* If we are at the head of the queue, wake up the collector,
+ * getting a ref to it if we were the ones to do so.
+ */
+ if (subreq->start + subreq->transferred > rreq->cleaned_to + fsize &&
(rreq->origin == NETFS_READAHEAD ||
rreq->origin == NETFS_READPAGE ||
- rreq->origin == NETFS_READ_FOR_WRITE)) {
- netfs_consume_read_data(subreq, was_async);
+ rreq->origin == NETFS_READ_FOR_WRITE) &&
+ list_is_first(&subreq->rreq_link, &stream->subrequests)
+ ) {
__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ netfs_wake_read_collector(rreq);
}
}
EXPORT_SYMBOL(netfs_read_subreq_progress);
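
With the reworked completion API documented above and below, the outcome of a subrequest travels in the subrequest itself: the filesystem updates subreq->transferred and subreq->error and then calls netfs_read_subreq_terminated(subreq), or netfs_read_subreq_progress(subreq) for interim progress. A hedged sketch of what a filesystem's completion callback might now look like; demo_fs_read_done and its calling convention are hypothetical, not part of the patch:

/*
 * Illustrative sketch only: report the result of a network read back to
 * netfslib under the argument-free completion API.
 */
static void demo_fs_read_done(struct netfs_io_subrequest *subreq,
                              ssize_t transferred_or_error)
{
        if (transferred_or_error < 0)
                subreq->error = transferred_or_error;
        else
                subreq->transferred += transferred_or_error;

        /* 0 in subreq->error means success or a retryable short read. */
        netfs_read_subreq_terminated(subreq);
}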
@@ -448,27 +518,23 @@ EXPORT_SYMBOL(netfs_read_subreq_progress);
/**
* netfs_read_subreq_terminated - Note the termination of an I/O operation.
* @subreq: The I/O request that has terminated.
- * @error: Error code indicating type of completion.
- * @was_async: The termination was asynchronous
*
* This tells the read helper that a contributory I/O operation has terminated,
* one way or another, and that it should integrate the results.
*
- * The caller indicates the outcome of the operation through @error, supplying
- * 0 to indicate a successful or retryable transfer (if NETFS_SREQ_NEED_RETRY
- * is set) or a negative error code. The helper will look after reissuing I/O
- * operations as appropriate and writing downloaded data to the cache.
+ * The caller indicates the outcome of the operation through @subreq->error,
+ * supplying 0 to indicate a successful or retryable transfer (if
+ * NETFS_SREQ_NEED_RETRY is set) or a negative error code. The helper will
+ * look after reissuing I/O operations as appropriate and writing downloaded
+ * data to the cache.
*
* Before calling, the filesystem should update subreq->transferred to track
* the amount of data copied into the output buffer.
- *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
*/
-void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
- int error, bool was_async)
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *rreq = subreq->rreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
switch (subreq->source) {
case NETFS_READ_FROM_CACHE:
@@ -481,71 +547,156 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
break;
}
- if (rreq->origin != NETFS_DIO_READ) {
- /* Collect buffered reads.
- *
- * If the read completed validly short, then we can clear the
- * tail before going on to unlock the folios.
- */
- if (error == 0 && subreq->transferred < subreq->len &&
- (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags) ||
- test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags))) {
- netfs_clear_unread(subreq);
- subreq->transferred = subreq->len;
- trace_netfs_sreq(subreq, netfs_sreq_trace_clear);
- }
- if (subreq->transferred > subreq->consumed &&
- (rreq->origin == NETFS_READAHEAD ||
- rreq->origin == NETFS_READPAGE ||
- rreq->origin == NETFS_READ_FOR_WRITE)) {
- netfs_consume_read_data(subreq, was_async);
- __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
- }
- rreq->transferred += subreq->transferred;
- }
-
/* Deal with retry requests, short reads and errors. If we retry
* but don't make progress, we abandon the attempt.
*/
- if (!error && subreq->transferred < subreq->len) {
+ if (!subreq->error && subreq->transferred < subreq->len) {
if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
+ } else if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_need_clear);
+ } else if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_need_retry);
+ } else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
+ __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_partial_read);
} else {
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ subreq->error = -ENODATA;
trace_netfs_sreq(subreq, netfs_sreq_trace_short);
- if (subreq->transferred > subreq->consumed) {
- /* If we didn't read new data, abandon retry. */
- if (subreq->retry_count &&
- test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
- __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
- }
- } else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
- __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
- } else {
- __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
- error = -ENODATA;
- }
}
}
- subreq->error = error;
- trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
- if (unlikely(error < 0)) {
- trace_netfs_failure(rreq, subreq, error, netfs_fail_read);
+ if (unlikely(subreq->error < 0)) {
+ trace_netfs_failure(rreq, subreq, subreq->error, netfs_fail_read);
if (subreq->source == NETFS_READ_FROM_CACHE) {
netfs_stat(&netfs_n_rh_read_failed);
+ __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
} else {
netfs_stat(&netfs_n_rh_download_failed);
- set_bit(NETFS_RREQ_FAILED, &rreq->flags);
- rreq->error = subreq->error;
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
}
+ trace_netfs_rreq(rreq, netfs_rreq_trace_set_pause);
+ set_bit(NETFS_RREQ_PAUSE, &rreq->flags);
}
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_terminated(rreq, was_async);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+
+ clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */
+
+ /* If we are at the head of the queue, wake up the collector. */
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests))
+ netfs_wake_read_collector(rreq);
- netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+ netfs_put_subrequest(subreq, true, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_read_subreq_terminated);
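For comparison, a hedged sketch of a filesystem completion handler under the new convention, where the outcome travels in subreq->error rather than being passed as an argument (the myfs_read_done name and result parameter are illustrative only; it mirrors the cache-termination helper that follows):

	static void myfs_read_done(struct netfs_io_subrequest *subreq, ssize_t result)
	{
		if (result < 0) {
			subreq->error = result;
		} else {
			subreq->error = 0;
			if (result > 0) {
				subreq->transferred += result;
				__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
			}
		}
		netfs_read_subreq_terminated(subreq);
	}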
+
+/*
+ * Handle termination of a read from the cache.
+ */
+void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async)
+{
+ struct netfs_io_subrequest *subreq = priv;
+
+ if (transferred_or_error > 0) {
+ subreq->error = 0;
+ if (transferred_or_error > 0) {
+ subreq->transferred += transferred_or_error;
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
+ }
+ } else {
+ subreq->error = transferred_or_error;
+ }
+ netfs_read_subreq_terminated(subreq);
+}
+
+/*
+ * Wait for the read operation to complete, successfully or otherwise.
+ */
+ssize_t netfs_wait_for_read(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ DEFINE_WAIT(myself);
+ ssize_t ret;
+
+ for (;;) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
+ prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+ subreq = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ if (subreq &&
+ (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
+ test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
+ __set_current_state(TASK_RUNNING);
+ netfs_read_collection(rreq);
+ continue;
+ }
+
+ if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+ break;
+
+ schedule();
+ trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
+ }
+
+ finish_wait(&rreq->waitq, &myself);
+
+ ret = rreq->error;
+ if (ret == 0) {
+ ret = rreq->transferred;
+ switch (rreq->origin) {
+ case NETFS_DIO_READ:
+ case NETFS_READ_SINGLE:
+ ret = rreq->transferred;
+ break;
+ default:
+ if (rreq->submitted < rreq->len) {
+ trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+ ret = -EIO;
+ }
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Wait for a paused read operation to unpause or complete in some manner.
+ */
+void netfs_wait_for_pause(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ DEFINE_WAIT(myself);
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause);
+
+ for (;;) {
+ trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
+ prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
+
+ subreq = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ if (subreq &&
+ (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
+ test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
+ __set_current_state(TASK_RUNNING);
+ netfs_read_collection(rreq);
+ continue;
+ }
+
+ if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
+ !test_bit(NETFS_RREQ_PAUSE, &rreq->flags))
+ break;
+
+ schedule();
+ trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue);
+ }
+
+ finish_wait(&rreq->waitq, &myself);
+}
diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c
index 54d5004fec18..cf7727060215 100644
--- a/fs/netfs/read_pgpriv2.c
+++ b/fs/netfs/read_pgpriv2.c
@@ -14,52 +14,11 @@
#include "internal.h"
/*
- * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2. The
- * third mark in the folio queue is used to indicate that this folio needs
- * writing.
- */
-void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
- struct netfs_io_request *rreq,
- struct folio_queue *folioq,
- int slot)
-{
- struct folio *folio = folioq_folio(folioq, slot);
-
- trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
- folio_start_private_2(folio);
- folioq_mark3(folioq, slot);
-}
-
-/*
- * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
- * unrecoverable error.
- */
-static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
-{
- struct folio *folio;
- int slot;
-
- while (folioq) {
- if (!folioq->marks3) {
- folioq = folioq->next;
- continue;
- }
-
- slot = __ffs(folioq->marks3);
- folio = folioq_folio(folioq, slot);
-
- trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
- folio_end_private_2(folio);
- folioq_unmark3(folioq, slot);
- }
-}
-
-/*
* [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
*/
-static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
+static void netfs_pgpriv2_copy_folio(struct netfs_io_request *creq, struct folio *folio)
{
- struct netfs_io_stream *cache = &wreq->io_streams[1];
+ struct netfs_io_stream *cache = &creq->io_streams[1];
size_t fsize = folio_size(folio), flen = fsize;
loff_t fpos = folio_pos(folio), i_size;
bool to_eof = false;
@@ -70,17 +29,17 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
* of the page to beyond it, but cannot move i_size into or through the
* page since we have it locked.
*/
- i_size = i_size_read(wreq->inode);
+ i_size = i_size_read(creq->inode);
if (fpos >= i_size) {
/* mmap beyond eof. */
_debug("beyond eof");
folio_end_private_2(folio);
- return 0;
+ return;
}
- if (fpos + fsize > wreq->i_size)
- wreq->i_size = i_size;
+ if (fpos + fsize > creq->i_size)
+ creq->i_size = i_size;
if (flen > i_size - fpos) {
flen = i_size - fpos;
@@ -94,8 +53,10 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
trace_netfs_folio(folio, netfs_folio_trace_store_copy);
/* Attach the folio to the rolling buffer. */
- if (netfs_buffer_append_folio(wreq, folio, false) < 0)
- return -ENOMEM;
+ if (rolling_buffer_append(&creq->buffer, folio, 0) < 0) {
+ clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &creq->flags);
+ return;
+ }
cache->submit_extendable_to = fsize;
cache->submit_off = 0;
@@ -109,11 +70,11 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
do {
ssize_t part;
- wreq->io_iter.iov_offset = cache->submit_off;
+ creq->buffer.iter.iov_offset = cache->submit_off;
- atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
+ atomic64_set(&creq->issued_to, fpos + cache->submit_off);
cache->submit_extendable_to = fsize - cache->submit_off;
- part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
+ part = netfs_advance_write(creq, cache, fpos + cache->submit_off,
cache->submit_len, to_eof);
cache->submit_off += part;
if (part > cache->submit_len)
@@ -122,98 +83,95 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
cache->submit_len -= part;
} while (cache->submit_len > 0);
- wreq->io_iter.iov_offset = 0;
- iov_iter_advance(&wreq->io_iter, fsize);
- atomic64_set(&wreq->issued_to, fpos + fsize);
+ creq->buffer.iter.iov_offset = 0;
+ rolling_buffer_advance(&creq->buffer, fsize);
+ atomic64_set(&creq->issued_to, fpos + fsize);
if (flen < fsize)
- netfs_issue_write(wreq, cache);
-
- _leave(" = 0");
- return 0;
+ netfs_issue_write(creq, cache);
}
/*
- * [DEPRECATED] Go through the buffer and write any folios that are marked with
- * the third mark to the cache.
+ * [DEPRECATED] Set up copying to the cache.
*/
-void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
+static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
+ struct netfs_io_request *rreq, struct folio *folio)
{
- struct netfs_io_request *wreq;
- struct folio_queue *folioq;
- struct folio *folio;
- int error = 0;
- int slot = 0;
-
- _enter("");
+ struct netfs_io_request *creq;
if (!fscache_resources_valid(&rreq->cache_resources))
- goto couldnt_start;
+ goto cancel;
- /* Need the first folio to be able to set up the op. */
- for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
- if (folioq->marks3) {
- slot = __ffs(folioq->marks3);
- break;
- }
- }
- if (!folioq)
- return;
- folio = folioq_folio(folioq, slot);
-
- wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
+ creq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
NETFS_PGPRIV2_COPY_TO_CACHE);
- if (IS_ERR(wreq)) {
- kleave(" [create %ld]", PTR_ERR(wreq));
- goto couldnt_start;
- }
+ if (IS_ERR(creq))
+ goto cancel;
- trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
+ if (!creq->io_streams[1].avail)
+ goto cancel_put;
+
+ trace_netfs_write(creq, netfs_write_trace_copy_to_cache);
netfs_stat(&netfs_n_wh_copy_to_cache);
- if (!wreq->io_streams[1].avail) {
- netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
- goto couldnt_start;
- }
+ rreq->copy_to_cache = creq;
+ return creq;
+
+cancel_put:
+ netfs_put_request(creq, false, netfs_rreq_trace_put_return);
+cancel:
+ rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
+ clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
+ return ERR_PTR(-ENOBUFS);
+}
- for (;;) {
- error = netfs_pgpriv2_copy_folio(wreq, folio);
- if (error < 0)
- break;
+/*
+ * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2 and add
+ * it to the copy write request.
+ */
+void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio)
+{
+ struct netfs_io_request *creq = rreq->copy_to_cache;
- folioq_unmark3(folioq, slot);
- if (!folioq->marks3) {
- folioq = folioq->next;
- if (!folioq)
- break;
- }
+ if (!creq)
+ creq = netfs_pgpriv2_begin_copy_to_cache(rreq, folio);
+ if (IS_ERR(creq))
+ return;
- slot = __ffs(folioq->marks3);
- folio = folioq_folio(folioq, slot);
- }
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_start_private_2(folio);
+ netfs_pgpriv2_copy_folio(creq, folio);
+}
- netfs_issue_write(wreq, &wreq->io_streams[1]);
+/*
+ * [DEPRECATED] End writing to the cache, flushing out any outstanding writes.
+ */
+void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
+{
+ struct netfs_io_request *creq = rreq->copy_to_cache;
+
+ if (IS_ERR_OR_NULL(creq))
+ return;
+
+ netfs_issue_write(creq, &creq->io_streams[1]);
smp_wmb(); /* Write lists before ALL_QUEUED. */
- set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+ set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);
- netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
- _leave(" = %d", error);
-couldnt_start:
- netfs_pgpriv2_cancel(rreq->buffer);
+ netfs_put_request(creq, false, netfs_rreq_trace_put_return);
+ creq->copy_to_cache = NULL;
}
/*
* [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
* copying.
*/
-bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
+bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *creq)
{
- struct folio_queue *folioq = wreq->buffer;
- unsigned long long collected_to = wreq->collected_to;
- unsigned int slot = wreq->buffer_head_slot;
+ struct folio_queue *folioq = creq->buffer.tail;
+ unsigned long long collected_to = creq->collected_to;
+ unsigned int slot = creq->buffer.first_tail_slot;
bool made_progress = false;
if (slot >= folioq_nr_slots(folioq)) {
- folioq = netfs_delete_buffer_head(wreq);
+ folioq = rolling_buffer_delete_spent(&creq->buffer);
slot = 0;
}
@@ -225,16 +183,16 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
folio = folioq_folio(folioq, slot);
if (WARN_ONCE(!folio_test_private_2(folio),
"R=%08x: folio %lx is not marked private_2\n",
- wreq->debug_id, folio->index))
+ creq->debug_id, folio->index))
trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
fpos = folio_pos(folio);
fsize = folio_size(folio);
flen = fsize;
- fend = min_t(unsigned long long, fpos + flen, wreq->i_size);
+ fend = min_t(unsigned long long, fpos + flen, creq->i_size);
- trace_netfs_collect_folio(wreq, folio, fend, collected_to);
+ trace_netfs_collect_folio(creq, folio, fend, collected_to);
/* Unlock any folio we've transferred all of. */
if (collected_to < fend)
@@ -242,7 +200,7 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
trace_netfs_folio(folio, netfs_folio_trace_end_copy);
folio_end_private_2(folio);
- wreq->cleaned_to = fpos + fsize;
+ creq->cleaned_to = fpos + fsize;
made_progress = true;
/* Clean up the head folioq. If we clear an entire folioq, then
@@ -252,9 +210,9 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
folioq_clear(folioq, slot);
slot++;
if (slot >= folioq_nr_slots(folioq)) {
- if (READ_ONCE(wreq->buffer_tail) == folioq)
- break;
- folioq = netfs_delete_buffer_head(wreq);
+ folioq = rolling_buffer_delete_spent(&creq->buffer);
+ if (!folioq)
+ goto done;
slot = 0;
}
@@ -262,7 +220,8 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
break;
}
- wreq->buffer = folioq;
- wreq->buffer_head_slot = slot;
+ creq->buffer.tail = folioq;
+done:
+ creq->buffer.first_tail_slot = slot;
return made_progress;
}
diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c
index 16b676c68dcd..2290af0d51ac 100644
--- a/fs/netfs/read_retry.c
+++ b/fs/netfs/read_retry.c
@@ -12,15 +12,7 @@
static void netfs_reissue_read(struct netfs_io_request *rreq,
struct netfs_io_subrequest *subreq)
{
- struct iov_iter *io_iter = &subreq->io_iter;
-
- if (iov_iter_is_folioq(io_iter)) {
- subreq->curr_folioq = (struct folio_queue *)io_iter->folioq;
- subreq->curr_folioq_slot = io_iter->folioq_slot;
- subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot];
- }
-
- atomic_inc(&rreq->nr_outstanding);
+ __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
subreq->rreq->netfs_ops->issue_read(subreq);
@@ -33,13 +25,12 @@ static void netfs_reissue_read(struct netfs_io_request *rreq,
static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
- struct netfs_io_stream *stream0 = &rreq->io_streams[0];
- LIST_HEAD(sublist);
- LIST_HEAD(queue);
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ struct list_head *next;
_enter("R=%x", rreq->debug_id);
- if (list_empty(&rreq->subrequests))
+ if (list_empty(&stream->subrequests))
return;
if (rreq->netfs_ops->retry_request)
@@ -50,9 +41,7 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
*/
if (!rreq->netfs_ops->prepare_read &&
!rreq->cache_resources.ops) {
- struct netfs_io_subrequest *subreq;
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
break;
if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
@@ -75,48 +64,44 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
* populating with smaller subrequests. In the event that the subreq
* we just launched finishes before we insert the next subreq, it'll
* fill in rreq->prev_donated instead.
-
+ *
* Note: Alternatively, we could split the tail subrequest right before
* we reissue it and fix up the donations under lock.
*/
- list_splice_init(&rreq->subrequests, &queue);
+ next = stream->subrequests.next;
do {
- struct netfs_io_subrequest *from;
+ struct netfs_io_subrequest *from, *to, *tmp;
struct iov_iter source;
unsigned long long start, len;
- size_t part, deferred_next_donated = 0;
+ size_t part;
bool boundary = false;
/* Go through the subreqs and find the next span of contiguous
* buffer that we then rejig (cifs, for example, needs the
* rsize renegotiating) and reissue.
*/
- from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link);
- list_move_tail(&from->rreq_link, &sublist);
+ from = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ to = from;
start = from->start + from->transferred;
len = from->len - from->transferred;
- _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx",
+ _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx",
rreq->debug_id, from->debug_index,
- from->start, from->consumed, from->transferred, from->len);
+ from->start, from->transferred, from->len);
if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
!test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
goto abandon;
- deferred_next_donated = from->next_donated;
- while ((subreq = list_first_entry_or_null(
- &queue, struct netfs_io_subrequest, rreq_link))) {
- if (subreq->start != start + len ||
- subreq->transferred > 0 ||
+ list_for_each_continue(next, &stream->subrequests) {
+ subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ if (subreq->start + subreq->transferred != start + len ||
+ test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
!test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
break;
- list_move_tail(&subreq->rreq_link, &sublist);
- len += subreq->len;
- deferred_next_donated = subreq->next_donated;
- if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags))
- break;
+ to = subreq;
+ len += to->len;
}
_debug(" - range: %llx-%llx %llx", start, start + len - 1, len);
@@ -129,38 +114,31 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
source.count = len;
/* Work through the sublist. */
- while ((subreq = list_first_entry_or_null(
- &sublist, struct netfs_io_subrequest, rreq_link))) {
- list_del(&subreq->rreq_link);
-
+ subreq = from;
+ list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
+ if (!len)
+ break;
subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
subreq->start = start - subreq->transferred;
subreq->len = len + subreq->transferred;
- stream0->sreq_max_len = subreq->len;
-
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
subreq->retry_count++;
- spin_lock_bh(&rreq->lock);
- list_add_tail(&subreq->rreq_link, &rreq->subrequests);
- subreq->prev_donated += rreq->prev_donated;
- rreq->prev_donated = 0;
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
- spin_unlock_bh(&rreq->lock);
-
- BUG_ON(!len);
/* Renegotiate max_len (rsize) */
+ stream->sreq_max_len = subreq->len;
if (rreq->netfs_ops->prepare_read &&
rreq->netfs_ops->prepare_read(subreq) < 0) {
trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ goto abandon;
}
- part = umin(len, stream0->sreq_max_len);
- if (unlikely(rreq->io_streams[0].sreq_max_segs))
- part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs);
+ part = umin(len, stream->sreq_max_len);
+ if (unlikely(stream->sreq_max_segs))
+ part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
subreq->len = subreq->transferred + part;
subreq->io_iter = source;
iov_iter_truncate(&subreq->io_iter, part);
@@ -170,57 +148,105 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
if (!len) {
if (boundary)
__set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
- subreq->next_donated = deferred_next_donated;
} else {
__clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
- subreq->next_donated = 0;
}
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
netfs_reissue_read(rreq, subreq);
- if (!len)
+ if (subreq == to)
break;
-
- /* If we ran out of subrequests, allocate another. */
- if (list_empty(&sublist)) {
- subreq = netfs_alloc_subrequest(rreq);
- if (!subreq)
- goto abandon;
- subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
- subreq->start = start;
-
- /* We get two refs, but need just one. */
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_new);
- trace_netfs_sreq(subreq, netfs_sreq_trace_split);
- list_add_tail(&subreq->rreq_link, &sublist);
- }
}
/* If we managed to use fewer subreqs, we can discard the
- * excess.
+ * excess; if we used the same number, then we're done.
*/
- while ((subreq = list_first_entry_or_null(
- &sublist, struct netfs_io_subrequest, rreq_link))) {
- trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
- list_del(&subreq->rreq_link);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
+ if (!len) {
+ if (subreq == to)
+ continue;
+ list_for_each_entry_safe_from(subreq, tmp,
+ &stream->subrequests, rreq_link) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
+ if (subreq == to)
+ break;
+ }
+ continue;
}
- } while (!list_empty(&queue));
+ /* We ran out of subrequests, so we need to allocate some more
+ * and insert them after.
+ */
+ do {
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq) {
+ subreq = to;
+ goto abandon_after;
+ }
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ subreq->start = start;
+ subreq->len = len;
+ subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
+ subreq->stream_nr = stream->stream_nr;
+ subreq->retry_count = 1;
+
+ trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref),
+ netfs_sreq_trace_new);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+
+ list_add(&subreq->rreq_link, &to->rreq_link);
+ to = list_next_entry(to, rreq_link);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ stream->sreq_max_len = umin(len, rreq->rsize);
+ stream->sreq_max_segs = 0;
+ if (unlikely(stream->sreq_max_segs))
+ part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
+
+ netfs_stat(&netfs_n_rh_download);
+ if (rreq->netfs_ops->prepare_read(subreq) < 0) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed);
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ goto abandon;
+ }
+
+ part = umin(len, stream->sreq_max_len);
+ subreq->len = subreq->transferred + part;
+ subreq->io_iter = source;
+ iov_iter_truncate(&subreq->io_iter, part);
+ iov_iter_advance(&source, part);
+
+ len -= part;
+ start += part;
+ if (!len && boundary) {
+ __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
+ boundary = false;
+ }
+
+ netfs_reissue_read(rreq, subreq);
+ } while (len);
+
+ } while (!list_is_head(next, &stream->subrequests));
return;
- /* If we hit ENOMEM, fail all remaining subrequests */
+ /* If we hit an error, fail all remaining incomplete subrequests */
+abandon_after:
+ if (list_is_last(&subreq->rreq_link, &stream->subrequests))
+ return;
+ subreq = list_next_entry(subreq, rreq_link);
abandon:
- list_splice_init(&sublist, &queue);
- list_for_each_entry(subreq, &queue, rreq_link) {
- if (!subreq->error)
- subreq->error = -ENOMEM;
- __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
+ if (!subreq->error &&
+ !test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
+ !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
+ continue;
+ subreq->error = -ENOMEM;
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
}
- spin_lock_bh(&rreq->lock);
- list_splice_tail_init(&queue, &rreq->subrequests);
- spin_unlock_bh(&rreq->lock);
}
/*
@@ -228,14 +254,19 @@ abandon:
*/
void netfs_retry_reads(struct netfs_io_request *rreq)
{
- trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
+ struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
- atomic_inc(&rreq->nr_outstanding);
+ /* Wait for all outstanding I/O to quiesce before performing retries as
+ * we may need to renegotiate the I/O sizes.
+ */
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+ wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
+ TASK_UNINTERRUPTIBLE);
+ }
+ trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
netfs_retry_read_subrequests(rreq);
-
- if (atomic_dec_and_test(&rreq->nr_outstanding))
- netfs_rreq_terminated(rreq, false);
}
/*
@@ -246,7 +277,7 @@ void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{
struct folio_queue *p;
- for (p = rreq->buffer; p; p = p->next) {
+ for (p = rreq->buffer.tail; p; p = p->next) {
for (int slot = 0; slot < folioq_count(p); slot++) {
struct folio *folio = folioq_folio(p, slot);
diff --git a/fs/netfs/read_single.c b/fs/netfs/read_single.c
new file mode 100644
index 000000000000..fea0ecdecc53
--- /dev/null
+++ b/fs/netfs/read_single.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Single, monolithic object support (e.g. AFS directory).
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+/**
+ * netfs_single_mark_inode_dirty - Mark a single, monolithic object inode dirty
+ * @inode: The inode to mark
+ *
+ * Mark an inode that contains a single, monolithic object as dirty so that its
+ * writepages op will get called. If set, the SINGLE_NO_UPLOAD flag indicates
+ * that the object will only be written to the cache and not uploaded (e.g. AFS
+ * directory contents).
+ */
+void netfs_single_mark_inode_dirty(struct inode *inode)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+ bool cache_only = test_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &ictx->flags);
+ bool caching = fscache_cookie_enabled(netfs_i_cookie(netfs_inode(inode)));
+
+ if (cache_only && !caching)
+ return;
+
+ mark_inode_dirty(inode);
+
+ if (caching && !(inode->i_state & I_PINNING_NETFS_WB)) {
+ bool need_use = false;
+
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_state & I_PINNING_NETFS_WB)) {
+ inode->i_state |= I_PINNING_NETFS_WB;
+ need_use = true;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (need_use)
+ fscache_use_cookie(netfs_i_cookie(ictx), true);
+ }
+
+}
+EXPORT_SYMBOL(netfs_single_mark_inode_dirty);
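A minimal sketch of how a filesystem keeping a cache-only monolithic object (such as directory contents) might use this; the myfs_* names are hypothetical and error handling is elided:

	static void myfs_init_single_object(struct netfs_inode *ictx)
	{
		/* This object is only ever written to the local cache, never uploaded. */
		__set_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &ictx->flags);
	}

	static void myfs_note_object_changed(struct inode *inode)
	{
		/* Schedule the locally modified blob for write-out via ->writepages(). */
		netfs_single_mark_inode_dirty(inode);
	}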
+
+static int netfs_single_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
+{
+ return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
+}
+
+static void netfs_single_cache_prepare_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+ if (!cres->ops) {
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ return;
+ }
+ subreq->source = cres->ops->prepare_read(subreq, rreq->i_size);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+
+}
+
+static void netfs_single_read_cache(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+ _enter("R=%08x[%x]", rreq->debug_id, subreq->debug_index);
+ netfs_stat(&netfs_n_rh_read);
+ cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_FAIL,
+ netfs_cache_read_terminated, subreq);
+}
+
+/*
+ * Perform a read to a buffer from the cache or the server. Only a single
+ * subreq is permitted as the object must be fetched in a single transaction.
+ */
+static int netfs_single_dispatch_read(struct netfs_io_request *rreq)
+{
+ struct netfs_io_stream *stream = &rreq->io_streams[0];
+ struct netfs_io_subrequest *subreq;
+ int ret = 0;
+
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq)
+ return -ENOMEM;
+
+ subreq->source = NETFS_SOURCE_UNKNOWN;
+ subreq->start = 0;
+ subreq->len = rreq->len;
+ subreq->io_iter = rreq->buffer.iter;
+
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ spin_lock(&rreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_added);
+ stream->front = subreq;
+ /* Store list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ spin_unlock(&rreq->lock);
+
+ netfs_single_cache_prepare_read(rreq, subreq);
+ switch (subreq->source) {
+ case NETFS_DOWNLOAD_FROM_SERVER:
+ netfs_stat(&netfs_n_rh_download);
+ if (rreq->netfs_ops->prepare_read) {
+ ret = rreq->netfs_ops->prepare_read(subreq);
+ if (ret < 0)
+ goto cancel;
+ }
+
+ rreq->netfs_ops->issue_read(subreq);
+ rreq->submitted += subreq->len;
+ break;
+ case NETFS_READ_FROM_CACHE:
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ netfs_single_read_cache(rreq, subreq);
+ rreq->submitted += subreq->len;
+ ret = 0;
+ break;
+ default:
+ pr_warn("Unexpected single-read source %u\n", subreq->source);
+ WARN_ON_ONCE(true);
+ ret = -EIO;
+ break;
+ }
+
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags);
+ return ret;
+cancel:
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+ return ret;
+}
+
+/**
+ * netfs_read_single - Synchronously read a single blob of pages.
+ * @inode: The inode to read from.
+ * @file: The file we're using to read or NULL.
+ * @iter: The buffer we're reading into.
+ *
+ * Fulfil a read request for a single monolithic object by drawing data from
+ * the cache if possible, or the netfs if not. The buffer may be larger than
+ * the file content; unused space beyond the EOF will be zero-filled. The content
+ * will be read with a single I/O request (though this may be retried).
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled. If caching is enabled,
+ * the data will be stored as a single object into the cache.
+ */
+ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter)
+{
+ struct netfs_io_request *rreq;
+ struct netfs_inode *ictx = netfs_inode(inode);
+ ssize_t ret;
+
+ rreq = netfs_alloc_request(inode->i_mapping, file, 0, iov_iter_count(iter),
+ NETFS_READ_SINGLE);
+ if (IS_ERR(rreq))
+ return PTR_ERR(rreq);
+
+ ret = netfs_single_begin_cache_read(rreq, ictx);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto cleanup_free;
+
+ netfs_stat(&netfs_n_rh_read_single);
+ trace_netfs_read(rreq, 0, rreq->len, netfs_read_trace_read_single);
+
+ rreq->buffer.iter = *iter;
+ netfs_single_dispatch_read(rreq);
+
+ ret = netfs_wait_for_read(rreq);
+ netfs_put_request(rreq, true, netfs_rreq_trace_put_return);
+ return ret;
+
+cleanup_free:
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_read_single);
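A hedged usage sketch: reading a monolithic object into a single folio with netfs_read_single() after building an ITER_DEST bvec iterator over it. The myfs_read_blob name is hypothetical and error handling is elided:

	static ssize_t myfs_read_blob(struct inode *inode, struct file *file,
				      struct folio *folio)
	{
		struct bio_vec bv;
		struct iov_iter iter;

		/* Point a destination iterator at the folio's contents. */
		bvec_set_folio(&bv, folio, folio_size(folio), 0);
		iov_iter_bvec(&iter, ITER_DEST, &bv, 1, folio_size(folio));

		return netfs_read_single(inode, file, &iter);
	}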
diff --git a/fs/netfs/rolling_buffer.c b/fs/netfs/rolling_buffer.c
new file mode 100644
index 000000000000..75d97af14b4a
--- /dev/null
+++ b/fs/netfs/rolling_buffer.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Rolling buffer helpers
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/bitops.h>
+#include <linux/pagemap.h>
+#include <linux/rolling_buffer.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+static atomic_t debug_ids;
+
+/**
+ * netfs_folioq_alloc - Allocate a folio_queue struct
+ * @rreq_id: Associated debugging ID for tracing purposes
+ * @gfp: Allocation constraints
+ * @trace: Trace tag to indicate the purpose of the allocation
+ *
+ * Allocate, initialise and account the folio_queue struct and log a trace line
+ * to mark the allocation.
+ */
+struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp,
+ unsigned int /*enum netfs_folioq_trace*/ trace)
+{
+ struct folio_queue *fq;
+
+ fq = kmalloc(sizeof(*fq), gfp);
+ if (fq) {
+ netfs_stat(&netfs_n_folioq);
+ folioq_init(fq, rreq_id);
+ fq->debug_id = atomic_inc_return(&debug_ids);
+ trace_netfs_folioq(fq, trace);
+ }
+ return fq;
+}
+EXPORT_SYMBOL(netfs_folioq_alloc);
+
+/**
+ * netfs_folioq_free - Free a folio_queue struct
+ * @folioq: The object to free
+ * @trace: Trace tag to indicate which free
+ *
+ * Free and unaccount the folio_queue struct.
+ */
+void netfs_folioq_free(struct folio_queue *folioq,
+ unsigned int /*enum netfs_trace_folioq*/ trace)
+{
+ trace_netfs_folioq(folioq, trace);
+ netfs_stat_d(&netfs_n_folioq);
+ kfree(folioq);
+}
+EXPORT_SYMBOL(netfs_folioq_free);
+
+/*
+ * Initialise a rolling buffer. We allocate an empty folio queue struct so
+ * that the pointers can be independently driven by the producer and the
+ * consumer.
+ */
+int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id,
+ unsigned int direction)
+{
+ struct folio_queue *fq;
+
+ fq = netfs_folioq_alloc(rreq_id, GFP_NOFS, netfs_trace_folioq_rollbuf_init);
+ if (!fq)
+ return -ENOMEM;
+
+ roll->head = fq;
+ roll->tail = fq;
+ iov_iter_folio_queue(&roll->iter, direction, fq, 0, 0, 0);
+ return 0;
+}
+
+/*
+ * Add another folio_queue to a rolling buffer if there's no space left.
+ */
+int rolling_buffer_make_space(struct rolling_buffer *roll)
+{
+ struct folio_queue *fq, *head = roll->head;
+
+ if (!folioq_full(head))
+ return 0;
+
+ fq = netfs_folioq_alloc(head->rreq_id, GFP_NOFS, netfs_trace_folioq_make_space);
+ if (!fq)
+ return -ENOMEM;
+ fq->prev = head;
+
+ roll->head = fq;
+ if (folioq_full(head)) {
+ /* Make sure we don't leave the master iterator pointing to a
+ * block that might get immediately consumed.
+ */
+ if (roll->iter.folioq == head &&
+ roll->iter.folioq_slot == folioq_nr_slots(head)) {
+ roll->iter.folioq = fq;
+ roll->iter.folioq_slot = 0;
+ }
+ }
+
+ /* Make sure the initialisation is stored before the next pointer.
+ *
+ * [!] NOTE: After we set head->next, the consumer is at liberty to
+ * immediately delete the old head.
+ */
+ smp_store_release(&head->next, fq);
+ return 0;
+}
+
+/*
+ * Decant the list of folios to read into a rolling buffer.
+ */
+ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
+ struct readahead_control *ractl,
+ struct folio_batch *put_batch)
+{
+ struct folio_queue *fq;
+ struct page **vec;
+ int nr, ix, to;
+ ssize_t size = 0;
+
+ if (rolling_buffer_make_space(roll) < 0)
+ return -ENOMEM;
+
+ fq = roll->head;
+ vec = (struct page **)fq->vec.folios;
+ nr = __readahead_batch(ractl, vec + folio_batch_count(&fq->vec),
+ folio_batch_space(&fq->vec));
+ ix = fq->vec.nr;
+ to = ix + nr;
+ fq->vec.nr = to;
+ for (; ix < to; ix++) {
+ struct folio *folio = folioq_folio(fq, ix);
+ unsigned int order = folio_order(folio);
+
+ fq->orders[ix] = order;
+ size += PAGE_SIZE << order;
+ trace_netfs_folio(folio, netfs_folio_trace_read);
+ if (!folio_batch_add(put_batch, folio))
+ folio_batch_release(put_batch);
+ }
+ WRITE_ONCE(roll->iter.count, roll->iter.count + size);
+
+ /* Store the counter after setting the slot. */
+ smp_store_release(&roll->next_head_slot, to);
+
+ for (; ix < folioq_nr_slots(fq); ix++)
+ folioq_clear(fq, ix);
+
+ return size;
+}
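The expected caller pattern for this readahead decant is sketched below under the assumption that the request length bounds the loop and that page references are dropped in batches; the myfs_fill_from_ra name is illustrative, not the actual buffered-read path:

	static int myfs_fill_from_ra(struct netfs_io_request *rreq,
				     struct readahead_control *ractl)
	{
		struct folio_batch put_batch;
		int ret = 0;

		folio_batch_init(&put_batch);
		while (rreq->submitted < rreq->len) {
			ssize_t size = rolling_buffer_load_from_ra(&rreq->buffer,
								   ractl, &put_batch);
			if (size < 0) {
				ret = size;
				break;
			}
			rreq->submitted += size;
		}
		folio_batch_release(&put_batch);
		return ret;
	}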
+
+/*
+ * Append a folio to the rolling buffer.
+ */
+ssize_t rolling_buffer_append(struct rolling_buffer *roll, struct folio *folio,
+ unsigned int flags)
+{
+ ssize_t size = folio_size(folio);
+ int slot;
+
+ if (rolling_buffer_make_space(roll) < 0)
+ return -ENOMEM;
+
+ slot = folioq_append(roll->head, folio);
+ if (flags & ROLLBUF_MARK_1)
+ folioq_mark(roll->head, slot);
+ if (flags & ROLLBUF_MARK_2)
+ folioq_mark2(roll->head, slot);
+
+ WRITE_ONCE(roll->iter.count, roll->iter.count + size);
+
+ /* Store the counter after setting the slot. */
+ smp_store_release(&roll->next_head_slot, slot);
+ return size;
+}
+
+/*
+ * Delete a spent buffer from a rolling queue and return the next in line. To
+ * keep the head and tail pointers independent, we never return the last
+ * buffer; NULL is returned instead.
+ */
+struct folio_queue *rolling_buffer_delete_spent(struct rolling_buffer *roll)
+{
+ struct folio_queue *spent = roll->tail, *next = READ_ONCE(spent->next);
+
+ if (!next)
+ return NULL;
+ next->prev = NULL;
+ netfs_folioq_free(spent, netfs_trace_folioq_delete);
+ roll->tail = next;
+ return next;
+}
+
+/*
+ * Clear out a rolling queue. Folios that have mark 1 set are put.
+ */
+void rolling_buffer_clear(struct rolling_buffer *roll)
+{
+ struct folio_batch fbatch;
+ struct folio_queue *p;
+
+ folio_batch_init(&fbatch);
+
+ while ((p = roll->tail)) {
+ roll->tail = p->next;
+ for (int slot = 0; slot < folioq_count(p); slot++) {
+ struct folio *folio = folioq_folio(p, slot);
+
+ if (!folio)
+ continue;
+ if (folioq_is_marked(p, slot)) {
+ trace_netfs_folio(folio, netfs_folio_trace_put);
+ if (!folio_batch_add(&fbatch, folio))
+ folio_batch_release(&fbatch);
+ }
+ }
+
+ netfs_folioq_free(p, netfs_trace_folioq_clear);
+ }
+
+ folio_batch_release(&fbatch);
+}
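Taken together, the helpers above follow a producer/consumer lifecycle which the sketch below summarises; it is a minimal illustration for a single folio under stated assumptions, not the actual netfs call sequence:

	static void rolling_buffer_lifecycle_sketch(struct rolling_buffer *roll,
						    unsigned int rreq_id,
						    struct folio *folio)
	{
		if (rolling_buffer_init(roll, rreq_id, ITER_SOURCE) < 0)
			return;

		/* Producer side: append folios at the head, marking them for putting. */
		rolling_buffer_append(roll, folio, ROLLBUF_MARK_1);

		/* Consumer side: retire spent folio_queue segments from the tail
		 * (only segments with a successor are actually removed).
		 */
		rolling_buffer_delete_spent(roll);

		/* Teardown: put any still-marked folios and free the queue. */
		rolling_buffer_clear(roll);
	}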
diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c
index 8e63516b40f6..f1af344266cc 100644
--- a/fs/netfs/stats.c
+++ b/fs/netfs/stats.c
@@ -12,6 +12,7 @@
atomic_t netfs_n_rh_dio_read;
atomic_t netfs_n_rh_readahead;
atomic_t netfs_n_rh_read_folio;
+atomic_t netfs_n_rh_read_single;
atomic_t netfs_n_rh_rreq;
atomic_t netfs_n_rh_sreq;
atomic_t netfs_n_rh_download;
@@ -46,10 +47,11 @@ atomic_t netfs_n_folioq;
int netfs_stats_show(struct seq_file *m, void *v)
{
- seq_printf(m, "Reads : DR=%u RA=%u RF=%u WB=%u WBZ=%u\n",
+ seq_printf(m, "Reads : DR=%u RA=%u RF=%u RS=%u WB=%u WBZ=%u\n",
atomic_read(&netfs_n_rh_dio_read),
atomic_read(&netfs_n_rh_readahead),
atomic_read(&netfs_n_rh_read_folio),
+ atomic_read(&netfs_n_rh_read_single),
atomic_read(&netfs_n_rh_write_begin),
atomic_read(&netfs_n_rh_write_zskip));
seq_printf(m, "Writes : BW=%u WT=%u DW=%u WP=%u 2C=%u\n",
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index ca3a11ed9b54..294f67795f79 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -17,10 +17,38 @@
#define HIT_PENDING 0x01 /* A front op was still pending */
#define NEED_REASSESS 0x02 /* Need to loop round and reassess */
#define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */
-#define BUFFERED 0x08 /* The pagecache needs cleaning up */
+#define NEED_UNLOCK 0x08 /* The pagecache needs unlocking */
#define NEED_RETRY 0x10 /* A front op requests retrying */
#define SAW_FAILURE 0x20 /* One stream or more hit a permanent failure */
+static void netfs_dump_request(const struct netfs_io_request *rreq)
+{
+ pr_err("Request R=%08x r=%d fl=%lx or=%x e=%ld\n",
+ rreq->debug_id, refcount_read(&rreq->ref), rreq->flags,
+ rreq->origin, rreq->error);
+ pr_err(" st=%llx tsl=%zx/%llx/%llx\n",
+ rreq->start, rreq->transferred, rreq->submitted, rreq->len);
+ pr_err(" cci=%llx/%llx/%llx\n",
+ rreq->cleaned_to, rreq->collected_to, atomic64_read(&rreq->issued_to));
+ pr_err(" iw=%pSR\n", rreq->netfs_ops->issue_write);
+ for (int i = 0; i < NR_IO_STREAMS; i++) {
+ const struct netfs_io_subrequest *sreq;
+ const struct netfs_io_stream *s = &rreq->io_streams[i];
+
+ pr_err(" str[%x] s=%x e=%d acnf=%u,%u,%u,%u\n",
+ s->stream_nr, s->source, s->error,
+ s->avail, s->active, s->need_retry, s->failed);
+ pr_err(" str[%x] ct=%llx t=%zx\n",
+ s->stream_nr, s->collected_to, s->transferred);
+ list_for_each_entry(sreq, &s->subrequests, rreq_link) {
+ pr_err(" sreq[%x:%x] sc=%u s=%llx t=%zx/%zx r=%d f=%lx\n",
+ sreq->stream_nr, sreq->debug_index, sreq->source,
+ sreq->start, sreq->transferred, sreq->len,
+ refcount_read(&sreq->ref), sreq->flags);
+ }
+ }
+}
+
/*
* Successful completion of write of a folio to the server and/or cache. Note
* that we are not allowed to lock the folio here on pain of deadlocking with
@@ -83,9 +111,15 @@ end_wb:
static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
unsigned int *notes)
{
- struct folio_queue *folioq = wreq->buffer;
+ struct folio_queue *folioq = wreq->buffer.tail;
unsigned long long collected_to = wreq->collected_to;
- unsigned int slot = wreq->buffer_head_slot;
+ unsigned int slot = wreq->buffer.first_tail_slot;
+
+ if (WARN_ON_ONCE(!folioq)) {
+ pr_err("[!] Writeback unlock found empty rolling buffer!\n");
+ netfs_dump_request(wreq);
+ return;
+ }
if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) {
if (netfs_pgpriv2_unlock_copied_folios(wreq))
@@ -94,7 +128,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
}
if (slot >= folioq_nr_slots(folioq)) {
- folioq = netfs_delete_buffer_head(wreq);
+ folioq = rolling_buffer_delete_spent(&wreq->buffer);
+ if (!folioq)
+ return;
slot = 0;
}
@@ -134,9 +170,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
folioq_clear(folioq, slot);
slot++;
if (slot >= folioq_nr_slots(folioq)) {
- if (READ_ONCE(wreq->buffer_tail) == folioq)
- break;
- folioq = netfs_delete_buffer_head(wreq);
+ folioq = rolling_buffer_delete_spent(&wreq->buffer);
+ if (!folioq)
+ goto done;
slot = 0;
}
@@ -144,222 +180,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
break;
}
- wreq->buffer = folioq;
- wreq->buffer_head_slot = slot;
-}
-
-/*
- * Perform retries on the streams that need it.
- */
-static void netfs_retry_write_stream(struct netfs_io_request *wreq,
- struct netfs_io_stream *stream)
-{
- struct list_head *next;
-
- _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
-
- if (list_empty(&stream->subrequests))
- return;
-
- if (stream->source == NETFS_UPLOAD_TO_SERVER &&
- wreq->netfs_ops->retry_request)
- wreq->netfs_ops->retry_request(wreq, stream);
-
- if (unlikely(stream->failed))
- return;
-
- /* If there's no renegotiation to do, just resend each failed subreq. */
- if (!stream->prepare_write) {
- struct netfs_io_subrequest *subreq;
-
- list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
- if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
- break;
- if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
- struct iov_iter source = subreq->io_iter;
-
- iov_iter_revert(&source, subreq->len - source.count);
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
- netfs_reissue_write(stream, subreq, &source);
- }
- }
- return;
- }
-
- next = stream->subrequests.next;
-
- do {
- struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
- struct iov_iter source;
- unsigned long long start, len;
- size_t part;
- bool boundary = false;
-
- /* Go through the stream and find the next span of contiguous
- * data that we then rejig (cifs, for example, needs the wsize
- * renegotiating) and reissue.
- */
- from = list_entry(next, struct netfs_io_subrequest, rreq_link);
- to = from;
- start = from->start + from->transferred;
- len = from->len - from->transferred;
-
- if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
- !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
- return;
-
- list_for_each_continue(next, &stream->subrequests) {
- subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
- if (subreq->start + subreq->transferred != start + len ||
- test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
- !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
- break;
- to = subreq;
- len += to->len;
- }
-
- /* Determine the set of buffers we're going to use. Each
- * subreq gets a subset of a single overall contiguous buffer.
- */
- netfs_reset_iter(from);
- source = from->io_iter;
- source.count = len;
-
- /* Work through the sublist. */
- subreq = from;
- list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
- if (!len)
- break;
- /* Renegotiate max_len (wsize) */
- trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
- __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
- subreq->retry_count++;
- stream->prepare_write(subreq);
-
- part = min(len, stream->sreq_max_len);
- subreq->len = part;
- subreq->start = start;
- subreq->transferred = 0;
- len -= part;
- start += part;
- if (len && subreq == to &&
- __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
- boundary = true;
-
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
- netfs_reissue_write(stream, subreq, &source);
- if (subreq == to)
- break;
- }
-
- /* If we managed to use fewer subreqs, we can discard the
- * excess; if we used the same number, then we're done.
- */
- if (!len) {
- if (subreq == to)
- continue;
- list_for_each_entry_safe_from(subreq, tmp,
- &stream->subrequests, rreq_link) {
- trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
- list_del(&subreq->rreq_link);
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
- if (subreq == to)
- break;
- }
- continue;
- }
-
- /* We ran out of subrequests, so we need to allocate some more
- * and insert them after.
- */
- do {
- subreq = netfs_alloc_subrequest(wreq);
- subreq->source = to->source;
- subreq->start = start;
- subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
- subreq->stream_nr = to->stream_nr;
- subreq->retry_count = 1;
-
- trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
- refcount_read(&subreq->ref),
- netfs_sreq_trace_new);
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
-
- list_add(&subreq->rreq_link, &to->rreq_link);
- to = list_next_entry(to, rreq_link);
- trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
-
- stream->sreq_max_len = len;
- stream->sreq_max_segs = INT_MAX;
- switch (stream->source) {
- case NETFS_UPLOAD_TO_SERVER:
- netfs_stat(&netfs_n_wh_upload);
- stream->sreq_max_len = umin(len, wreq->wsize);
- break;
- case NETFS_WRITE_TO_CACHE:
- netfs_stat(&netfs_n_wh_write);
- break;
- default:
- WARN_ON_ONCE(1);
- }
-
- stream->prepare_write(subreq);
-
- part = umin(len, stream->sreq_max_len);
- subreq->len = subreq->transferred + part;
- len -= part;
- start += part;
- if (!len && boundary) {
- __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
- boundary = false;
- }
-
- netfs_reissue_write(stream, subreq, &source);
- if (!len)
- break;
-
- } while (len);
-
- } while (!list_is_head(next, &stream->subrequests));
-}
-
-/*
- * Perform retries on the streams that need it. If we're doing content
- * encryption and the server copy changed due to a third-party write, we may
- * need to do an RMW cycle and also rewrite the data to the cache.
- */
-static void netfs_retry_writes(struct netfs_io_request *wreq)
-{
- struct netfs_io_subrequest *subreq;
- struct netfs_io_stream *stream;
- int s;
-
- /* Wait for all outstanding I/O to quiesce before performing retries as
- * we may need to renegotiate the I/O sizes.
- */
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (!stream->active)
- continue;
-
- list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
- wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
- TASK_UNINTERRUPTIBLE);
- }
- }
-
- // TODO: Enc: Fetch changed partial pages
- // TODO: Enc: Reencrypt content if needed.
- // TODO: Enc: Wind back transferred point.
- // TODO: Enc: Mark cache pages for retry.
-
- for (s = 0; s < NR_IO_STREAMS; s++) {
- stream = &wreq->io_streams[s];
- if (stream->need_retry) {
- stream->need_retry = false;
- netfs_retry_write_stream(wreq, stream);
- }
- }
+ wreq->buffer.tail = folioq;
+done:
+ wreq->buffer.first_tail_slot = slot;
}
/*
@@ -390,7 +213,7 @@ reassess_streams:
if (wreq->origin == NETFS_WRITEBACK ||
wreq->origin == NETFS_WRITETHROUGH ||
wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE)
- notes = BUFFERED;
+ notes = NEED_UNLOCK;
else
notes = 0;
@@ -449,14 +272,14 @@ reassess_streams:
cancel:
/* Remove if completely consumed. */
- spin_lock_bh(&wreq->lock);
+ spin_lock(&wreq->lock);
remove = front;
list_del_init(&front->rreq_link);
front = list_first_entry_or_null(&stream->subrequests,
struct netfs_io_subrequest, rreq_link);
stream->front = front;
- spin_unlock_bh(&wreq->lock);
+ spin_unlock(&wreq->lock);
netfs_put_subrequest(remove, false,
notes & SAW_FAILURE ?
netfs_sreq_trace_put_cancel :
@@ -487,7 +310,7 @@ reassess_streams:
trace_netfs_collect_state(wreq, wreq->collected_to, notes);
/* Unlock any folios that we have now finished with. */
- if (notes & BUFFERED) {
+ if (notes & NEED_UNLOCK) {
if (wreq->cleaned_to < wreq->collected_to)
netfs_writeback_unlock_folios(wreq, &notes);
} else {
@@ -500,7 +323,9 @@ reassess_streams:
goto need_retry;
if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
- clear_and_wake_up_bit(NETFS_RREQ_PAUSE, &wreq->flags);
+ clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
+ smp_mb__after_atomic(); /* Set PAUSE before task state */
+ wake_up(&wreq->waitq);
}
if (notes & NEED_REASSESS) {
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index ff0e82505a0b..69727411683e 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -94,9 +94,10 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
{
struct netfs_io_request *wreq;
struct netfs_inode *ictx;
- bool is_buffered = (origin == NETFS_WRITEBACK ||
- origin == NETFS_WRITETHROUGH ||
- origin == NETFS_PGPRIV2_COPY_TO_CACHE);
+ bool is_cacheable = (origin == NETFS_WRITEBACK ||
+ origin == NETFS_WRITEBACK_SINGLE ||
+ origin == NETFS_WRITETHROUGH ||
+ origin == NETFS_PGPRIV2_COPY_TO_CACHE);
wreq = netfs_alloc_request(mapping, file, start, 0, origin);
if (IS_ERR(wreq))
@@ -105,8 +106,10 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
_enter("R=%x", wreq->debug_id);
ictx = netfs_inode(wreq->inode);
- if (is_buffered && netfs_is_cache_enabled(ictx))
+ if (is_cacheable && netfs_is_cache_enabled(ictx))
fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
+ if (rolling_buffer_init(&wreq->buffer, wreq->debug_id, ITER_SOURCE) < 0)
+ goto nomem;
wreq->cleaned_to = wreq->start;
@@ -129,6 +132,10 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
}
return wreq;
+nomem:
+ wreq->error = -ENOMEM;
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_failed);
+ return ERR_PTR(-ENOMEM);
}
/**
@@ -153,16 +160,15 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
loff_t start)
{
struct netfs_io_subrequest *subreq;
- struct iov_iter *wreq_iter = &wreq->io_iter;
+ struct iov_iter *wreq_iter = &wreq->buffer.iter;
/* Make sure we don't point the iterator at a used-up folio_queue
* struct being used as a placeholder to prevent the queue from
* collapsing. In such a case, extend the queue.
*/
if (iov_iter_is_folioq(wreq_iter) &&
- wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq)) {
- netfs_buffer_make_space(wreq);
- }
+ wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq))
+ rolling_buffer_make_space(&wreq->buffer);
subreq = netfs_alloc_subrequest(wreq);
subreq->source = stream->source;
@@ -198,7 +204,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
* the list. The collector only goes nextwards and uses the lock to
* remove entries off of the front.
*/
- spin_lock_bh(&wreq->lock);
+ spin_lock(&wreq->lock);
list_add_tail(&subreq->rreq_link, &stream->subrequests);
if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
stream->front = subreq;
@@ -209,7 +215,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
}
}
- spin_unlock_bh(&wreq->lock);
+ spin_unlock(&wreq->lock);
stream->construct = subreq;
}
@@ -268,9 +274,9 @@ void netfs_issue_write(struct netfs_io_request *wreq,
* we can avoid overrunning the credits obtained (cifs) and try to parallelise
* content-crypto preparation with network writes.
*/
-int netfs_advance_write(struct netfs_io_request *wreq,
- struct netfs_io_stream *stream,
- loff_t start, size_t len, bool to_eof)
+size_t netfs_advance_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start, size_t len, bool to_eof)
{
struct netfs_io_subrequest *subreq = stream->construct;
size_t part;
@@ -327,6 +333,9 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
_enter("");
+ if (rolling_buffer_make_space(&wreq->buffer) < 0)
+ return -ENOMEM;
+
/* netfs_perform_write() may shift i_size around the page or from out
* of the page to beyond it, but cannot move i_size into or through the
* page since we have it locked.
@@ -431,7 +440,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
}
/* Attach the folio to the rolling buffer. */
- netfs_buffer_append_folio(wreq, folio, false);
+ rolling_buffer_append(&wreq->buffer, folio, 0);
/* Move the submission point forward to allow for write-streaming data
* not starting at the front of the page. We don't do write-streaming
@@ -444,7 +453,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
stream = &wreq->io_streams[s];
stream->submit_off = foff;
stream->submit_len = flen;
- if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
+ if (!stream->avail ||
+ (stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
(stream->source == NETFS_UPLOAD_TO_SERVER &&
fgroup == NETFS_FOLIO_COPY_TO_CACHE)) {
stream->submit_off = UINT_MAX;
@@ -478,7 +488,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
/* Advance the iterator(s). */
if (stream->submit_off > iter_off) {
- iov_iter_advance(&wreq->io_iter, stream->submit_off - iter_off);
+ rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
iter_off = stream->submit_off;
}
@@ -496,7 +506,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
}
if (fsize > iter_off)
- iov_iter_advance(&wreq->io_iter, fsize - iter_off);
+ rolling_buffer_advance(&wreq->buffer, fsize - iter_off);
atomic64_set(&wreq->issued_to, fpos + fsize);
if (!debug)
@@ -635,7 +645,7 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
struct folio **writethrough_cache)
{
_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
- wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
+ wreq->debug_id, wreq->buffer.iter.count, wreq->wsize, copied, to_page_end);
if (!*writethrough_cache) {
if (folio_test_dirty(folio))
@@ -710,10 +720,10 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
part = netfs_advance_write(wreq, upload, start, len, false);
start += part;
len -= part;
- iov_iter_advance(&wreq->io_iter, part);
+ rolling_buffer_advance(&wreq->buffer, part);
if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
- wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE);
+ wait_event(wreq->waitq, !test_bit(NETFS_RREQ_PAUSE, &wreq->flags));
}
if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
break;
@@ -723,3 +733,194 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
_leave(" = %d", error);
return error;
}
+
+/*
+ * Write some of a pending folio's data back to the server and/or the cache.
+ */
+static int netfs_write_folio_single(struct netfs_io_request *wreq,
+ struct folio *folio)
+{
+ struct netfs_io_stream *upload = &wreq->io_streams[0];
+ struct netfs_io_stream *cache = &wreq->io_streams[1];
+ struct netfs_io_stream *stream;
+ size_t iter_off = 0;
+ size_t fsize = folio_size(folio), flen;
+ loff_t fpos = folio_pos(folio);
+ bool to_eof = false;
+ bool no_debug = false;
+
+ _enter("");
+
+ flen = folio_size(folio);
+ if (flen > wreq->i_size - fpos) {
+ flen = wreq->i_size - fpos;
+ folio_zero_segment(folio, flen, fsize);
+ to_eof = true;
+ } else if (flen == wreq->i_size - fpos) {
+ to_eof = true;
+ }
+
+ _debug("folio %zx/%zx", flen, fsize);
+
+ if (!upload->avail && !cache->avail) {
+ trace_netfs_folio(folio, netfs_folio_trace_cancel_store);
+ return 0;
+ }
+
+ if (!upload->construct)
+ trace_netfs_folio(folio, netfs_folio_trace_store);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_store_plus);
+
+ /* Attach the folio to the rolling buffer. */
+ folio_get(folio);
+ rolling_buffer_append(&wreq->buffer, folio, NETFS_ROLLBUF_PUT_MARK);
+
+ /* Move the submission point forward to allow for write-streaming data
+ * not starting at the front of the page. We don't do write-streaming
+ * with the cache as the cache requires DIO alignment.
+ *
+ * Also skip uploading for data that's been read and just needs copying
+ * to the cache.
+ */
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ stream->submit_off = 0;
+ stream->submit_len = flen;
+ if (!stream->avail) {
+ stream->submit_off = UINT_MAX;
+ stream->submit_len = 0;
+ }
+ }
+
+ /* Attach the folio to one or more subrequests. For a big folio, we
+ * could end up with thousands of subrequests if the wsize is small -
+ * but we might need to wait during the creation of subrequests for
+ * network resources (eg. SMB credits).
+ */
+ for (;;) {
+ ssize_t part;
+ size_t lowest_off = ULONG_MAX;
+ int choose_s = -1;
+
+ /* Always add to the lowest-submitted stream first. */
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->submit_len > 0 &&
+ stream->submit_off < lowest_off) {
+ lowest_off = stream->submit_off;
+ choose_s = s;
+ }
+ }
+
+ if (choose_s < 0)
+ break;
+ stream = &wreq->io_streams[choose_s];
+
+ /* Advance the iterator(s). */
+ if (stream->submit_off > iter_off) {
+ rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
+ iter_off = stream->submit_off;
+ }
+
+ atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
+ stream->submit_extendable_to = fsize - stream->submit_off;
+ part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
+ stream->submit_len, to_eof);
+ stream->submit_off += part;
+ if (part > stream->submit_len)
+ stream->submit_len = 0;
+ else
+ stream->submit_len -= part;
+ if (part > 0)
+ no_debug = true;
+ }
+
+ wreq->buffer.iter.iov_offset = 0;
+ if (fsize > iter_off)
+ rolling_buffer_advance(&wreq->buffer, fsize - iter_off);
+ atomic64_set(&wreq->issued_to, fpos + fsize);
+
+ if (!no_debug)
+ kdebug("R=%x: No submit", wreq->debug_id);
+ _leave(" = 0");
+ return 0;
+}
+
+/**
+ * netfs_writeback_single - Write back a monolithic payload
+ * @mapping: The mapping to write from
+ * @wbc: Hints from the VM
+ * @iter: Data to write, must be ITER_FOLIOQ.
+ *
+ * Write a monolithic, non-pagecache object back to the server and/or
+ * the cache.
+ */
+int netfs_writeback_single(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct iov_iter *iter)
+{
+ struct netfs_io_request *wreq;
+ struct netfs_inode *ictx = netfs_inode(mapping->host);
+ struct folio_queue *fq;
+ size_t size = iov_iter_count(iter);
+ int ret;
+
+ if (WARN_ON_ONCE(!iov_iter_is_folioq(iter)))
+ return -EIO;
+
+ if (!mutex_trylock(&ictx->wb_lock)) {
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ netfs_stat(&netfs_n_wb_lock_skip);
+ return 0;
+ }
+ netfs_stat(&netfs_n_wb_lock_wait);
+ mutex_lock(&ictx->wb_lock);
+ }
+
+ wreq = netfs_create_write_req(mapping, NULL, 0, NETFS_WRITEBACK_SINGLE);
+ if (IS_ERR(wreq)) {
+ ret = PTR_ERR(wreq);
+ goto couldnt_start;
+ }
+
+ trace_netfs_write(wreq, netfs_write_trace_writeback);
+ netfs_stat(&netfs_n_wh_writepages);
+
+ if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
+ wreq->netfs_ops->begin_writeback(wreq);
+
+ for (fq = (struct folio_queue *)iter->folioq; fq; fq = fq->next) {
+ for (int slot = 0; slot < folioq_count(fq); slot++) {
+ struct folio *folio = folioq_folio(fq, slot);
+ size_t part = umin(folioq_folio_size(fq, slot), size);
+
+ _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to));
+
+ ret = netfs_write_folio_single(wreq, folio);
+ if (ret < 0)
+ goto stop;
+ size -= part;
+ if (size <= 0)
+ goto stop;
+ }
+ }
+
+stop:
+ for (int s = 0; s < NR_IO_STREAMS; s++)
+ netfs_issue_write(wreq, &wreq->io_streams[s]);
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+ mutex_unlock(&ictx->wb_lock);
+
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ _leave(" = %d", ret);
+ return ret;
+
+couldnt_start:
+ mutex_unlock(&ictx->wb_lock);
+ _leave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_writeback_single);
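netfs_writeback_single() refuses anything that is not an ITER_FOLIOQ iterator, so a caller first has to describe its payload as a folio queue. The sketch below is illustrative only and not part of this patch; example_write_object(), its folio_queue argument and the writeback_control values are assumptions about how a filesystem might drive the new helper.

	#include <linux/netfs.h>
	#include <linux/uio.h>
	#include <linux/writeback.h>

	static int example_write_object(struct address_space *mapping,
					struct folio_queue *fq, size_t len)
	{
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_ALL,	/* wait for wb_lock instead of skipping */
			.nr_to_write	= LONG_MAX,
		};
		struct iov_iter iter;

		/* Describe the monolithic payload as an ITER_FOLIOQ source iterator. */
		iov_iter_folio_queue(&iter, ITER_SOURCE, fq, 0, 0, len);

		/* Rejects non-ITER_FOLIOQ iterators with -EIO. */
		return netfs_writeback_single(mapping, &wbc, &iter);
	}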
diff --git a/fs/netfs/write_retry.c b/fs/netfs/write_retry.c
new file mode 100644
index 000000000000..c841a851dd73
--- /dev/null
+++ b/fs/netfs/write_retry.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem write retrying.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Perform retries on the streams that need it.
+ */
+static void netfs_retry_write_stream(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream)
+{
+ struct list_head *next;
+
+ _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
+
+ if (list_empty(&stream->subrequests))
+ return;
+
+ if (stream->source == NETFS_UPLOAD_TO_SERVER &&
+ wreq->netfs_ops->retry_request)
+ wreq->netfs_ops->retry_request(wreq, stream);
+
+ if (unlikely(stream->failed))
+ return;
+
+ /* If there's no renegotiation to do, just resend each failed subreq. */
+ if (!stream->prepare_write) {
+ struct netfs_io_subrequest *subreq;
+
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+ if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
+ break;
+ if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ struct iov_iter source = subreq->io_iter;
+
+ iov_iter_revert(&source, subreq->len - source.count);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_reissue_write(stream, subreq, &source);
+ }
+ }
+ return;
+ }
+
+ next = stream->subrequests.next;
+
+ do {
+ struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
+ struct iov_iter source;
+ unsigned long long start, len;
+ size_t part;
+ bool boundary = false;
+
+ /* Go through the stream and find the next span of contiguous
+ * data that we then rejig (cifs, for example, needs the wsize
+ * renegotiating) and reissue.
+ */
+ from = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ to = from;
+ start = from->start + from->transferred;
+ len = from->len - from->transferred;
+
+ if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
+ return;
+
+ list_for_each_continue(next, &stream->subrequests) {
+ subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ if (subreq->start + subreq->transferred != start + len ||
+ test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
+ break;
+ to = subreq;
+ len += to->len;
+ }
+
+ /* Determine the set of buffers we're going to use. Each
+ * subreq gets a subset of a single overall contiguous buffer.
+ */
+ netfs_reset_iter(from);
+ source = from->io_iter;
+ source.count = len;
+
+ /* Work through the sublist. */
+ subreq = from;
+ list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
+ if (!len)
+ break;
+
+ subreq->start = start;
+ subreq->len = len;
+ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ subreq->retry_count++;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ /* Renegotiate max_len (wsize) */
+ stream->sreq_max_len = len;
+ stream->prepare_write(subreq);
+
+ part = umin(len, stream->sreq_max_len);
+ if (unlikely(stream->sreq_max_segs))
+ part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs);
+ subreq->len = part;
+ subreq->transferred = 0;
+ len -= part;
+ start += part;
+ if (len && subreq == to &&
+ __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
+ boundary = true;
+
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_reissue_write(stream, subreq, &source);
+ if (subreq == to)
+ break;
+ }
+
+ /* If we managed to use fewer subreqs, we can discard the
+ * excess; if we used the same number, then we're done.
+ */
+ if (!len) {
+ if (subreq == to)
+ continue;
+ list_for_each_entry_safe_from(subreq, tmp,
+ &stream->subrequests, rreq_link) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
+ if (subreq == to)
+ break;
+ }
+ continue;
+ }
+
+ /* We ran out of subrequests, so we need to allocate some more
+ * and insert them after.
+ */
+ do {
+ subreq = netfs_alloc_subrequest(wreq);
+ subreq->source = to->source;
+ subreq->start = start;
+ subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
+ subreq->stream_nr = to->stream_nr;
+ subreq->retry_count = 1;
+
+ trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref),
+ netfs_sreq_trace_new);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+
+ list_add(&subreq->rreq_link, &to->rreq_link);
+ to = list_next_entry(to, rreq_link);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ stream->sreq_max_len = len;
+ stream->sreq_max_segs = INT_MAX;
+ switch (stream->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload);
+ stream->sreq_max_len = umin(len, wreq->wsize);
+ break;
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ stream->prepare_write(subreq);
+
+ part = umin(len, stream->sreq_max_len);
+ subreq->len = subreq->transferred + part;
+ len -= part;
+ start += part;
+ if (!len && boundary) {
+ __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
+ boundary = false;
+ }
+
+ netfs_reissue_write(stream, subreq, &source);
+ if (!len)
+ break;
+
+ } while (len);
+
+ } while (!list_is_head(next, &stream->subrequests));
+}
+
+/*
+ * Perform retries on the streams that need it. If we're doing content
+ * encryption and the server copy changed due to a third-party write, we may
+ * need to do an RMW cycle and also rewrite the data to the cache.
+ */
+void netfs_retry_writes(struct netfs_io_request *wreq)
+{
+ struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream;
+ int s;
+
+ /* Wait for all outstanding I/O to quiesce before performing retries as
+ * we may need to renegotiate the I/O sizes.
+ */
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (!stream->active)
+ continue;
+
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+ wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
+ TASK_UNINTERRUPTIBLE);
+ }
+ }
+
+ // TODO: Enc: Fetch changed partial pages
+ // TODO: Enc: Reencrypt content if needed.
+ // TODO: Enc: Wind back transferred point.
+ // TODO: Enc: Mark cache pages for retry.
+
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->need_retry) {
+ stream->need_retry = false;
+ netfs_retry_write_stream(wreq, stream);
+ }
+ }
+}
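One detail worth noting in the simple-resend path above: subreq->io_iter has already been consumed by the earlier attempt, so it is copied and rewound by the unconsumed difference before being reissued. A standalone restatement of that iterator arithmetic, purely as a sketch:

	#include <linux/netfs.h>
	#include <linux/uio.h>

	/* Sketch only: recover the start of a subrequest's data before a resend. */
	static void example_rewind_for_resend(struct netfs_io_subrequest *subreq)
	{
		struct iov_iter source = subreq->io_iter;

		/*
		 * source.count is the unconsumed remainder; subreq->len is the
		 * full span.  Reverting by the difference makes the iterator
		 * cover the whole subrequest again.
		 */
		iov_iter_revert(&source, subreq->len - source.count);

		/* 'source' now starts at subreq->start and spans subreq->len bytes. */
	}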
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index d49e4ce27999..e278a1ad1ca3 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -314,8 +314,10 @@ static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
&nfs_async_read_completion_ops);
netfs = nfs_netfs_alloc(sreq);
- if (!netfs)
- return netfs_read_subreq_terminated(sreq, -ENOMEM, false);
+ if (!netfs) {
+ sreq->error = -ENOMEM;
+ return netfs_read_subreq_terminated(sreq);
+ }
pgio.pg_netfs = netfs; /* used in completion */
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 772d485e96d3..9d86868f4998 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -74,7 +74,8 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
*/
netfs->sreq->transferred = min_t(s64, netfs->sreq->len,
atomic64_read(&netfs->transferred));
- netfs_read_subreq_terminated(netfs->sreq, netfs->error, false);
+ netfs->sreq->error = netfs->error;
+ netfs_read_subreq_terminated(netfs->sreq);
kfree(netfs);
}
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
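Both NFS hunks follow the reworked completion convention used throughout this series: the outcome is stored in subreq->error and netfs_read_subreq_terminated() now takes only the subrequest. A minimal sketch of a completion handler in that style; example_fs_read_done() and its result argument are hypothetical:

	#include <linux/netfs.h>

	static void example_fs_read_done(struct netfs_io_subrequest *subreq, ssize_t result)
	{
		if (result > 0) {
			subreq->transferred += result;
			__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
			result = 0;
		}
		subreq->error = result;		/* was an argument before this series */
		netfs_read_subreq_terminated(subreq);
	}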
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 93e33d1ee891..4dc327e02456 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -27,7 +27,7 @@ int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp)
int flags = nfsexp_flags(cred, exp);
/* discard any old override before preparing the new set */
- revert_creds(get_cred(current_real_cred()));
+ put_cred(revert_creds(get_cred(current_real_cred())));
new = prepare_creds();
if (!new)
return -ENOMEM;
@@ -80,7 +80,6 @@ int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp)
new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
put_cred(override_creds(new));
- put_cred(new);
return 0;
oom:
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index a1cdba42c4fa..dc5c9d8e8202 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -1248,7 +1248,7 @@ nfsd_file_acquire_local(struct net *net, struct svc_cred *cred,
beres = nfsd_file_do_acquire(NULL, net, cred, client,
fhp, may_flags, NULL, pnf, true);
- revert_creds(save_cred);
+ put_cred(revert_creds(save_cred));
return beres;
}
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 4a765555bf84..7f2ceeb118a4 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -82,14 +82,13 @@ nfs4_save_creds(const struct cred **original_creds)
new->fsuid = GLOBAL_ROOT_UID;
new->fsgid = GLOBAL_ROOT_GID;
*original_creds = override_creds(new);
- put_cred(new);
return 0;
}
static void
nfs4_reset_creds(const struct cred *original)
{
- revert_creds(original);
+ put_cred(revert_creds(original));
}
static void
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 6a831cb242df..98d6459724a7 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -222,7 +222,6 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net,
cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
put_cred(override_creds(new));
- put_cred(new);
} else {
error = nfsd_setuser_and_check_port(rqstp, cred, exp);
if (error)
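All of the credential hunks in this series follow the same revised rule: override_creds() no longer takes its own reference, and revert_creds() hands back the cred that was active so the caller can drop the reference it obtained from prepare_creds(). A minimal sketch of the updated calling pattern; the fsuid tweak is only an example:

	#include <linux/cred.h>

	static int example_do_as_root(void)
	{
		const struct cred *old;
		struct cred *new;

		new = prepare_creds();		/* we own one reference on 'new' */
		if (!new)
			return -ENOMEM;
		new->fsuid = GLOBAL_ROOT_UID;

		old = override_creds(new);	/* current->cred = new; no extra ref taken */

		/* ... act with the overridden credentials ... */

		put_cred(revert_creds(old));	/* restore 'old', drop our ref on 'new' */
		return 0;
	}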
diff --git a/fs/nsfs.c b/fs/nsfs.c
index c675fc40ce2d..663f8656158d 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -274,10 +274,7 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl,
if (usize < MNT_NS_INFO_SIZE_VER0)
return -EINVAL;
- if (previous)
- mnt_ns = lookup_prev_mnt_ns(to_mnt_ns(ns));
- else
- mnt_ns = lookup_next_mnt_ns(to_mnt_ns(ns));
+ mnt_ns = get_sequential_mnt_ns(to_mnt_ns(ns), previous);
if (IS_ERR(mnt_ns))
return PTR_ERR(mnt_ns);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 213206ebdd58..7799f4d16ce9 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1065,26 +1065,39 @@ int ocfs2_find_entry(const char *name, int namelen,
{
struct buffer_head *bh;
struct ocfs2_dir_entry *res_dir = NULL;
+ int ret = 0;
if (ocfs2_dir_indexed(dir))
return ocfs2_find_entry_dx(name, namelen, dir, lookup);
+ if (unlikely(i_size_read(dir) <= 0)) {
+ ret = -EFSCORRUPTED;
+ mlog_errno(ret);
+ goto out;
+ }
/*
* The unindexed dir code only uses part of the lookup
* structure, so there's no reason to push it down further
* than this.
*/
- if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+ if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+ if (unlikely(i_size_read(dir) > dir->i_sb->s_blocksize)) {
+ ret = -EFSCORRUPTED;
+ mlog_errno(ret);
+ goto out;
+ }
bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir);
- else
+ } else {
bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir);
+ }
if (bh == NULL)
return -ENOENT;
lookup->dl_leaf_bh = bh;
lookup->dl_entry = res_dir;
- return 0;
+out:
+ return ret;
}
/*
@@ -2010,6 +2023,7 @@ int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name,
*
* Return 0 if the name does not exist
* Return -EEXIST if the directory contains the name
+ * Return -EFSCORRUPTED if corruption is found
*
* Callers should have i_rwsem + a cluster lock on dir
*/
@@ -2023,9 +2037,12 @@ int ocfs2_check_dir_for_entry(struct inode *dir,
trace_ocfs2_check_dir_for_entry(
(unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name);
- if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) {
+ ret = ocfs2_find_entry(name, namelen, dir, &lookup);
+ if (ret == 0) {
ret = -EEXIST;
mlog_errno(ret);
+ } else if (ret == -ENOENT) {
+ ret = 0;
}
ocfs2_free_dir_lookup_result(&lookup);
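With ocfs2_find_entry() now able to return -EFSCORRUPTED, ocfs2_check_dir_for_entry() translates only -ENOENT into success and passes every other error through. A hypothetical caller, shown purely to illustrate the three outcomes:

	static int example_may_create(struct inode *dir, const char *name, int namelen)
	{
		int ret = ocfs2_check_dir_for_entry(dir, name, namelen);

		switch (ret) {
		case 0:
			return 0;		/* name absent: safe to create */
		case -EEXIST:
			return -EEXIST;		/* name already present */
		default:
			return ret;		/* -EFSCORRUPTED or another error */
		}
	}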
diff --git a/fs/open.c b/fs/open.c
index e6911101fe71..ffcfef67ac86 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -402,7 +402,6 @@ static bool access_need_override_creds(int flags)
static const struct cred *access_override_creds(void)
{
- const struct cred *old_cred;
struct cred *override_cred;
override_cred = prepare_creds();
@@ -447,13 +446,7 @@ static const struct cred *access_override_creds(void)
* freeing.
*/
override_cred->non_rcu = 1;
-
- old_cred = override_creds(override_cred);
-
- /* override_cred() gets its own ref */
- put_cred(override_cred);
-
- return old_cred;
+ return override_creds(override_cred);
}
static long do_faccessat(int dfd, const char __user *filename, int mode, int flags)
@@ -523,7 +516,7 @@ out_path_release:
}
out:
if (old_cred)
- revert_creds(old_cred);
+ put_cred(revert_creds(old_cred));
return res;
}
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 08e683917d12..c9993ff66fc2 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -575,12 +575,12 @@ static const struct cred *ovl_setup_cred_for_create(struct dentry *dentry,
}
/*
- * Caller is going to match this with revert_creds_light() and drop
+ * Caller is going to match this with revert_creds() and drop
	 * reference on the returned creds.
* We must be called with creator creds already, otherwise we risk
* leaking creds.
*/
- old_cred = override_creds_light(override_cred);
+ old_cred = override_creds(override_cred);
WARN_ON_ONCE(old_cred != ovl_creds(dentry->d_sb));
return override_cred;
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 9aa7493b1e10..0819c739cc2f 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -65,12 +65,12 @@ const struct cred *ovl_override_creds(struct super_block *sb)
{
struct ovl_fs *ofs = OVL_FS(sb);
- return override_creds_light(ofs->creator_cred);
+ return override_creds(ofs->creator_cred);
}
void ovl_revert_creds(const struct cred *old_cred)
{
- revert_creds_light(old_cred);
+ revert_creds(old_cred);
}
/*
diff --git a/fs/pidfs.c b/fs/pidfs.c
index 618abb1fa1b8..049352f973de 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/anon_inodes.h>
+#include <linux/exportfs.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cgroup.h>
@@ -23,6 +24,97 @@
#include "internal.h"
#include "mount.h"
+static struct rb_root pidfs_ino_tree = RB_ROOT;
+
+#if BITS_PER_LONG == 32
+static inline unsigned long pidfs_ino(u64 ino)
+{
+ return lower_32_bits(ino);
+}
+
+/* On 32 bit the generation number is the upper 32 bits. */
+static inline u32 pidfs_gen(u64 ino)
+{
+ return upper_32_bits(ino);
+}
+
+#else
+
+/* On 64 bit simply return ino. */
+static inline unsigned long pidfs_ino(u64 ino)
+{
+ return ino;
+}
+
+/* On 64 bit the generation number is 0. */
+static inline u32 pidfs_gen(u64 ino)
+{
+ return 0;
+}
+#endif
+
+static int pidfs_ino_cmp(struct rb_node *a, const struct rb_node *b)
+{
+ struct pid *pid_a = rb_entry(a, struct pid, pidfs_node);
+ struct pid *pid_b = rb_entry(b, struct pid, pidfs_node);
+ u64 pid_ino_a = pid_a->ino;
+ u64 pid_ino_b = pid_b->ino;
+
+ if (pid_ino_a < pid_ino_b)
+ return -1;
+ if (pid_ino_a > pid_ino_b)
+ return 1;
+ return 0;
+}
+
+void pidfs_add_pid(struct pid *pid)
+{
+ static u64 pidfs_ino_nr = 2;
+
+ /*
+ * On 64 bit nothing special happens. The 64bit number assigned
+ * to struct pid is the inode number.
+ *
+ * On 32 bit the 64 bit number assigned to struct pid is split
+ * into two 32 bit numbers. The lower 32 bits are used as the
+ * inode number and the upper 32 bits are used as the inode
+ * generation number.
+ *
+ * On 32 bit pidfs_ino() will return the lower 32 bit. When
+ * pidfs_ino() returns zero a wrap around happened. When a
+ * wraparound happens the 64 bit number will be incremented by 2
+ * so inode numbering starts at 2 again.
+ *
+ * On 64 bit comparing two pidfds is as simple as comparing
+ * inode numbers.
+ *
+	 * When a wraparound happens on 32 bit, multiple pidfds with the
+	 * same inode number are likely to exist. (This isn't a problem:
+	 * before pidfs, pidfds used the anonymous inode, so all pidfds
+	 * had the same inode number.) Userspace can
+ * reconstruct the 64 bit identifier by retrieving both the
+ * inode number and the inode generation number to compare or
+ * use file handles.
+ */
+ if (pidfs_ino(pidfs_ino_nr) == 0)
+ pidfs_ino_nr += 2;
+
+ pid->ino = pidfs_ino_nr;
+ pid->stashed = NULL;
+ pidfs_ino_nr++;
+
+ write_seqcount_begin(&pidmap_lock_seq);
+ rb_find_add_rcu(&pid->pidfs_node, &pidfs_ino_tree, pidfs_ino_cmp);
+ write_seqcount_end(&pidmap_lock_seq);
+}
+
+void pidfs_remove_pid(struct pid *pid)
+{
+ write_seqcount_begin(&pidmap_lock_seq);
+ rb_erase(&pid->pidfs_node, &pidfs_ino_tree);
+ write_seqcount_end(&pidmap_lock_seq);
+}
+
#ifdef CONFIG_PROC_FS
/**
* pidfd_show_fdinfo - print information about a pidfd
@@ -190,6 +282,27 @@ static long pidfd_info(struct task_struct *task, unsigned int cmd, unsigned long
return 0;
}
+static bool pidfs_ioctl_valid(unsigned int cmd)
+{
+ switch (cmd) {
+ case FS_IOC_GETVERSION:
+ case PIDFD_GET_CGROUP_NAMESPACE:
+ case PIDFD_GET_INFO:
+ case PIDFD_GET_IPC_NAMESPACE:
+ case PIDFD_GET_MNT_NAMESPACE:
+ case PIDFD_GET_NET_NAMESPACE:
+ case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
+ case PIDFD_GET_TIME_NAMESPACE:
+ case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
+ case PIDFD_GET_UTS_NAMESPACE:
+ case PIDFD_GET_USER_NAMESPACE:
+ case PIDFD_GET_PID_NAMESPACE:
+ return true;
+ }
+
+ return false;
+}
+
static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct task_struct *task __free(put_task) = NULL;
@@ -198,6 +311,17 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct ns_common *ns_common = NULL;
struct pid_namespace *pid_ns;
+ if (!pidfs_ioctl_valid(cmd))
+ return -ENOIOCTLCMD;
+
+ if (cmd == FS_IOC_GETVERSION) {
+ if (!arg)
+ return -EINVAL;
+
+ __u32 __user *argp = (__u32 __user *)arg;
+ return put_user(file_inode(file)->i_generation, argp);
+ }
+
task = get_pid_task(pid, PIDTYPE_PID);
if (!task)
return -ESRCH;
@@ -318,40 +442,6 @@ struct pid *pidfd_pid(const struct file *file)
static struct vfsmount *pidfs_mnt __ro_after_init;
-#if BITS_PER_LONG == 32
-/*
- * Provide a fallback mechanism for 32-bit systems so processes remain
- * reliably comparable by inode number even on those systems.
- */
-static DEFINE_IDA(pidfd_inum_ida);
-
-static int pidfs_inum(struct pid *pid, unsigned long *ino)
-{
- int ret;
-
- ret = ida_alloc_range(&pidfd_inum_ida, RESERVED_PIDS + 1,
- UINT_MAX, GFP_ATOMIC);
- if (ret < 0)
- return -ENOSPC;
-
- *ino = ret;
- return 0;
-}
-
-static inline void pidfs_free_inum(unsigned long ino)
-{
- if (ino > 0)
- ida_free(&pidfd_inum_ida, ino);
-}
-#else
-static inline int pidfs_inum(struct pid *pid, unsigned long *ino)
-{
- *ino = pid->ino;
- return 0;
-}
-#define pidfs_free_inum(ino) ((void)(ino))
-#endif
-
/*
* The vfs falls back to simple_setattr() if i_op->setattr() isn't
* implemented. Let's reject it completely until we have a clean
@@ -403,7 +493,6 @@ static void pidfs_evict_inode(struct inode *inode)
clear_inode(inode);
put_pid(pid);
- pidfs_free_inum(inode->i_ino);
}
static const struct super_operations pidfs_sops = {
@@ -421,25 +510,149 @@ static char *pidfs_dname(struct dentry *dentry, char *buffer, int buflen)
return dynamic_dname(buffer, buflen, "anon_inode:[pidfd]");
}
-static const struct dentry_operations pidfs_dentry_operations = {
+const struct dentry_operations pidfs_dentry_operations = {
.d_delete = always_delete_dentry,
.d_dname = pidfs_dname,
.d_prune = stashed_dentry_prune,
};
+static int pidfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
+ struct inode *parent)
+{
+ const struct pid *pid = inode->i_private;
+
+ if (*max_len < 2) {
+ *max_len = 2;
+ return FILEID_INVALID;
+ }
+
+ *max_len = 2;
+ *(u64 *)fh = pid->ino;
+ return FILEID_KERNFS;
+}
+
+static int pidfs_ino_find(const void *key, const struct rb_node *node)
+{
+ const u64 pid_ino = *(u64 *)key;
+ const struct pid *pid = rb_entry(node, struct pid, pidfs_node);
+
+ if (pid_ino < pid->ino)
+ return -1;
+ if (pid_ino > pid->ino)
+ return 1;
+ return 0;
+}
+
+/* Find a struct pid based on the inode number. */
+static struct pid *pidfs_ino_get_pid(u64 ino)
+{
+ struct pid *pid;
+ struct rb_node *node;
+ unsigned int seq;
+
+ guard(rcu)();
+ do {
+ seq = read_seqcount_begin(&pidmap_lock_seq);
+ node = rb_find_rcu(&ino, &pidfs_ino_tree, pidfs_ino_find);
+ if (node)
+ break;
+ } while (read_seqcount_retry(&pidmap_lock_seq, seq));
+
+ if (!node)
+ return NULL;
+
+ pid = rb_entry(node, struct pid, pidfs_node);
+
+ /* Within our pid namespace hierarchy? */
+ if (pid_vnr(pid) == 0)
+ return NULL;
+
+ return get_pid(pid);
+}
+
+static struct dentry *pidfs_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len,
+ int fh_type)
+{
+ int ret;
+ u64 pid_ino;
+ struct path path;
+ struct pid *pid;
+
+ if (fh_len < 2)
+ return NULL;
+
+ switch (fh_type) {
+ case FILEID_KERNFS:
+ pid_ino = *(u64 *)fid;
+ break;
+ default:
+ return NULL;
+ }
+
+ pid = pidfs_ino_get_pid(pid_ino);
+ if (!pid)
+ return NULL;
+
+ ret = path_from_stashed(&pid->stashed, pidfs_mnt, pid, &path);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mntput(path.mnt);
+ return path.dentry;
+}
+
+/*
+ * Make sure that we reject any nonsensical flags that users pass via
+ * open_by_handle_at(). Note that PIDFD_THREAD is defined as O_EXCL, and
+ * PIDFD_NONBLOCK as O_NONBLOCK.
+ */
+#define VALID_FILE_HANDLE_OPEN_FLAGS \
+ (O_RDONLY | O_WRONLY | O_RDWR | O_NONBLOCK | O_CLOEXEC | O_EXCL)
+
+static int pidfs_export_permission(struct handle_to_path_ctx *ctx,
+ unsigned int oflags)
+{
+ if (oflags & ~(VALID_FILE_HANDLE_OPEN_FLAGS | O_LARGEFILE))
+ return -EINVAL;
+
+ /*
+ * pidfd_ino_get_pid() will verify that the struct pid is part
+ * of the caller's pid namespace hierarchy. No further
+ * permission checks are needed.
+ */
+ return 0;
+}
+
+static struct file *pidfs_export_open(struct path *path, unsigned int oflags)
+{
+ /*
+	 * Clear O_LARGEFILE as open_by_handle_at() forces it, and set
+	 * O_RDWR as pidfds are always read-write.
+ */
+ oflags &= ~O_LARGEFILE;
+ return dentry_open(path, oflags | O_RDWR, current_cred());
+}
+
+static const struct export_operations pidfs_export_operations = {
+ .encode_fh = pidfs_encode_fh,
+ .fh_to_dentry = pidfs_fh_to_dentry,
+ .open = pidfs_export_open,
+ .permission = pidfs_export_permission,
+};
+
static int pidfs_init_inode(struct inode *inode, void *data)
{
+ const struct pid *pid = data;
+
inode->i_private = data;
inode->i_flags |= S_PRIVATE;
inode->i_mode |= S_IRWXU;
inode->i_op = &pidfs_inode_operations;
inode->i_fop = &pidfs_file_operations;
- /*
- * Inode numbering for pidfs start at RESERVED_PIDS + 1. This
- * avoids collisions with the root inode which is 1 for pseudo
- * filesystems.
- */
- return pidfs_inum(data, &inode->i_ino);
+ inode->i_ino = pidfs_ino(pid->ino);
+ inode->i_generation = pidfs_gen(pid->ino);
+ return 0;
}
static void pidfs_put_data(void *data)
@@ -462,6 +675,7 @@ static int pidfs_init_fs_context(struct fs_context *fc)
return -ENOMEM;
ctx->ops = &pidfs_sops;
+ ctx->eops = &pidfs_export_operations;
ctx->dops = &pidfs_dentry_operations;
fc->s_fs_info = (void *)&pidfs_stashed_ops;
return 0;
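As the pidfs_add_pid() comment above explains, a pidfd's identity on 32-bit systems is the (inode number, generation) pair rather than the inode number alone, with the generation exposed through the FS_IOC_GETVERSION ioctl wired up here. The userspace sketch below is illustrative; same_process() is hypothetical and error handling is trimmed:

	#include <linux/fs.h>
	#include <stdbool.h>
	#include <sys/ioctl.h>
	#include <sys/stat.h>

	static bool same_process(int pidfd_a, int pidfd_b)
	{
		struct stat st_a, st_b;
		__u32 gen_a = 0, gen_b = 0;

		fstat(pidfd_a, &st_a);
		fstat(pidfd_b, &st_b);
		ioctl(pidfd_a, FS_IOC_GETVERSION, &gen_a);
		ioctl(pidfd_b, FS_IOC_GETVERSION, &gen_b);

		/* On 64-bit the generation is always 0, so this is just an inode compare. */
		return st_a.st_ino == st_b.st_ino && gen_a == gen_b;
	}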
diff --git a/fs/pipe.c b/fs/pipe.c
index 12b22c2723b7..82fede0f2111 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -253,7 +253,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
size_t total_len = iov_iter_count(to);
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
- bool was_full, wake_next_reader = false;
+ bool wake_writer = false, wake_next_reader = false;
ssize_t ret;
/* Null read succeeds. */
@@ -264,14 +264,13 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
mutex_lock(&pipe->mutex);
/*
- * We only wake up writers if the pipe was full when we started
- * reading in order to avoid unnecessary wakeups.
+ * We only wake up writers if the pipe was full when we started reading
+ * and it is no longer full after reading to avoid unnecessary wakeups.
*
* But when we do wake up writers, we do so using a sync wakeup
* (WF_SYNC), because we want them to get going and generate more
* data for us.
*/
- was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
for (;;) {
/* Read ->head with a barrier vs post_one_notification() */
unsigned int head = smp_load_acquire(&pipe->head);
@@ -340,8 +339,10 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
buf->len = 0;
}
- if (!buf->len)
+ if (!buf->len) {
+ wake_writer |= pipe_full(head, tail, pipe->max_usage);
tail = pipe_update_tail(pipe, buf, tail);
+ }
total_len -= chars;
if (!total_len)
break; /* common path: read succeeded */
@@ -377,7 +378,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
* _very_ unlikely case that the pipe was full, but we got
* no data.
*/
- if (unlikely(was_full))
+ if (unlikely(wake_writer))
wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
@@ -390,15 +391,15 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
return -ERESTARTSYS;
- mutex_lock(&pipe->mutex);
- was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
+ wake_writer = false;
wake_next_reader = true;
+ mutex_lock(&pipe->mutex);
}
if (pipe_empty(pipe->head, pipe->tail))
wake_next_reader = false;
mutex_unlock(&pipe->mutex);
- if (was_full)
+ if (wake_writer)
wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
if (wake_next_reader)
wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
diff --git a/fs/pnode.c b/fs/pnode.c
index a799e0315cc9..ef048f008bdd 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -611,10 +611,10 @@ int propagate_umount(struct list_head *list)
continue;
} else if (child->mnt.mnt_flags & MNT_UMOUNT) {
/*
- * We have come accross an partially unmounted
- * mount in list that has not been visited yet.
- * Remember it has been visited and continue
- * about our merry way.
+ * We have come across a partially unmounted
+ * mount in a list that has not been visited
+ * yet. Remember it has been visited and
+ * continue about our merry way.
*/
list_add_tail(&child->mnt_umounting, &visited);
continue;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 55ed3510d2bb..d6a0369caa93 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -500,7 +500,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
* a program is not able to use ptrace(2) in that case. It is
* safe because the task has stopped executing permanently.
*/
- if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
+ if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE|PF_POSTCOREDUMP))) {
if (try_get_task_stack(task)) {
eip = KSTK_EIP(task);
esp = KSTK_ESP(task);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index e376f48c4b8b..1cb33771bf9f 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -65,7 +65,11 @@ static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
#endif
static LIST_HEAD(kclist_head);
-static DECLARE_RWSEM(kclist_lock);
+static int kcore_nphdr;
+static size_t kcore_phdrs_len;
+static size_t kcore_notes_len;
+static size_t kcore_data_offset;
+DEFINE_STATIC_PERCPU_RWSEM(kclist_lock);
static int kcore_need_update = 1;
/*
@@ -101,33 +105,32 @@ void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
list_add_tail(&new->list, &kclist_head);
}
-static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
- size_t *data_offset)
+static void update_kcore_size(void)
{
size_t try, size;
struct kcore_list *m;
- *nphdr = 1; /* PT_NOTE */
+ kcore_nphdr = 1; /* PT_NOTE */
size = 0;
list_for_each_entry(m, &kclist_head, list) {
try = kc_vaddr_to_offset((size_t)m->addr + m->size);
if (try > size)
size = try;
- *nphdr = *nphdr + 1;
+ kcore_nphdr++;
}
- *phdrs_len = *nphdr * sizeof(struct elf_phdr);
- *notes_len = (4 * sizeof(struct elf_note) +
- 3 * ALIGN(sizeof(CORE_STR), 4) +
- VMCOREINFO_NOTE_NAME_BYTES +
- ALIGN(sizeof(struct elf_prstatus), 4) +
- ALIGN(sizeof(struct elf_prpsinfo), 4) +
- ALIGN(arch_task_struct_size, 4) +
- ALIGN(vmcoreinfo_size, 4));
- *data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
- *notes_len);
- return *data_offset + size;
+ kcore_phdrs_len = kcore_nphdr * sizeof(struct elf_phdr);
+ kcore_notes_len = (4 * sizeof(struct elf_note) +
+ 3 * ALIGN(sizeof(CORE_STR), 4) +
+ VMCOREINFO_NOTE_NAME_BYTES +
+ ALIGN(sizeof(struct elf_prstatus), 4) +
+ ALIGN(sizeof(struct elf_prpsinfo), 4) +
+ ALIGN(arch_task_struct_size, 4) +
+ ALIGN(vmcoreinfo_size, 4));
+ kcore_data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + kcore_phdrs_len +
+ kcore_notes_len);
+ proc_root_kcore->size = kcore_data_offset + size;
}
#ifdef CONFIG_HIGHMEM
@@ -270,12 +273,10 @@ static int kcore_update_ram(void)
{
LIST_HEAD(list);
LIST_HEAD(garbage);
- int nphdr;
- size_t phdrs_len, notes_len, data_offset;
struct kcore_list *tmp, *pos;
int ret = 0;
- down_write(&kclist_lock);
+ percpu_down_write(&kclist_lock);
if (!xchg(&kcore_need_update, 0))
goto out;
@@ -293,11 +294,10 @@ static int kcore_update_ram(void)
}
list_splice_tail(&list, &kclist_head);
- proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
- &data_offset);
+ update_kcore_size();
out:
- up_write(&kclist_lock);
+ percpu_up_write(&kclist_lock);
list_for_each_entry_safe(pos, tmp, &garbage, list) {
list_del(&pos->list);
kfree(pos);
@@ -326,27 +326,24 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
struct file *file = iocb->ki_filp;
char *buf = file->private_data;
loff_t *fpos = &iocb->ki_pos;
- size_t phdrs_offset, notes_offset, data_offset;
+ size_t phdrs_offset, notes_offset;
size_t page_offline_frozen = 1;
- size_t phdrs_len, notes_len;
struct kcore_list *m;
size_t tsz;
- int nphdr;
unsigned long start;
size_t buflen = iov_iter_count(iter);
size_t orig_buflen = buflen;
int ret = 0;
- down_read(&kclist_lock);
+ percpu_down_read(&kclist_lock);
/*
* Don't race against drivers that set PageOffline() and expect no
* further page access.
*/
page_offline_freeze();
- get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
phdrs_offset = sizeof(struct elfhdr);
- notes_offset = phdrs_offset + phdrs_len;
+ notes_offset = phdrs_offset + kcore_phdrs_len;
/* ELF file header. */
if (buflen && *fpos < sizeof(struct elfhdr)) {
@@ -368,7 +365,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
.e_flags = ELF_CORE_EFLAGS,
.e_ehsize = sizeof(struct elfhdr),
.e_phentsize = sizeof(struct elf_phdr),
- .e_phnum = nphdr,
+ .e_phnum = kcore_nphdr,
};
tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
@@ -382,10 +379,10 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
}
/* ELF program headers. */
- if (buflen && *fpos < phdrs_offset + phdrs_len) {
+ if (buflen && *fpos < phdrs_offset + kcore_phdrs_len) {
struct elf_phdr *phdrs, *phdr;
- phdrs = kzalloc(phdrs_len, GFP_KERNEL);
+ phdrs = kzalloc(kcore_phdrs_len, GFP_KERNEL);
if (!phdrs) {
ret = -ENOMEM;
goto out;
@@ -393,13 +390,14 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
phdrs[0].p_type = PT_NOTE;
phdrs[0].p_offset = notes_offset;
- phdrs[0].p_filesz = notes_len;
+ phdrs[0].p_filesz = kcore_notes_len;
phdr = &phdrs[1];
list_for_each_entry(m, &kclist_head, list) {
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R | PF_W | PF_X;
- phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
+ phdr->p_offset = kc_vaddr_to_offset(m->addr)
+ + kcore_data_offset;
phdr->p_vaddr = (size_t)m->addr;
if (m->type == KCORE_RAM)
phdr->p_paddr = __pa(m->addr);
@@ -412,7 +410,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
phdr++;
}
- tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
+ tsz = min_t(size_t, buflen,
+ phdrs_offset + kcore_phdrs_len - *fpos);
if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz,
iter) != tsz) {
kfree(phdrs);
@@ -426,7 +425,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
}
/* ELF note segment. */
- if (buflen && *fpos < notes_offset + notes_len) {
+ if (buflen && *fpos < notes_offset + kcore_notes_len) {
struct elf_prstatus prstatus = {};
struct elf_prpsinfo prpsinfo = {
.pr_sname = 'R',
@@ -438,7 +437,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
strscpy(prpsinfo.pr_psargs, saved_command_line,
sizeof(prpsinfo.pr_psargs));
- notes = kzalloc(notes_len, GFP_KERNEL);
+ notes = kzalloc(kcore_notes_len, GFP_KERNEL);
if (!notes) {
ret = -ENOMEM;
goto out;
@@ -459,9 +458,10 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
*/
append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
vmcoreinfo_data,
- min(vmcoreinfo_size, notes_len - i));
+ min(vmcoreinfo_size, kcore_notes_len - i));
- tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
+ tsz = min_t(size_t, buflen,
+ notes_offset + kcore_notes_len - *fpos);
if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) {
kfree(notes);
ret = -EFAULT;
@@ -477,7 +477,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
* Check to see if our file offset matches with any of
* the addresses in the elf_phdr on our list.
*/
- start = kc_offset_to_vaddr(*fpos - data_offset);
+ start = kc_offset_to_vaddr(*fpos - kcore_data_offset);
if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
tsz = buflen;
@@ -626,7 +626,7 @@ skip:
out:
page_offline_thaw();
- up_read(&kclist_lock);
+ percpu_up_read(&kclist_lock);
if (ret)
return ret;
return orig_buflen - buflen;
@@ -663,6 +663,7 @@ static int release_kcore(struct inode *inode, struct file *file)
}
static const struct proc_ops kcore_proc_ops = {
+ .proc_flags = PROC_ENTRY_PERMANENT,
.proc_read_iter = read_kcore_iter,
.proc_open = open_kcore,
.proc_release = release_kcore,
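kclist_lock switches from a plain rwsem to a per-CPU rwsem because /proc/kcore reads (the read side) vastly outnumber list updates (the write side), so cheap reader acquisition is worth a much more expensive writer. A generic sketch of this read-mostly pattern with a hypothetical list, just to show the API:

	#include <linux/list.h>
	#include <linux/percpu-rwsem.h>

	static LIST_HEAD(example_list);
	DEFINE_STATIC_PERCPU_RWSEM(example_lock);

	static void example_read_side(void)
	{
		struct list_head *pos;

		percpu_down_read(&example_lock);	/* cheap, scales with readers */
		list_for_each(pos, &example_list)
			;				/* read-only walk */
		percpu_up_read(&example_lock);
	}

	static void example_write_side(struct list_head *new)
	{
		percpu_down_write(&example_lock);	/* slow, waits for all readers */
		list_add_tail(new, &example_list);
		percpu_up_write(&example_lock);
	}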
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 8e159fc78c0a..c610224faf10 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -83,7 +83,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
res = ns_get_name(name, sizeof(name), task, ns_ops);
if (res >= 0)
- res = readlink_copy(buffer, buflen, name);
+ res = readlink_copy(buffer, buflen, name, strlen(name));
}
put_task_struct(task);
return res;
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 3d8a82cee63e..658bf199d424 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -404,6 +404,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
if (!iov_iter_count(iter))
return acc;
}
+
+ cond_resched();
}
return acc;
diff --git a/fs/select.c b/fs/select.c
index e223d1fe9d55..7da531b1cf6b 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -786,7 +786,7 @@ static inline int get_sigset_argpack(struct sigset_argpack *to,
}
return 0;
Efault:
- user_access_end();
+ user_read_access_end();
return -EFAULT;
}
@@ -1355,7 +1355,7 @@ static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to,
}
return 0;
Efault:
- user_access_end();
+ user_read_access_end();
return -EFAULT;
}
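The select.c change pairs the error exit with the read-only form of the user-access window, matching the user_read_access_begin() that opened it. A sketch of the expected pairing; example_pair and example_copy are hypothetical:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct example_pair {
		u64 a;
		u64 b;
	};

	static int example_copy(struct example_pair *dst,
				const struct example_pair __user *src)
	{
		if (!user_read_access_begin(src, sizeof(*src)))
			return -EFAULT;
		unsafe_get_user(dst->a, &src->a, Efault);
		unsafe_get_user(dst->b, &src->b, Efault);
		user_read_access_end();
		return 0;
	Efault:
		user_read_access_end();		/* must match the _read_ begin above */
		return -EFAULT;
	}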
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 6cb1e81993f8..7f1cacc89dbb 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -152,8 +152,17 @@ again:
spin_unlock(&ses->ses_lock);
rc = cifs_negotiate_protocol(0, ses, server);
- if (!rc)
+ if (!rc) {
rc = cifs_setup_session(0, ses, server, ses->local_nls);
+ if ((rc == -EACCES) || (rc == -EHOSTDOWN) || (rc == -EKEYREVOKED)) {
+ /*
+ * Try alternate password for next reconnect if an alternate
+ * password is available.
+ */
+ if (ses->password2)
+ swap(ses->password2, ses->password);
+ }
+ }
/* do we need to reconnect tcon? */
if (rc || !tcon->need_reconnect) {
@@ -1258,14 +1267,6 @@ openRetry:
return rc;
}
-static void cifs_readv_worker(struct work_struct *work)
-{
- struct cifs_io_subrequest *rdata =
- container_of(work, struct cifs_io_subrequest, subreq.work);
-
- netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
-}
-
static void
cifs_readv_callback(struct mid_q_entry *mid)
{
@@ -1330,11 +1331,13 @@ cifs_readv_callback(struct mid_q_entry *mid)
} else if (rdata->got_bytes > 0) {
__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
}
+ if (rdata->got_bytes)
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
}
rdata->credits.value = 0;
+ rdata->subreq.error = rdata->result;
rdata->subreq.transferred += rdata->got_bytes;
- INIT_WORK(&rdata->subreq.work, cifs_readv_worker);
queue_work(cifsiod_wq, &rdata->subreq.work);
release_mid(mid);
add_credits(server, &credits, 0);
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index ddcc9e514a0e..eaa6be4456d0 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -1044,6 +1044,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
/* Release netns reference for this server. */
put_net(cifs_net_ns(server));
kfree(server->leaf_fullpath);
+ kfree(server->hostname);
kfree(server);
length = atomic_dec_return(&tcpSesAllocCount);
@@ -1670,8 +1671,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
kfree_sensitive(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
- kfree(server->hostname);
- server->hostname = NULL;
task = xchg(&server->tsk, NULL);
if (task)
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 3b2d33291a7e..79de2f2f9c41 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -227,7 +227,8 @@ static void cifs_issue_read(struct netfs_io_subrequest *subreq)
return;
failed:
- netfs_read_subreq_terminated(subreq, rc, false);
+ subreq->error = rc;
+ netfs_read_subreq_terminated(subreq);
}
/*
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 87cb1872db28..7121d9e0f404 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4388,7 +4388,7 @@ static struct folio_queue *cifs_alloc_folioq_buffer(ssize_t size)
p = kmalloc(sizeof(*p), GFP_NOFS);
if (!p)
goto nomem;
- folioq_init(p);
+ folioq_init(p, 0);
if (tail) {
tail->next = p;
p->prev = tail;
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 0577556f0a41..9f54596a6866 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -4500,14 +4500,6 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
return rc;
}
-static void smb2_readv_worker(struct work_struct *work)
-{
- struct cifs_io_subrequest *rdata =
- container_of(work, struct cifs_io_subrequest, subreq.work);
-
- netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
-}
-
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
@@ -4615,16 +4607,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
rdata->result = 0;
}
- __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
+ if (rdata->got_bytes)
+ __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
}
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
server->credits, server->in_flight,
0, cifs_trace_rw_credits_read_response_clear);
rdata->credits.value = 0;
+ rdata->subreq.error = rdata->result;
rdata->subreq.transferred += rdata->got_bytes;
trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
- INIT_WORK(&rdata->subreq.work, smb2_readv_worker);
- queue_work(cifsiod_wq, &rdata->subreq.work);
+ netfs_read_subreq_terminated(&rdata->subreq);
release_mid(mid);
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
server->credits, server->in_flight,
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index f51cd0816b1a..425c756bcfb8 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -781,10 +781,6 @@ int __ksmbd_override_fsids(struct ksmbd_work *work,
WARN_ON(work->saved_cred);
work->saved_cred = override_creds(cred);
- if (!work->saved_cred) {
- abort_creds(cred);
- return -EINVAL;
- }
return 0;
}
@@ -796,13 +792,11 @@ int ksmbd_override_fsids(struct ksmbd_work *work)
void ksmbd_revert_fsids(struct ksmbd_work *work)
{
const struct cred *cred;
-
WARN_ON(!work->saved_cred);
- cred = current_cred();
- revert_creds(work->saved_cred);
- put_cred(cred);
+ cred = revert_creds(work->saved_cred);
work->saved_cred = NULL;
+ put_cred(cred);
}
__le32 smb_map_generic_desired_access(__le32 daccess)
diff --git a/fs/stat.c b/fs/stat.c
index 0870e969a8a0..2c0e111a098a 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -725,6 +725,7 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer)
tmp.stx_mnt_id = stat->mnt_id;
tmp.stx_dio_mem_align = stat->dio_mem_align;
tmp.stx_dio_offset_align = stat->dio_offset_align;
+ tmp.stx_dio_read_offset_align = stat->dio_read_offset_align;
tmp.stx_subvol = stat->subvol;
tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min;
tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max;
diff --git a/fs/vboxsf/Kconfig b/fs/vboxsf/Kconfig
index b84586ae08b3..d4694026db8b 100644
--- a/fs/vboxsf/Kconfig
+++ b/fs/vboxsf/Kconfig
@@ -1,6 +1,6 @@
config VBOXSF_FS
tristate "VirtualBox guest shared folder (vboxsf) support"
- depends on X86 && VBOXGUEST
+ depends on (ARM64 || X86) && VBOXGUEST
select NLS
help
VirtualBox hosts can share folders with guests, this driver
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 0789c18aaa18..f95103325318 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1204,7 +1204,16 @@ xfs_file_ioctl(
struct xfs_buftarg *target = xfs_inode_buftarg(ip);
struct dioattr da;
- da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
+ da.d_mem = target->bt_logical_sectorsize;
+
+ /*
+ * See xfs_report_dioalign() for an explanation about why this
+ * reports a value larger than the sector size for COW inodes.
+ */
+ if (xfs_is_cow_inode(ip))
+ da.d_miniosz = xfs_inode_alloc_unitsize(ip);
+ else
+ da.d_miniosz = target->bt_logical_sectorsize;
da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
if (copy_to_user(arg, &da, sizeof(da)))
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 207e0dadffc3..40289fe6f5b2 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -573,17 +573,43 @@ xfs_stat_blksize(
}
static void
-xfs_get_atomic_write_attr(
+xfs_report_dioalign(
struct xfs_inode *ip,
- unsigned int *unit_min,
- unsigned int *unit_max)
+ struct kstat *stat)
{
- if (!xfs_inode_can_atomicwrite(ip)) {
- *unit_min = *unit_max = 0;
- return;
- }
+ struct xfs_buftarg *target = xfs_inode_buftarg(ip);
+ struct block_device *bdev = target->bt_bdev;
+
+ stat->result_mask |= STATX_DIOALIGN | STATX_DIO_READ_ALIGN;
+ stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
- *unit_min = *unit_max = ip->i_mount->m_sb.sb_blocksize;
+ /*
+ * For COW inodes, we can only perform out of place writes of entire
+ * allocation units (blocks or RT extents).
+ * For writes smaller than the allocation unit, we must fall back to
+ * buffered I/O to perform read-modify-write cycles. At best this is
+ * highly inefficient; at worst it leads to page cache invalidation
+ * races. Tell applications to avoid this by reporting the larger write
+ * alignment in dio_offset_align, and the smaller read alignment in
+ * dio_read_offset_align.
+ */
+ stat->dio_read_offset_align = bdev_logical_block_size(bdev);
+ if (xfs_is_cow_inode(ip))
+ stat->dio_offset_align = xfs_inode_alloc_unitsize(ip);
+ else
+ stat->dio_offset_align = stat->dio_read_offset_align;
+}
+
+static void
+xfs_report_atomic_write(
+ struct xfs_inode *ip,
+ struct kstat *stat)
+{
+ unsigned int unit_min = 0, unit_max = 0;
+
+ if (xfs_inode_can_atomicwrite(ip))
+ unit_min = unit_max = ip->i_mount->m_sb.sb_blocksize;
+ generic_fill_statx_atomic_writes(stat, unit_min, unit_max);
}
STATIC int
@@ -647,22 +673,10 @@ xfs_vn_getattr(
stat->rdev = inode->i_rdev;
break;
case S_IFREG:
- if (request_mask & STATX_DIOALIGN) {
- struct xfs_buftarg *target = xfs_inode_buftarg(ip);
- struct block_device *bdev = target->bt_bdev;
-
- stat->result_mask |= STATX_DIOALIGN;
- stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
- stat->dio_offset_align = bdev_logical_block_size(bdev);
- }
- if (request_mask & STATX_WRITE_ATOMIC) {
- unsigned int unit_min, unit_max;
-
- xfs_get_atomic_write_attr(ip, &unit_min,
- &unit_max);
- generic_fill_statx_atomic_writes(stat,
- unit_min, unit_max);
- }
+ if (request_mask & (STATX_DIOALIGN | STATX_DIO_READ_ALIGN))
+ xfs_report_dioalign(ip, stat);
+ if (request_mask & STATX_WRITE_ATOMIC)
+ xfs_report_atomic_write(ip, stat);
fallthrough;
default:
stat->blksize = xfs_stat_blksize(ip);
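Userspace can ask for both alignments that xfs now reports by requesting STATX_DIOALIGN together with the new STATX_DIO_READ_ALIGN bit; stx_dio_read_offset_align is the field added to struct statx by this series, so the sketch below assumes headers from this series. report_dio_alignment() is hypothetical and error handling is trimmed:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/stat.h>

	static void report_dio_alignment(int fd)
	{
		struct statx stx;

		if (statx(fd, "", AT_EMPTY_PATH,
			  STATX_DIOALIGN | STATX_DIO_READ_ALIGN, &stx) == 0)
			printf("DIO write align %u, read align %u\n",
			       stx.stx_dio_offset_align, stx.stx_dio_read_offset_align);
	}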
diff --git a/include/linux/cred.h b/include/linux/cred.h
index e4a3155fe409..0c3c4b16b469 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -155,8 +155,6 @@ extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern int commit_creds(struct cred *);
extern void abort_creds(struct cred *);
-extern const struct cred *override_creds(const struct cred *);
-extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
@@ -172,12 +170,7 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
cred->cap_inheritable));
}
-/*
- * Override creds without bumping reference count. Caller must ensure
- * reference remains valid or has taken reference. Almost always not the
- * interface you want. Use override_creds()/revert_creds() instead.
- */
-static inline const struct cred *override_creds_light(const struct cred *override_cred)
+static inline const struct cred *override_creds(const struct cred *override_cred)
{
const struct cred *old = current->cred;
@@ -185,35 +178,12 @@ static inline const struct cred *override_creds_light(const struct cred *overrid
return old;
}
-static inline void revert_creds_light(const struct cred *revert_cred)
-{
- rcu_assign_pointer(current->cred, revert_cred);
-}
-
-/**
- * get_new_cred_many - Get references on a new set of credentials
- * @cred: The new credentials to reference
- * @nr: Number of references to acquire
- *
- * Get references on the specified set of new credentials. The caller must
- * release all acquired references.
- */
-static inline struct cred *get_new_cred_many(struct cred *cred, int nr)
+static inline const struct cred *revert_creds(const struct cred *revert_cred)
{
- atomic_long_add(nr, &cred->usage);
- return cred;
-}
+ const struct cred *override_cred = current->cred;
-/**
- * get_new_cred - Get a reference on a new set of credentials
- * @cred: The new credentials to reference
- *
- * Get a reference on the specified set of new credentials. The caller must
- * release the reference.
- */
-static inline struct cred *get_new_cred(struct cred *cred)
-{
- return get_new_cred_many(cred, 1);
+ rcu_assign_pointer(current->cred, revert_cred);
+ return override_cred;
}
/**
@@ -236,7 +206,8 @@ static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
if (!cred)
return cred;
nonconst_cred->non_rcu = 0;
- return get_new_cred_many(nonconst_cred, nr);
+ atomic_long_add(nr, &nonconst_cred->usage);
+ return cred;
}
/*
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 4cc8801e50e3..a087606ace19 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -3,6 +3,7 @@
#define LINUX_EXPORTFS_H 1
#include <linux/types.h>
+#include <linux/path.h>
struct dentry;
struct iattr;
@@ -156,6 +157,17 @@ struct fid {
};
};
+enum handle_to_path_flags {
+ HANDLE_CHECK_PERMS = (1 << 0),
+ HANDLE_CHECK_SUBTREE = (1 << 1),
+};
+
+struct handle_to_path_ctx {
+ struct path root;
+ enum handle_to_path_flags flags;
+ unsigned int fh_flags;
+};
+
#define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */
#define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */
#define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */
@@ -225,6 +237,12 @@ struct fid {
* is also a directory. In the event that it cannot be found, or storage
* space cannot be allocated, a %ERR_PTR should be returned.
*
+ * permission:
+ * Allow filesystems to specify a custom permission function.
+ *
+ * open:
+ * Allow filesystems to specify a custom open function.
+ *
* commit_metadata:
* @commit_metadata should commit metadata changes to stable storage.
*
@@ -251,6 +269,8 @@ struct export_operations {
bool write, u32 *device_generation);
int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
int nr_iomaps, struct iattr *iattr);
+ int (*permission)(struct handle_to_path_ctx *ctx, unsigned int oflags);
+ struct file * (*open)(struct path *path, unsigned int oflags);
#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */
#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */
#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
index c50882f19235..966092ffa89a 100644
--- a/include/linux/fiemap.h
+++ b/include/linux/fiemap.h
@@ -5,12 +5,18 @@
#include <uapi/linux/fiemap.h>
#include <linux/fs.h>
+/**
+ * struct fiemap_extent_info - fiemap request to a filesystem
+ * @fi_flags: Flags as passed from user
+ * @fi_extents_mapped: Number of mapped extents
+ * @fi_extents_max: Size of fiemap_extent array
+ * @fi_extents_start: Start of fiemap_extent array
+ */
struct fiemap_extent_info {
- unsigned int fi_flags; /* Flags as passed from user */
- unsigned int fi_extents_mapped; /* Number of mapped extents */
- unsigned int fi_extents_max; /* Size of fiemap_extent array */
- struct fiemap_extent __user *fi_extents_start; /* Start of
- fiemap_extent array */
+ unsigned int fi_flags;
+ unsigned int fi_extents_mapped;
+ unsigned int fi_extents_max;
+ struct fiemap_extent __user *fi_extents_start;
};
int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
index 3abe614ef5f0..4d3f8074c137 100644
--- a/include/linux/folio_queue.h
+++ b/include/linux/folio_queue.h
@@ -37,16 +37,20 @@ struct folio_queue {
#if PAGEVEC_SIZE > BITS_PER_LONG
#error marks is not big enough
#endif
+ unsigned int rreq_id;
+ unsigned int debug_id;
};
/**
* folioq_init - Initialise a folio queue segment
* @folioq: The segment to initialise
+ * @rreq_id: The request identifier to use in tracelines.
*
- * Initialise a folio queue segment. Note that the folio pointers are
- * left uninitialised.
+ * Initialise a folio queue segment and set an identifier to be used in traces.
+ *
+ * Note that the folio pointers are left uninitialised.
*/
-static inline void folioq_init(struct folio_queue *folioq)
+static inline void folioq_init(struct folio_queue *folioq, unsigned int rreq_id)
{
folio_batch_init(&folioq->vec);
folioq->next = NULL;
@@ -54,6 +58,8 @@ static inline void folioq_init(struct folio_queue *folioq)
folioq->marks = 0;
folioq->marks2 = 0;
folioq->marks3 = 0;
+ folioq->rreq_id = rreq_id;
+ folioq->debug_id = 0;
}
/**
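Not part of the diff: a short sketch of the updated initialiser, which now takes the owning request's debug ID so folio-queue tracelines can be correlated with the request.

#include <linux/folio_queue.h>
#include <linux/slab.h>

static struct folio_queue *example_new_folioq(unsigned int rreq_debug_id)
{
	struct folio_queue *fq = kmalloc(sizeof(*fq), GFP_KERNEL);

	if (fq)
		folioq_init(fq, rreq_debug_id);	/* new second argument */
	return fq;
}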
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7e29433c5ecc..2b8447f53200 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -322,6 +322,7 @@ struct readahead_control;
#define IOCB_NOWAIT (__force int) RWF_NOWAIT
#define IOCB_APPEND (__force int) RWF_APPEND
#define IOCB_ATOMIC (__force int) RWF_ATOMIC
+#define IOCB_DONTCACHE (__force int) RWF_DONTCACHE
/* non-RWF related bits - start at 16 */
#define IOCB_EVENTFD (1 << 16)
@@ -356,7 +357,8 @@ struct readahead_control;
{ IOCB_SYNC, "SYNC" }, \
{ IOCB_NOWAIT, "NOWAIT" }, \
{ IOCB_APPEND, "APPEND" }, \
- { IOCB_ATOMIC, "ATOMIC"}, \
+ { IOCB_ATOMIC, "ATOMIC" }, \
+ { IOCB_DONTCACHE, "DONTCACHE" }, \
{ IOCB_EVENTFD, "EVENTFD"}, \
{ IOCB_DIRECT, "DIRECT" }, \
{ IOCB_WRITE, "WRITE" }, \
@@ -626,6 +628,7 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_XATTR 0x0008
#define IOP_DEFAULT_READLINK 0x0010
#define IOP_MGTIME 0x0020
+#define IOP_CACHED_LINK 0x0040
/*
* Keep mostly read-only and often accessed (especially for
@@ -723,7 +726,10 @@ struct inode {
};
struct file_lock_context *i_flctx;
struct address_space i_data;
- struct list_head i_devices;
+ union {
+ struct list_head i_devices;
+ int i_linklen;
+ };
union {
struct pipe_inode_info *i_pipe;
struct cdev *i_cdev;
@@ -749,6 +755,13 @@ struct inode {
void *i_private; /* fs or device private pointer */
} __randomize_layout;
+static inline void inode_set_cached_link(struct inode *inode, char *link, int linklen)
+{
+ inode->i_link = link;
+ inode->i_linklen = linklen;
+ inode->i_opflags |= IOP_CACHED_LINK;
+}
+
/*
* Get bit address from inode->i_state to use with wait_var_event()
 * infrastructure.
@@ -2127,6 +2140,8 @@ struct file_operations {
#define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5))
/* Supports asynchronous lock callbacks */
#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6))
+/* File system supports uncached read/write buffered IO */
+#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7))
/* Wrap a directory iterator that needs exclusive inode access */
int wrap_directory_iterator(struct file *, struct dir_context *,
@@ -3351,7 +3366,7 @@ extern const struct file_operations generic_ro_fops;
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
-extern int readlink_copy(char __user *, int, const char *);
+extern int readlink_copy(char __user *, int, const char *, int);
extern int page_readlink(struct dentry *, char __user *, int);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
@@ -3468,7 +3483,6 @@ struct offset_ctx {
void simple_offset_init(struct offset_ctx *octx);
int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
-int simple_offset_empty(struct dentry *dentry);
int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
int simple_offset_rename_exchange(struct inode *old_dir,
@@ -3614,6 +3628,14 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags,
if (!(ki->ki_filp->f_mode & FMODE_CAN_ATOMIC_WRITE))
return -EOPNOTSUPP;
}
+ if (flags & RWF_DONTCACHE) {
+ /* file system must support it */
+ if (!(ki->ki_filp->f_op->fop_flags & FOP_DONTCACHE))
+ return -EOPNOTSUPP;
+ /* DAX mappings not supported */
+ if (IS_DAX(ki->ki_filp->f_mapping->host))
+ return -EOPNOTSUPP;
+ }
kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
if (flags & RWF_SYNC)
kiocb_flags |= IOCB_DSYNC;
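Sketch only (examplefs is hypothetical): a filesystem has to opt in via ->fop_flags before a userspace RWF_DONTCACHE request gets past the kiocb_set_rw_flags() check added above.

static const struct file_operations examplefs_file_operations = {
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.fop_flags	= FOP_DONTCACHE,	/* allow RWF_DONTCACHE on this fs */
};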
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 7ef5f7ef31a9..f7bfdcf0dda3 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -386,6 +386,7 @@ extern void __init hrtimers_init(void);
extern void sysrq_timer_list_show(void);
int hrtimers_prepare_cpu(unsigned int cpu);
+int hrtimers_cpu_starting(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
int hrtimers_cpu_dying(unsigned int cpu);
#else
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c3a1f78bc884..c39f119659ba 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -34,14 +34,24 @@ struct lockref {
};
};
-extern void lockref_get(struct lockref *);
-extern int lockref_put_return(struct lockref *);
-extern int lockref_get_not_zero(struct lockref *);
-extern int lockref_put_not_zero(struct lockref *);
-extern int lockref_put_or_lock(struct lockref *);
-
-extern void lockref_mark_dead(struct lockref *);
-extern int lockref_get_not_dead(struct lockref *);
+/**
+ * lockref_init - Initialize a lockref
+ * @lockref: pointer to lockref structure
+ * @count: initial count
+ */
+static inline void lockref_init(struct lockref *lockref, unsigned int count)
+{
+ spin_lock_init(&lockref->lock);
+ lockref->count = count;
+}
+
+void lockref_get(struct lockref *lockref);
+int lockref_put_return(struct lockref *lockref);
+bool lockref_get_not_zero(struct lockref *lockref);
+bool lockref_put_or_lock(struct lockref *lockref);
+
+void lockref_mark_dead(struct lockref *lockref);
+bool lockref_get_not_dead(struct lockref *lockref);
/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
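An aside, not from the patch: with the new inline initialiser, the usual open-coded spin_lock_init() plus count assignment collapses into one call.

#include <linux/lockref.h>

struct example_obj {
	struct lockref	ref;
	/* ... payload ... */
};

static void example_obj_init(struct example_obj *obj)
{
	lockref_init(&obj->ref, 1);	/* object starts with one reference */
}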
diff --git a/include/linux/module.h b/include/linux/module.h
index 94acbacdcdf1..b3a643435357 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -773,7 +773,8 @@ void *__module_writable_address(struct module *mod, void *loc);
static inline void *module_writable_address(struct module *mod, void *loc)
{
- if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX) || !mod)
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX) || !mod ||
+ mod->state != MODULE_STATE_UNFORMED)
return loc;
return __module_writable_address(mod, loc);
}
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 04213d8ef837..dcc17ce8a959 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -75,7 +75,7 @@ struct vfsmount {
static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt)
{
/* Pairs with smp_store_release() in do_idmap_mount(). */
- return smp_load_acquire(&mnt->mnt_idmap);
+ return READ_ONCE(mnt->mnt_idmap);
}
extern int mnt_want_write(struct vfsmount *mnt);
@@ -113,7 +113,7 @@ extern struct vfsmount *kern_mount(struct file_system_type *);
extern void kern_unmount(struct vfsmount *mnt);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char __user *,
+int do_mount(const char *, const char __user *,
const char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(const struct path *);
extern void drop_collected_mounts(struct vfsmount *);
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index ecdd5ced16a8..071d05d81d38 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -18,9 +18,11 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
+#include <linux/rolling_buffer.h>
enum netfs_sreq_ref_trace;
typedef struct mempool_s mempool_t;
+struct folio_queue;
/**
* folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED]
@@ -71,6 +73,7 @@ struct netfs_inode {
#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
#define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */
#define NETFS_ICTX_MODIFIED_ATTR 3 /* Indicate change in mtime/ctime */
+#define NETFS_ICTX_SINGLE_NO_UPLOAD 4 /* Monolithic payload, cache but no upload */
};
/*
@@ -178,9 +181,6 @@ struct netfs_io_subrequest {
unsigned long long start; /* Where to start the I/O */
size_t len; /* Size of the I/O */
size_t transferred; /* Amount of data transferred */
- size_t consumed; /* Amount of read data consumed */
- size_t prev_donated; /* Amount of data donated from previous subreq */
- size_t next_donated; /* Amount of data donated from next subreq */
refcount_t ref;
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
@@ -188,9 +188,6 @@ struct netfs_io_subrequest {
u8 retry_count; /* The number of retries (0 on initial pass) */
enum netfs_io_source source; /* Where to read from/write to */
unsigned char stream_nr; /* I/O stream this belongs to */
- unsigned char curr_folioq_slot; /* Folio currently being read */
- unsigned char curr_folio_order; /* Order of folio */
- struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */
unsigned long flags;
#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
@@ -208,9 +205,11 @@ enum netfs_io_origin {
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
NETFS_READ_GAPS, /* This read is a synchronous read to fill gaps */
+ NETFS_READ_SINGLE, /* This read should be treated as a single object */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
NETFS_DIO_READ, /* This is a direct I/O read */
NETFS_WRITEBACK, /* This write was triggered by writepages */
+ NETFS_WRITEBACK_SINGLE, /* This monolithic write was triggered by writepages */
NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
NETFS_DIO_WRITE, /* This is a direct I/O write */
@@ -231,16 +230,16 @@ struct netfs_io_request {
struct address_space *mapping; /* The mapping being accessed */
struct kiocb *iocb; /* AIO completion vector */
struct netfs_cache_resources cache_resources;
+ struct netfs_io_request *copy_to_cache; /* Request to write just-read data to the cache */
struct readahead_control *ractl; /* Readahead descriptor */
struct list_head proc_link; /* Link in netfs_iorequests */
- struct list_head subrequests; /* Contributory I/O operations */
struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
#define NR_IO_STREAMS 2 //wreq->nr_io_streams
struct netfs_group *group; /* Writeback group being written back */
- struct folio_queue *buffer; /* Head of I/O buffer */
- struct folio_queue *buffer_tail; /* Tail of I/O buffer */
- struct iov_iter iter; /* Unencrypted-side iterator */
- struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
+ struct rolling_buffer buffer; /* Unencrypted buffer */
+#define NETFS_ROLLBUF_PUT_MARK ROLLBUF_MARK_1
+#define NETFS_ROLLBUF_PAGECACHE_MARK ROLLBUF_MARK_2
+ wait_queue_head_t waitq; /* Processor waiter */
void *netfs_priv; /* Private data for the netfs */
void *netfs_priv2; /* Private data for the netfs */
struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
@@ -251,28 +250,28 @@ struct netfs_io_request {
atomic_t subreq_counter; /* Next subreq->debug_index */
unsigned int nr_group_rel; /* Number of refs to release on ->group */
spinlock_t lock; /* Lock for queuing subreqs */
- atomic_t nr_outstanding; /* Number of ops in progress */
unsigned long long submitted; /* Amount submitted for I/O so far */
unsigned long long len; /* Length of the request */
size_t transferred; /* Amount to be indicated as transferred */
long error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
- u8 buffer_head_slot; /* First slot in ->buffer */
- u8 buffer_tail_slot; /* Next slot in ->buffer_tail */
unsigned long long i_size; /* Size of the file */
unsigned long long start; /* Start position */
atomic64_t issued_to; /* Write issuer folio cursor */
unsigned long long collected_to; /* Point we've collected to */
unsigned long long cleaned_to; /* Position we've cleaned folios to */
+ unsigned long long abandon_to; /* Position to abandon folios to */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
- size_t prev_donated; /* Fallback for subreq->prev_donated */
+ unsigned char front_folio_order; /* Order (size) of front folio */
refcount_t ref;
unsigned long flags;
+#define NETFS_RREQ_OFFLOAD_COLLECTION 0 /* Offload collection to workqueue */
#define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */
#define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */
#define NETFS_RREQ_FAILED 4 /* The request failed */
#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
+#define NETFS_RREQ_FOLIO_COPY_TO_CACHE 6 /* Copy current folio to cache from read */
#define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */
#define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */
#define NETFS_RREQ_BLOCKED 10 /* We blocked */
@@ -409,6 +408,13 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
struct netfs_group *netfs_group);
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+/* Single, monolithic object read/write API. */
+void netfs_single_mark_inode_dirty(struct inode *inode);
+ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter);
+int netfs_writeback_single(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct iov_iter *iter);
+
/* Address operations API */
struct readahead_control;
void netfs_readahead(struct readahead_control *);
@@ -428,10 +434,8 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp);
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
/* (Sub)request management API. */
-void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
- bool was_async);
-void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
- int error, bool was_async);
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq);
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq);
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
enum netfs_sreq_ref_trace what);
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
@@ -453,6 +457,18 @@ void netfs_end_io_write(struct inode *inode);
int netfs_start_io_direct(struct inode *inode);
void netfs_end_io_direct(struct inode *inode);
+/* Miscellaneous APIs. */
+struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp,
+ unsigned int trace /*enum netfs_folioq_trace*/);
+void netfs_folioq_free(struct folio_queue *folioq,
+ unsigned int trace /*enum netfs_trace_folioq*/);
+
+/* Buffer wrangling helpers API. */
+int netfs_alloc_folioq_buffer(struct address_space *mapping,
+ struct folio_queue **_buffer,
+ size_t *_cur_size, ssize_t size, gfp_t gfp);
+void netfs_free_folioq_buffer(struct folio_queue *fq);
+
/**
* netfs_inode - Get the netfs inode context from the inode
* @inode: The inode to query
diff --git a/include/linux/pid.h b/include/linux/pid.h
index a3aad9b4074c..98837a1ff0f3 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -59,6 +59,7 @@ struct pid
spinlock_t lock;
struct dentry *stashed;
u64 ino;
+ struct rb_node pidfs_node;
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX];
struct hlist_head inodes;
@@ -68,6 +69,7 @@ struct pid
struct upid numbers[];
};
+extern seqcount_spinlock_t pidmap_lock_seq;
extern struct pid init_struct_pid;
struct file;
@@ -106,9 +108,6 @@ extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
-extern int pid_max;
-extern int pid_max_min, pid_max_max;
-
/*
* look up a PID in the hash table. Must be called with the tasklist_lock
* or rcu_read_lock() held.
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index f9f9931e02d6..7c67a5811199 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -30,6 +30,7 @@ struct pid_namespace {
struct task_struct *child_reaper;
struct kmem_cache *pid_cachep;
unsigned int level;
+ int pid_max;
struct pid_namespace *parent;
#ifdef CONFIG_BSD_PROCESS_ACCT
struct fs_pin *bacct;
@@ -38,9 +39,14 @@ struct pid_namespace {
struct ucounts *ucounts;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+ struct work_struct work;
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_set set;
+ struct ctl_table_header *sysctls;
+#if defined(CONFIG_MEMFD_CREATE)
int memfd_noexec_scope;
#endif
+#endif
} __randomize_layout;
extern struct pid_namespace init_pid_ns;
@@ -117,6 +123,8 @@ static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
void pid_idr_init(void);
+int register_pidns_sysctls(struct pid_namespace *pidns);
+void unregister_pidns_sysctls(struct pid_namespace *pidns);
static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
{
diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h
index 75bdf9807802..7c830d0dec9a 100644
--- a/include/linux/pidfs.h
+++ b/include/linux/pidfs.h
@@ -4,5 +4,8 @@
struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags);
void __init pidfs_init(void);
+void pidfs_add_pid(struct pid *pid);
+void pidfs_remove_pid(struct pid *pid);
+extern const struct dentry_operations pidfs_dentry_operations;
#endif /* _LINUX_PID_FS_H */
diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h
index c9a31c567e85..2e18fef1a2e1 100644
--- a/include/linux/pruss_driver.h
+++ b/include/linux/pruss_driver.h
@@ -144,32 +144,32 @@ static inline int pruss_release_mem_region(struct pruss *pruss,
static inline int pruss_cfg_get_gpmux(struct pruss *pruss,
enum pruss_pru_id pru_id, u8 *mux)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_set_gpmux(struct pruss *pruss,
enum pruss_pru_id pru_id, u8 mux)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_gpimode(struct pruss *pruss,
enum pruss_pru_id pru_id,
enum pruss_gpi_mode mode)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
static inline int pruss_cfg_xfr_enable(struct pruss *pruss,
enum pru_type pru_type,
- bool enable);
+ bool enable)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
#endif /* CONFIG_TI_PRUSS */
diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h
index 730f77381d55..2503f7625d65 100644
--- a/include/linux/pseudo_fs.h
+++ b/include/linux/pseudo_fs.h
@@ -5,6 +5,7 @@
struct pseudo_fs_context {
const struct super_operations *ops;
+ const struct export_operations *eops;
const struct xattr_handler * const *xattr;
const struct dentry_operations *dops;
unsigned long magic;
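Illustrative only: a pseudo filesystem exposing export operations through the new eops field; the names and magic number below are made up.

static const struct export_operations examplefs_export_ops = {
	/* ... fh_to_dentry etc. ... */
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, 0x4558414d /* made-up magic */);

	if (!ctx)
		return -ENOMEM;
	ctx->eops = &examplefs_export_ops;	/* new field from this patch */
	return 0;
}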
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 14dfa6008467..1b11926ddd47 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -30,6 +30,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
* way, we must not access it directly
*/
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
+/*
+ * Return the ->prev pointer of a list_head in an rcu safe way. Don't
+ * access it directly.
+ *
+ * Any list traversed with list_bidir_prev_rcu() must never use
+ * list_del_rcu(). Doing so will poison the ->prev pointer that
+ * list_bidir_prev_rcu() relies on, which will result in segfaults.
+ * To prevent these segfaults, use list_bidir_del_rcu() instead
+ * of list_del_rcu().
+ */
+#define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev)))
/**
* list_tail_rcu - returns the prev pointer of the head of the list
@@ -159,6 +170,39 @@ static inline void list_del_rcu(struct list_head *entry)
}
/**
+ * list_bidir_del_rcu - deletes entry from list without re-initialization
+ * @entry: the element to delete from the list.
+ *
+ * In contrast to list_del_rcu(), this doesn't poison the ->prev pointer,
+ * thus allowing backwards traversal via list_bidir_prev_rcu().
+ *
+ * Note: list_empty() on entry does not return true after this because
+ * the entry is in a special undefined state that permits RCU-based
+ * lockfree reverse traversal. In particular this means that we can not
+ * poison the forward and backwards pointers that may still be used for
+ * walking the list.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another list-mutation
+ * primitive, such as list_bidir_del_rcu() or list_add_rcu(), running on
+ * this same list. However, it is perfectly legal to run concurrently
+ * with the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ *
+ * Note that list_del_rcu() and list_bidir_del_rcu() must not be used on
+ * the same list.
+ *
+ * Note that the caller is not permitted to immediately free
+ * the newly deleted entry. Instead, either synchronize_rcu()
+ * or call_rcu() must be used to defer freeing until an RCU
+ * grace period has elapsed.
+ */
+static inline void list_bidir_del_rcu(struct list_head *entry)
+{
+ __list_del_entry(entry);
+}
+
+/**
* hlist_del_init_rcu - deletes entry from hash list with re-initialization
* @n: the element to delete from the hash list.
*
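Hedged sketch (not from the patch) pairing the two new helpers: entries on such a list must only ever be removed with list_bidir_del_rcu(), which is what keeps the ->prev walk below safe.

#include <linux/rculist.h>
#include <linux/slab.h>

struct example_node {
	struct list_head	link;
	struct rcu_head		rcu;
	int			value;
};

/* Step backwards under rcu_read_lock(); ->prev is never poisoned here. */
static struct example_node *example_prev(struct list_head *pos)
{
	struct list_head *prev = rcu_dereference(list_bidir_prev_rcu(pos));

	return list_entry(prev, struct example_node, link);
}

static void example_remove(struct example_node *node)
{
	/* Caller holds the list lock; freeing is deferred past a grace period. */
	list_bidir_del_rcu(&node->link);
	kfree_rcu(node, rcu);
}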
diff --git a/include/linux/rolling_buffer.h b/include/linux/rolling_buffer.h
new file mode 100644
index 000000000000..ac15b1ffdd83
--- /dev/null
+++ b/include/linux/rolling_buffer.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Rolling buffer of folios
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _ROLLING_BUFFER_H
+#define _ROLLING_BUFFER_H
+
+#include <linux/folio_queue.h>
+#include <linux/uio.h>
+
+/*
+ * Rolling buffer. Whilst the buffer is live and in use, folios and folio
+ * queue segments can be added to one end by one thread and removed from the
+ * other end by another thread. The buffer isn't allowed to be empty; it must
+ * always have at least one folio_queue in it so that neither side has to
+ * modify both queue pointers.
+ *
+ * The iterator in the buffer is extended as buffers are inserted. It can be
+ * snapshotted to use a segment of the buffer.
+ */
+struct rolling_buffer {
+ struct folio_queue *head; /* Producer's insertion point */
+ struct folio_queue *tail; /* Consumer's removal point */
+ struct iov_iter iter; /* Iterator tracking what's left in the buffer */
+ u8 next_head_slot; /* Next slot in ->head */
+ u8 first_tail_slot; /* First slot in ->tail */
+};
+
+/*
+ * Snapshot of a rolling buffer.
+ */
+struct rolling_buffer_snapshot {
+ struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */
+ unsigned char curr_slot; /* Folio currently being read */
+ unsigned char curr_order; /* Order of folio */
+};
+
+/* Marks to store per-folio in the internal folio_queue structs. */
+#define ROLLBUF_MARK_1 BIT(0)
+#define ROLLBUF_MARK_2 BIT(1)
+
+int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id,
+ unsigned int direction);
+int rolling_buffer_make_space(struct rolling_buffer *roll);
+ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
+ struct readahead_control *ractl,
+ struct folio_batch *put_batch);
+ssize_t rolling_buffer_append(struct rolling_buffer *roll, struct folio *folio,
+ unsigned int flags);
+struct folio_queue *rolling_buffer_delete_spent(struct rolling_buffer *roll);
+void rolling_buffer_clear(struct rolling_buffer *roll);
+
+static inline void rolling_buffer_advance(struct rolling_buffer *roll, size_t amount)
+{
+ iov_iter_advance(&roll->iter, amount);
+}
+
+#endif /* _ROLLING_BUFFER_H */
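Sketch only, with error handling trimmed and no folio marks set: the expected flow is init, append folios at the head, let the consumer advance the iterator, then clear.

#include <linux/rolling_buffer.h>

static int example_roll_one(struct rolling_buffer *roll, struct folio *folio,
			    unsigned int rreq_id)
{
	ssize_t added;
	int ret;

	ret = rolling_buffer_init(roll, rreq_id, ITER_SOURCE);
	if (ret < 0)
		return ret;

	added = rolling_buffer_append(roll, folio, 0);
	if (added < 0) {
		rolling_buffer_clear(roll);
		return added;
	}

	rolling_buffer_advance(roll, added);	/* consumer side has used it all */
	rolling_buffer_clear(roll);
	return 0;
}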
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 341980599c71..e45531455d3b 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -55,10 +55,10 @@ struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
static inline int secure_computing(void) { return 0; }
-static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
+static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
static inline long prctl_get_seccomp(void)
{
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5298765d6ca4..eb20dcaa51b5 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -272,7 +272,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
({ \
unsigned __seq; \
\
- while ((__seq = seqprop_sequence(s)) & 1) \
+ while (unlikely((__seq = seqprop_sequence(s)) & 1)) \
cpu_relax(); \
\
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 3d900c86981c..9d8382e23a9c 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -52,6 +52,7 @@ struct kstat {
u64 mnt_id;
u32 dio_mem_align;
u32 dio_offset_align;
+ u32 dio_read_offset_align;
u64 change_cookie;
u64 subvol;
u32 atomic_write_unit_min;
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index cb40f1a1d081..75342022d144 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -247,6 +247,13 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
vma_is_shmem(vma);
}
+static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
+{
+ struct userfaultfd_ctx *uffd_ctx = vma->vm_userfaultfd_ctx.ctx;
+
+ return uffd_ctx && (uffd_ctx->features & UFFD_FEATURE_EVENT_REMAP) == 0;
+}
+
extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);
void dup_userfaultfd_fail(struct list_head *);
@@ -402,6 +409,11 @@ static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
return false;
}
+static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
+{
+ return false;
+}
+
#endif /* CONFIG_USERFAULTFD */
static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index c858270141bc..c39a426ebf52 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -174,12 +174,4 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
#endif
}
-static inline void sk_mark_napi_id_once_xdp(struct sock *sk,
- const struct xdp_buff *xdp)
-{
-#ifdef CONFIG_NET_RX_BUSY_POLL
- __sk_mark_napi_id_once(sk, xdp->rxq->napi_id);
-#endif
-}
-
#endif /* _LINUX_NET_BUSY_POLL_H */
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 793e6fd78bc5..60a5347922be 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -294,7 +294,7 @@ static inline long page_pool_unref_page(struct page *page, long nr)
static inline void page_pool_ref_netmem(netmem_ref netmem)
{
- atomic_long_inc(&netmem_to_page(netmem)->pp_ref_count);
+ atomic_long_inc(netmem_get_pp_ref_count_ref(netmem));
}
static inline void page_pool_ref_page(struct page *page)
diff --git a/include/net/xdp.h b/include/net/xdp.h
index e6770dd40c91..b5b10f2b88e5 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -62,7 +62,6 @@ struct xdp_rxq_info {
u32 queue_index;
u32 reg_state;
struct xdp_mem_info mem;
- unsigned int napi_id;
u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 40085afd9160..7a7316d9c0da 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -59,15 +59,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
xp_fill_cb(pool, desc);
}
-static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
-{
-#ifdef CONFIG_NET_RX_BUSY_POLL
- return pool->heads[0].xdp.rxq->napi_id;
-#else
- return 0;
-#endif
-}
-
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
@@ -306,11 +297,6 @@ static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
{
}
-static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool)
-{
- return 0;
-}
-
static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index a0aed1a428a1..b0db89058c91 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -118,6 +118,8 @@ enum yfs_cm_operation {
*/
#define afs_call_traces \
EM(afs_call_trace_alloc, "ALLOC") \
+ EM(afs_call_trace_async_abort, "ASYAB") \
+ EM(afs_call_trace_async_kill, "ASYKL") \
EM(afs_call_trace_free, "FREE ") \
EM(afs_call_trace_get, "GET ") \
EM(afs_call_trace_put, "PUT ") \
@@ -168,12 +170,14 @@ enum yfs_cm_operation {
#define afs_cell_traces \
EM(afs_cell_trace_alloc, "ALLOC ") \
EM(afs_cell_trace_free, "FREE ") \
+ EM(afs_cell_trace_get_atcell, "GET atcell") \
EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \
EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \
EM(afs_cell_trace_get_queue_new, "GET q-new ") \
EM(afs_cell_trace_get_vol, "GET vol ") \
EM(afs_cell_trace_insert, "INSERT ") \
EM(afs_cell_trace_manage, "MANAGE ") \
+ EM(afs_cell_trace_put_atcell, "PUT atcell") \
EM(afs_cell_trace_put_candidate, "PUT candid") \
EM(afs_cell_trace_put_destroy, "PUT destry") \
EM(afs_cell_trace_put_queue_work, "PUT q-work") \
@@ -323,6 +327,44 @@ enum yfs_cm_operation {
EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \
E_(yfs_CB_CallBack, "YFSCB.CallBack")
+#define afs_cb_promise_traces \
+ EM(afs_cb_promise_clear_cb_break, "CLEAR cb-break") \
+ EM(afs_cb_promise_clear_rmdir, "CLEAR rmdir") \
+ EM(afs_cb_promise_clear_rotate_server, "CLEAR rot-srv") \
+ EM(afs_cb_promise_clear_server_change, "CLEAR srv-chg") \
+ EM(afs_cb_promise_clear_vol_init_cb, "CLEAR vol-init-cb") \
+ EM(afs_cb_promise_set_apply_cb, "SET apply-cb") \
+ EM(afs_cb_promise_set_new_inode, "SET new-inode") \
+ E_(afs_cb_promise_set_new_symlink, "SET new-symlink")
+
+#define afs_vnode_invalid_traces \
+ EM(afs_vnode_invalid_trace_cb_ro_snapshot, "cb-ro-snapshot") \
+ EM(afs_vnode_invalid_trace_cb_scrub, "cb-scrub") \
+ EM(afs_vnode_invalid_trace_cb_v_break, "cb-v-break") \
+ EM(afs_vnode_invalid_trace_expired, "expired") \
+ EM(afs_vnode_invalid_trace_no_cb_promise, "no-cb-promise") \
+ EM(afs_vnode_invalid_trace_vol_expired, "vol-expired") \
+ EM(afs_vnode_invalid_trace_zap_data, "zap-data") \
+ E_(afs_vnode_valid_trace, "valid")
+
+#define afs_dir_invalid_traces \
+ EM(afs_dir_invalid_edit_add_bad_size, "edit-add-bad-size") \
+ EM(afs_dir_invalid_edit_add_no_slots, "edit-add-no-slots") \
+ EM(afs_dir_invalid_edit_add_too_many_blocks, "edit-add-too-many-blocks") \
+ EM(afs_dir_invalid_edit_get_block, "edit-get-block") \
+ EM(afs_dir_invalid_edit_mkdir, "edit-mkdir") \
+ EM(afs_dir_invalid_edit_rem_bad_size, "edit-rem-bad-size") \
+ EM(afs_dir_invalid_edit_rem_wrong_name, "edit-rem-wrong_name") \
+ EM(afs_dir_invalid_edit_upd_bad_size, "edit-upd-bad-size") \
+ EM(afs_dir_invalid_edit_upd_no_dd, "edit-upd-no-dotdot") \
+ EM(afs_dir_invalid_dv_mismatch, "dv-mismatch") \
+ EM(afs_dir_invalid_inval_folio, "inv-folio") \
+ EM(afs_dir_invalid_iter_stale, "iter-stale") \
+ EM(afs_dir_invalid_reclaimed_folio, "reclaimed-folio") \
+ EM(afs_dir_invalid_release_folio, "rel-folio") \
+ EM(afs_dir_invalid_remote, "remote") \
+ E_(afs_dir_invalid_subdir_removed, "subdir-removed")
+
#define afs_edit_dir_ops \
EM(afs_edit_dir_create, "create") \
EM(afs_edit_dir_create_error, "c_fail") \
@@ -332,6 +374,7 @@ enum yfs_cm_operation {
EM(afs_edit_dir_delete_error, "d_err ") \
EM(afs_edit_dir_delete_inval, "d_invl") \
EM(afs_edit_dir_delete_noent, "d_nent") \
+ EM(afs_edit_dir_mkdir, "mk_ent") \
EM(afs_edit_dir_update_dd, "u_ddot") \
EM(afs_edit_dir_update_error, "u_fail") \
EM(afs_edit_dir_update_inval, "u_invl") \
@@ -385,6 +428,7 @@ enum yfs_cm_operation {
EM(afs_file_error_dir_over_end, "DIR_ENT_OVER_END") \
EM(afs_file_error_dir_small, "DIR_SMALL") \
EM(afs_file_error_dir_unmarked_ext, "DIR_UNMARKED_EXT") \
+ EM(afs_file_error_symlink_big, "SYM_BIG") \
EM(afs_file_error_mntpt, "MNTPT_READ_FAILED") \
E_(afs_file_error_writeback_fail, "WRITEBACK_FAILED")
@@ -487,7 +531,9 @@ enum yfs_cm_operation {
enum afs_alist_trace { afs_alist_traces } __mode(byte);
enum afs_call_trace { afs_call_traces } __mode(byte);
enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte);
+enum afs_cb_promise_trace { afs_cb_promise_traces } __mode(byte);
enum afs_cell_trace { afs_cell_traces } __mode(byte);
enum afs_dir_invalid_trace { afs_dir_invalid_traces } __mode(byte);
enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte);
enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte);
enum afs_eproto_cause { afs_eproto_causes } __mode(byte);
@@ -498,6 +544,7 @@ enum afs_flock_operation { afs_flock_operations } __mode(byte);
enum afs_io_error { afs_io_errors } __mode(byte);
enum afs_rotate_trace { afs_rotate_traces } __mode(byte);
enum afs_server_trace { afs_server_traces } __mode(byte);
enum afs_vnode_invalid_trace { afs_vnode_invalid_traces } __mode(byte);
enum afs_volume_trace { afs_volume_traces } __mode(byte);
#endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */
@@ -513,8 +560,10 @@ enum afs_volume_trace { afs_volume_traces } __mode(byte);
afs_alist_traces;
afs_call_traces;
afs_cb_break_reasons;
+afs_cb_promise_traces;
afs_cell_traces;
afs_cm_operations;
+afs_dir_invalid_traces;
afs_edit_dir_ops;
afs_edit_dir_reasons;
afs_eproto_causes;
@@ -526,6 +575,7 @@ afs_fs_operations;
afs_io_errors;
afs_rotate_traces;
afs_server_traces;
+afs_vnode_invalid_traces;
afs_vl_operations;
yfs_cm_operations;
@@ -670,7 +720,7 @@ TRACE_EVENT(afs_make_fs_call,
}
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -704,7 +754,7 @@ TRACE_EVENT(afs_make_fs_calli,
}
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s i=%u",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s i=%u",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -741,7 +791,7 @@ TRACE_EVENT(afs_make_fs_call1,
__entry->name[__len] = 0;
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\"",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\"",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -782,7 +832,7 @@ TRACE_EVENT(afs_make_fs_call2,
__entry->name2[__len2] = 0;
),
- TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\" \"%s\"",
+ TP_printk("c=%08x V=%llx i=%llx:%x %s \"%s\" \"%s\"",
__entry->call,
__entry->fid.vid,
__entry->fid.vnode,
@@ -887,9 +937,9 @@ TRACE_EVENT(afs_sent_data,
);
TRACE_EVENT(afs_dir_check_failed,
- TP_PROTO(struct afs_vnode *vnode, loff_t off, loff_t i_size),
+ TP_PROTO(struct afs_vnode *vnode, loff_t off),
- TP_ARGS(vnode, off, i_size),
+ TP_ARGS(vnode, off),
TP_STRUCT__entry(
__field(struct afs_vnode *, vnode)
@@ -900,7 +950,7 @@ TRACE_EVENT(afs_dir_check_failed,
TP_fast_assign(
__entry->vnode = vnode;
__entry->off = off;
- __entry->i_size = i_size;
+ __entry->i_size = i_size_read(&vnode->netfs.inode);
),
TP_printk("vn=%p %llx/%llx",
@@ -1002,7 +1052,7 @@ TRACE_EVENT(afs_edit_dir,
__entry->name[__len] = 0;
),
- TP_printk("d=%x:%x %s %s %u[%u] f=%x:%x \"%s\"",
+ TP_printk("di=%x:%x %s %s %u[%u] fi=%x:%x \"%s\"",
__entry->vnode, __entry->unique,
__print_symbolic(__entry->why, afs_edit_dir_reasons),
__print_symbolic(__entry->op, afs_edit_dir_ops),
@@ -1011,6 +1061,122 @@ TRACE_EVENT(afs_edit_dir,
__entry->name)
);
+TRACE_EVENT(afs_dir_invalid,
+ TP_PROTO(const struct afs_vnode *dvnode, enum afs_dir_invalid_trace trace),
+
+ TP_ARGS(dvnode, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_dir_invalid_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = dvnode->fid.vnode;
+ __entry->unique = dvnode->fid.unique;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("di=%x:%x %s",
+ __entry->vnode, __entry->unique,
+ __print_symbolic(__entry->trace, afs_dir_invalid_traces))
+ );
+
+TRACE_EVENT(afs_cb_promise,
+ TP_PROTO(const struct afs_vnode *vnode, enum afs_cb_promise_trace trace),
+
+ TP_ARGS(vnode, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_cb_promise_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = vnode->fid.vnode;
+ __entry->unique = vnode->fid.unique;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("di=%x:%x %s",
+ __entry->vnode, __entry->unique,
+ __print_symbolic(__entry->trace, afs_cb_promise_traces))
+ );
+
+TRACE_EVENT(afs_vnode_invalid,
+ TP_PROTO(const struct afs_vnode *vnode, enum afs_vnode_invalid_trace trace),
+
+ TP_ARGS(vnode, trace),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_vnode_invalid_trace, trace)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = vnode->fid.vnode;
+ __entry->unique = vnode->fid.unique;
+ __entry->trace = trace;
+ ),
+
+ TP_printk("di=%x:%x %s",
+ __entry->vnode, __entry->unique,
+ __print_symbolic(__entry->trace, afs_vnode_invalid_traces))
+ );
+
+TRACE_EVENT(afs_set_dv,
+ TP_PROTO(const struct afs_vnode *dvnode, u64 new_dv),
+
+ TP_ARGS(dvnode, new_dv),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(u64, old_dv)
+ __field(u64, new_dv)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = dvnode->fid.vnode;
+ __entry->unique = dvnode->fid.unique;
+ __entry->old_dv = dvnode->status.data_version;
+ __entry->new_dv = new_dv;
+ ),
+
+ TP_printk("di=%x:%x dv=%llx -> dv=%llx",
+ __entry->vnode, __entry->unique,
+ __entry->old_dv, __entry->new_dv)
+ );
+
+TRACE_EVENT(afs_dv_mismatch,
+ TP_PROTO(const struct afs_vnode *dvnode, u64 before_dv, int delta, u64 new_dv),
+
+ TP_ARGS(dvnode, before_dv, delta, new_dv),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(int, delta)
+ __field(u64, before_dv)
+ __field(u64, new_dv)
+ ),
+
+ TP_fast_assign(
+ __entry->vnode = dvnode->fid.vnode;
+ __entry->unique = dvnode->fid.unique;
+ __entry->delta = delta;
+ __entry->before_dv = before_dv;
+ __entry->new_dv = new_dv;
+ ),
+
+ TP_printk("di=%x:%x xdv=%llx+%d dv=%llx",
+ __entry->vnode, __entry->unique,
+ __entry->before_dv, __entry->delta, __entry->new_dv)
+ );
+
TRACE_EVENT(afs_protocol_error,
TP_PROTO(struct afs_call *call, enum afs_eproto_cause cause),
@@ -1611,6 +1777,36 @@ TRACE_EVENT(afs_make_call,
__entry->fid.unique)
);
+TRACE_EVENT(afs_read_recv,
+ TP_PROTO(const struct afs_operation *op, const struct afs_call *call),
+
+ TP_ARGS(op, call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq)
+ __field(unsigned int, sreq)
+ __field(unsigned int, op)
+ __field(unsigned int, op_flags)
+ __field(unsigned int, call)
+ __field(enum afs_call_state, call_state)
+ ),
+
+ TP_fast_assign(
+ __entry->op = op->debug_id;
+ __entry->sreq = op->fetch.subreq->debug_index;
+ __entry->rreq = op->fetch.subreq->rreq->debug_id;
+ __entry->op_flags = op->flags;
+ __entry->call = call->debug_id;
+ __entry->call_state = call->state;
+ ),
+
+ TP_printk("R=%08x[%x] OP=%08x c=%08x cs=%x of=%x",
+ __entry->rreq, __entry->sreq,
+ __entry->op,
+ __entry->call, __entry->call_state,
+ __entry->op_flags)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index 7d931db02b93..a743b2a35ea7 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -223,10 +223,10 @@ TRACE_EVENT(cachefiles_ref,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, cookie )
- __field(enum cachefiles_obj_ref_trace, why )
- __field(int, usage )
+ __field(unsigned int, obj)
+ __field(unsigned int, cookie)
+ __field(enum cachefiles_obj_ref_trace, why)
+ __field(int, usage)
),
TP_fast_assign(
@@ -249,10 +249,10 @@ TRACE_EVENT(cachefiles_lookup,
TP_ARGS(obj, dir, de),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(short, error )
- __field(unsigned long, dino )
- __field(unsigned long, ino )
+ __field(unsigned int, obj)
+ __field(short, error)
+ __field(unsigned long, dino)
+ __field(unsigned long, ino)
),
TP_fast_assign(
@@ -273,8 +273,8 @@ TRACE_EVENT(cachefiles_mkdir,
TP_ARGS(dir, subdir),
TP_STRUCT__entry(
- __field(unsigned int, dir )
- __field(unsigned int, subdir )
+ __field(unsigned int, dir)
+ __field(unsigned int, subdir)
),
TP_fast_assign(
@@ -293,8 +293,8 @@ TRACE_EVENT(cachefiles_tmpfile,
TP_ARGS(obj, backer),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
),
TP_fast_assign(
@@ -313,8 +313,8 @@ TRACE_EVENT(cachefiles_link,
TP_ARGS(obj, backer),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
),
TP_fast_assign(
@@ -336,9 +336,9 @@ TRACE_EVENT(cachefiles_unlink,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, ino )
- __field(enum fscache_why_object_killed, why )
+ __field(unsigned int, obj)
+ __field(unsigned int, ino)
+ __field(enum fscache_why_object_killed, why)
),
TP_fast_assign(
@@ -361,9 +361,9 @@ TRACE_EVENT(cachefiles_rename,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, ino )
- __field(enum fscache_why_object_killed, why )
+ __field(unsigned int, obj)
+ __field(unsigned int, ino)
+ __field(enum fscache_why_object_killed, why)
),
TP_fast_assign(
@@ -380,17 +380,20 @@ TRACE_EVENT(cachefiles_rename,
TRACE_EVENT(cachefiles_coherency,
TP_PROTO(struct cachefiles_object *obj,
ino_t ino,
+ u64 disk_aux,
enum cachefiles_content content,
enum cachefiles_coherency_trace why),
- TP_ARGS(obj, ino, content, why),
+ TP_ARGS(obj, ino, disk_aux, content, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(enum cachefiles_coherency_trace, why )
- __field(enum cachefiles_content, content )
- __field(u64, ino )
+ __field(unsigned int, obj)
+ __field(enum cachefiles_coherency_trace, why)
+ __field(enum cachefiles_content, content)
+ __field(u64, ino)
+ __field(u64, aux)
+ __field(u64, disk_aux)
),
TP_fast_assign(
@@ -398,13 +401,17 @@ TRACE_EVENT(cachefiles_coherency,
__entry->why = why;
__entry->content = content;
__entry->ino = ino;
+ __entry->aux = be64_to_cpup((__be64 *)obj->cookie->inline_aux);
+ __entry->disk_aux = disk_aux;
),
- TP_printk("o=%08x %s B=%llx c=%u",
+ TP_printk("o=%08x %s B=%llx c=%u aux=%llx dsk=%llx",
__entry->obj,
__print_symbolic(__entry->why, cachefiles_coherency_traces),
__entry->ino,
- __entry->content)
+ __entry->content,
+ __entry->aux,
+ __entry->disk_aux)
);
TRACE_EVENT(cachefiles_vol_coherency,
@@ -416,9 +423,9 @@ TRACE_EVENT(cachefiles_vol_coherency,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, vol )
- __field(enum cachefiles_coherency_trace, why )
- __field(u64, ino )
+ __field(unsigned int, vol)
+ __field(enum cachefiles_coherency_trace, why)
+ __field(u64, ino)
),
TP_fast_assign(
@@ -445,14 +452,14 @@ TRACE_EVENT(cachefiles_prep_read,
TP_ARGS(obj, start, len, flags, source, why, cache_inode, netfs_inode),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned short, flags )
- __field(enum netfs_io_source, source )
- __field(enum cachefiles_prepare_read_trace, why )
- __field(size_t, len )
- __field(loff_t, start )
- __field(unsigned int, netfs_inode )
- __field(unsigned int, cache_inode )
+ __field(unsigned int, obj)
+ __field(unsigned short, flags)
+ __field(enum netfs_io_source, source)
+ __field(enum cachefiles_prepare_read_trace, why)
+ __field(size_t, len)
+ __field(loff_t, start)
+ __field(unsigned int, netfs_inode)
+ __field(unsigned int, cache_inode)
),
TP_fast_assign(
@@ -484,10 +491,10 @@ TRACE_EVENT(cachefiles_read,
TP_ARGS(obj, backer, start, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(size_t, len )
- __field(loff_t, start )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(size_t, len)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -513,10 +520,10 @@ TRACE_EVENT(cachefiles_write,
TP_ARGS(obj, backer, start, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(size_t, len )
- __field(loff_t, start )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(size_t, len)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -540,11 +547,11 @@ TRACE_EVENT(cachefiles_trunc,
TP_ARGS(obj, backer, from, to, why),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(enum cachefiles_trunc_trace, why )
- __field(loff_t, from )
- __field(loff_t, to )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(enum cachefiles_trunc_trace, why)
+ __field(loff_t, from)
+ __field(loff_t, to)
),
TP_fast_assign(
@@ -571,8 +578,8 @@ TRACE_EVENT(cachefiles_mark_active,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(ino_t, inode )
+ __field(unsigned int, obj)
+ __field(ino_t, inode)
),
TP_fast_assign(
@@ -592,8 +599,8 @@ TRACE_EVENT(cachefiles_mark_failed,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(ino_t, inode )
+ __field(unsigned int, obj)
+ __field(ino_t, inode)
),
TP_fast_assign(
@@ -613,8 +620,8 @@ TRACE_EVENT(cachefiles_mark_inactive,
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(ino_t, inode )
+ __field(unsigned int, obj)
+ __field(ino_t, inode)
),
TP_fast_assign(
@@ -633,10 +640,10 @@ TRACE_EVENT(cachefiles_vfs_error,
TP_ARGS(obj, backer, error, where),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(enum cachefiles_error_trace, where )
- __field(short, error )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(enum cachefiles_error_trace, where)
+ __field(short, error)
),
TP_fast_assign(
@@ -660,10 +667,10 @@ TRACE_EVENT(cachefiles_io_error,
TP_ARGS(obj, backer, error, where),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(enum cachefiles_error_trace, where )
- __field(short, error )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(enum cachefiles_error_trace, where)
+ __field(short, error)
),
TP_fast_assign(
@@ -687,11 +694,11 @@ TRACE_EVENT(cachefiles_ondemand_open,
TP_ARGS(obj, msg, load),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(unsigned int, object_id )
- __field(unsigned int, fd )
- __field(unsigned int, flags )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(unsigned int, object_id)
+ __field(unsigned int, fd)
+ __field(unsigned int, flags)
),
TP_fast_assign(
@@ -717,9 +724,9 @@ TRACE_EVENT(cachefiles_ondemand_copen,
TP_ARGS(obj, msg_id, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(long, len )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(long, len)
),
TP_fast_assign(
@@ -740,9 +747,9 @@ TRACE_EVENT(cachefiles_ondemand_close,
TP_ARGS(obj, msg),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(unsigned int, object_id )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(unsigned int, object_id)
),
TP_fast_assign(
@@ -764,11 +771,11 @@ TRACE_EVENT(cachefiles_ondemand_read,
TP_ARGS(obj, msg, load),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
- __field(unsigned int, object_id )
- __field(loff_t, start )
- __field(size_t, len )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
+ __field(unsigned int, object_id)
+ __field(loff_t, start)
+ __field(size_t, len)
),
TP_fast_assign(
@@ -793,8 +800,8 @@ TRACE_EVENT(cachefiles_ondemand_cread,
TP_ARGS(obj, msg_id),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, msg_id )
+ __field(unsigned int, obj)
+ __field(unsigned int, msg_id)
),
TP_fast_assign(
@@ -814,10 +821,10 @@ TRACE_EVENT(cachefiles_ondemand_fd_write,
TP_ARGS(obj, backer, start, len),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, backer )
- __field(loff_t, start )
- __field(size_t, len )
+ __field(unsigned int, obj)
+ __field(unsigned int, backer)
+ __field(loff_t, start)
+ __field(size_t, len)
),
TP_fast_assign(
@@ -840,8 +847,8 @@ TRACE_EVENT(cachefiles_ondemand_fd_release,
TP_ARGS(obj, object_id),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(unsigned int, object_id )
+ __field(unsigned int, obj)
+ __field(unsigned int, object_id)
),
TP_fast_assign(
diff --git a/include/trace/events/hugetlbfs.h b/include/trace/events/hugetlbfs.h
index 8331c904a9ba..59605dfaeeb4 100644
--- a/include/trace/events/hugetlbfs.h
+++ b/include/trace/events/hugetlbfs.h
@@ -23,7 +23,7 @@ TRACE_EVENT(hugetlbfs_alloc_inode,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->dir = dir->i_ino;
+ __entry->dir = dir ? dir->i_ino : 0;
__entry->mode = mode;
),
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index bb8a59c6caa2..d36c857dd249 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -13,6 +13,69 @@
* Thus most bits set go first.
*/
+/* These define the values that are enums (the bits) */
+#define TRACE_GFP_FLAGS_GENERAL \
+ TRACE_GFP_EM(DMA) \
+ TRACE_GFP_EM(HIGHMEM) \
+ TRACE_GFP_EM(DMA32) \
+ TRACE_GFP_EM(MOVABLE) \
+ TRACE_GFP_EM(RECLAIMABLE) \
+ TRACE_GFP_EM(HIGH) \
+ TRACE_GFP_EM(IO) \
+ TRACE_GFP_EM(FS) \
+ TRACE_GFP_EM(ZERO) \
+ TRACE_GFP_EM(DIRECT_RECLAIM) \
+ TRACE_GFP_EM(KSWAPD_RECLAIM) \
+ TRACE_GFP_EM(WRITE) \
+ TRACE_GFP_EM(NOWARN) \
+ TRACE_GFP_EM(RETRY_MAYFAIL) \
+ TRACE_GFP_EM(NOFAIL) \
+ TRACE_GFP_EM(NORETRY) \
+ TRACE_GFP_EM(MEMALLOC) \
+ TRACE_GFP_EM(COMP) \
+ TRACE_GFP_EM(NOMEMALLOC) \
+ TRACE_GFP_EM(HARDWALL) \
+ TRACE_GFP_EM(THISNODE) \
+ TRACE_GFP_EM(ACCOUNT) \
+ TRACE_GFP_EM(ZEROTAGS)
+
+#ifdef CONFIG_KASAN_HW_TAGS
+# define TRACE_GFP_FLAGS_KASAN \
+ TRACE_GFP_EM(SKIP_ZERO) \
+ TRACE_GFP_EM(SKIP_KASAN)
+#else
+# define TRACE_GFP_FLAGS_KASAN
+#endif
+
+#ifdef CONFIG_LOCKDEP
+# define TRACE_GFP_FLAGS_LOCKDEP \
+ TRACE_GFP_EM(NOLOCKDEP)
+#else
+# define TRACE_GFP_FLAGS_LOCKDEP
+#endif
+
+#ifdef CONFIG_SLAB_OBJ_EXT
+# define TRACE_GFP_FLAGS_SLAB \
+ TRACE_GFP_EM(NO_OBJ_EXT)
+#else
+# define TRACE_GFP_FLAGS_SLAB
+#endif
+
+#define TRACE_GFP_FLAGS \
+ TRACE_GFP_FLAGS_GENERAL \
+ TRACE_GFP_FLAGS_KASAN \
+ TRACE_GFP_FLAGS_LOCKDEP \
+ TRACE_GFP_FLAGS_SLAB
+
+#undef TRACE_GFP_EM
+#define TRACE_GFP_EM(a) TRACE_DEFINE_ENUM(___GFP_##a##_BIT);
+
+TRACE_GFP_FLAGS
+
+/* Just in case these are ever used */
+TRACE_DEFINE_ENUM(___GFP_UNUSED_BIT);
+TRACE_DEFINE_ENUM(___GFP_LAST_BIT);
+
#define gfpflag_string(flag) {(__force unsigned long)flag, #flag}
#define __def_gfpflag_names \
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index bf511bca896e..6e699cadcb29 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -21,6 +21,7 @@
EM(netfs_read_trace_readahead, "READAHEAD") \
EM(netfs_read_trace_readpage, "READPAGE ") \
EM(netfs_read_trace_read_gaps, "READ-GAPS") \
+ EM(netfs_read_trace_read_single, "READ-SNGL") \
EM(netfs_read_trace_prefetch_for_write, "PREFETCHW") \
E_(netfs_read_trace_write_begin, "WRITEBEGN")
@@ -35,9 +36,11 @@
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
EM(NETFS_READ_GAPS, "RG") \
+ EM(NETFS_READ_SINGLE, "R1") \
EM(NETFS_READ_FOR_WRITE, "RW") \
EM(NETFS_DIO_READ, "DR") \
EM(NETFS_WRITEBACK, "WB") \
+ EM(NETFS_WRITEBACK_SINGLE, "W1") \
EM(NETFS_WRITETHROUGH, "WT") \
EM(NETFS_UNBUFFERED_WRITE, "UW") \
EM(NETFS_DIO_WRITE, "DW") \
@@ -47,17 +50,23 @@
EM(netfs_rreq_trace_assess, "ASSESS ") \
EM(netfs_rreq_trace_copy, "COPY ") \
EM(netfs_rreq_trace_collect, "COLLECT") \
+ EM(netfs_rreq_trace_complete, "COMPLET") \
+ EM(netfs_rreq_trace_dirty, "DIRTY ") \
EM(netfs_rreq_trace_done, "DONE ") \
EM(netfs_rreq_trace_free, "FREE ") \
EM(netfs_rreq_trace_redirty, "REDIRTY") \
EM(netfs_rreq_trace_resubmit, "RESUBMT") \
+ EM(netfs_rreq_trace_set_abandon, "S-ABNDN") \
EM(netfs_rreq_trace_set_pause, "PAUSE ") \
EM(netfs_rreq_trace_unlock, "UNLOCK ") \
EM(netfs_rreq_trace_unlock_pgpriv2, "UNLCK-2") \
EM(netfs_rreq_trace_unmark, "UNMARK ") \
EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \
EM(netfs_rreq_trace_wait_pause, "WT-PAUS") \
+ EM(netfs_rreq_trace_wait_queue, "WAIT-Q ") \
EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \
+ EM(netfs_rreq_trace_wake_queue, "WAKE-Q ") \
+ EM(netfs_rreq_trace_woke_queue, "WOKE-Q ") \
EM(netfs_rreq_trace_unpause, "UNPAUSE") \
E_(netfs_rreq_trace_write_done, "WR-DONE")
@@ -74,6 +83,10 @@
#define netfs_sreq_traces \
EM(netfs_sreq_trace_add_donations, "+DON ") \
EM(netfs_sreq_trace_added, "ADD ") \
+ EM(netfs_sreq_trace_cache_nowrite, "CA-NW") \
+ EM(netfs_sreq_trace_cache_prepare, "CA-PR") \
+ EM(netfs_sreq_trace_cache_write, "CA-WR") \
+ EM(netfs_sreq_trace_cancel, "CANCL") \
EM(netfs_sreq_trace_clear, "CLEAR") \
EM(netfs_sreq_trace_discard, "DSCRD") \
EM(netfs_sreq_trace_donate_to_prev, "DON-P") \
@@ -84,6 +97,9 @@
EM(netfs_sreq_trace_hit_eof, "EOF ") \
EM(netfs_sreq_trace_io_progress, "IO ") \
EM(netfs_sreq_trace_limited, "LIMIT") \
+ EM(netfs_sreq_trace_need_clear, "N-CLR") \
+ EM(netfs_sreq_trace_partial_read, "PARTR") \
+ EM(netfs_sreq_trace_need_retry, "NRTRY") \
EM(netfs_sreq_trace_prepare, "PREP ") \
EM(netfs_sreq_trace_prep_failed, "PRPFL") \
EM(netfs_sreq_trace_progress, "PRGRS") \
@@ -129,6 +145,7 @@
EM(netfs_sreq_trace_get_submit, "GET SUBMIT") \
EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \
EM(netfs_sreq_trace_new, "NEW ") \
+ EM(netfs_sreq_trace_put_abandon, "PUT ABANDON") \
EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \
EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \
EM(netfs_sreq_trace_put_consumed, "PUT CONSUME") \
@@ -152,6 +169,7 @@
EM(netfs_streaming_filled_page, "mod-streamw-f") \
EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \
EM(netfs_folio_trace_abandon, "abandon") \
+ EM(netfs_folio_trace_alloc_buffer, "alloc-buf") \
EM(netfs_folio_trace_cancel_copy, "cancel-copy") \
EM(netfs_folio_trace_cancel_store, "cancel-store") \
EM(netfs_folio_trace_clear, "clear") \
@@ -168,6 +186,7 @@
EM(netfs_folio_trace_mkwrite, "mkwrite") \
EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
EM(netfs_folio_trace_not_under_wback, "!wback") \
+ EM(netfs_folio_trace_not_locked, "!locked") \
EM(netfs_folio_trace_put, "put") \
EM(netfs_folio_trace_read, "read") \
EM(netfs_folio_trace_read_done, "read-done") \
@@ -191,6 +210,14 @@
EM(netfs_trace_donate_to_next, "to-next") \
E_(netfs_trace_donate_to_deferred_next, "defer-next")
+#define netfs_folioq_traces \
+ EM(netfs_trace_folioq_alloc_buffer, "alloc-buf") \
+ EM(netfs_trace_folioq_clear, "clear") \
+ EM(netfs_trace_folioq_delete, "delete") \
+ EM(netfs_trace_folioq_make_space, "make-space") \
+ EM(netfs_trace_folioq_rollbuf_init, "roll-init") \
+ E_(netfs_trace_folioq_read_progress, "r-progress")
+
#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
@@ -209,6 +236,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
enum netfs_folio_trace { netfs_folio_traces } __mode(byte);
enum netfs_collect_contig_trace { netfs_collect_contig_traces } __mode(byte);
enum netfs_donate_trace { netfs_donate_traces } __mode(byte);
+enum netfs_folioq_trace { netfs_folioq_traces } __mode(byte);
#endif
@@ -232,6 +260,7 @@ netfs_sreq_ref_traces;
netfs_folio_traces;
netfs_collect_contig_traces;
netfs_donate_traces;
+netfs_folioq_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -250,13 +279,13 @@ TRACE_EVENT(netfs_read,
TP_ARGS(rreq, start, len, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned int, cookie )
- __field(loff_t, i_size )
- __field(loff_t, start )
- __field(size_t, len )
- __field(enum netfs_read_trace, what )
- __field(unsigned int, netfs_inode )
+ __field(unsigned int, rreq)
+ __field(unsigned int, cookie)
+ __field(loff_t, i_size)
+ __field(loff_t, start)
+ __field(size_t, len)
+ __field(enum netfs_read_trace, what)
+ __field(unsigned int, netfs_inode)
),
TP_fast_assign(
@@ -284,10 +313,10 @@ TRACE_EVENT(netfs_rreq,
TP_ARGS(rreq, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned int, flags )
- __field(enum netfs_io_origin, origin )
- __field(enum netfs_rreq_trace, what )
+ __field(unsigned int, rreq)
+ __field(unsigned int, flags)
+ __field(enum netfs_io_origin, origin)
+ __field(enum netfs_rreq_trace, what)
),
TP_fast_assign(
@@ -311,15 +340,16 @@ TRACE_EVENT(netfs_sreq,
TP_ARGS(sreq, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned short, index )
- __field(short, error )
- __field(unsigned short, flags )
- __field(enum netfs_io_source, source )
- __field(enum netfs_sreq_trace, what )
- __field(size_t, len )
- __field(size_t, transferred )
- __field(loff_t, start )
+ __field(unsigned int, rreq)
+ __field(unsigned short, index)
+ __field(short, error)
+ __field(unsigned short, flags)
+ __field(enum netfs_io_source, source)
+ __field(enum netfs_sreq_trace, what)
+ __field(u8, slot)
+ __field(size_t, len)
+ __field(size_t, transferred)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -332,15 +362,16 @@ TRACE_EVENT(netfs_sreq,
__entry->len = sreq->len;
__entry->transferred = sreq->transferred;
__entry->start = sreq->start;
+ __entry->slot = sreq->io_iter.folioq_slot;
),
- TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx e=%d",
+ TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx s=%u e=%d",
__entry->rreq, __entry->index,
__print_symbolic(__entry->source, netfs_sreq_sources),
__print_symbolic(__entry->what, netfs_sreq_traces),
__entry->flags,
__entry->start, __entry->transferred, __entry->len,
- __entry->error)
+ __entry->slot, __entry->error)
);
TRACE_EVENT(netfs_failure,
@@ -351,15 +382,15 @@ TRACE_EVENT(netfs_failure,
TP_ARGS(rreq, sreq, error, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(short, index )
- __field(short, error )
- __field(unsigned short, flags )
- __field(enum netfs_io_source, source )
- __field(enum netfs_failure, what )
- __field(size_t, len )
- __field(size_t, transferred )
- __field(loff_t, start )
+ __field(unsigned int, rreq)
+ __field(short, index)
+ __field(short, error)
+ __field(unsigned short, flags)
+ __field(enum netfs_io_source, source)
+ __field(enum netfs_failure, what)
+ __field(size_t, len)
+ __field(size_t, transferred)
+ __field(loff_t, start)
),
TP_fast_assign(
@@ -390,9 +421,9 @@ TRACE_EVENT(netfs_rreq_ref,
TP_ARGS(rreq_debug_id, ref, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(int, ref )
- __field(enum netfs_rreq_ref_trace, what )
+ __field(unsigned int, rreq)
+ __field(int, ref)
+ __field(enum netfs_rreq_ref_trace, what)
),
TP_fast_assign(
@@ -414,10 +445,10 @@ TRACE_EVENT(netfs_sreq_ref,
TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what),
TP_STRUCT__entry(
- __field(unsigned int, rreq )
- __field(unsigned int, subreq )
- __field(int, ref )
- __field(enum netfs_sreq_ref_trace, what )
+ __field(unsigned int, rreq)
+ __field(unsigned int, subreq)
+ __field(int, ref)
+ __field(enum netfs_sreq_ref_trace, what)
),
TP_fast_assign(
@@ -465,10 +496,10 @@ TRACE_EVENT(netfs_write_iter,
TP_ARGS(iocb, from),
TP_STRUCT__entry(
- __field(unsigned long long, start )
- __field(size_t, len )
- __field(unsigned int, flags )
- __field(unsigned int, ino )
+ __field(unsigned long long, start)
+ __field(size_t, len)
+ __field(unsigned int, flags)
+ __field(unsigned int, ino)
),
TP_fast_assign(
@@ -489,12 +520,12 @@ TRACE_EVENT(netfs_write,
TP_ARGS(wreq, what),
TP_STRUCT__entry(
- __field(unsigned int, wreq )
- __field(unsigned int, cookie )
- __field(unsigned int, ino )
- __field(enum netfs_write_trace, what )
- __field(unsigned long long, start )
- __field(unsigned long long, len )
+ __field(unsigned int, wreq)
+ __field(unsigned int, cookie)
+ __field(unsigned int, ino)
+ __field(enum netfs_write_trace, what)
+ __field(unsigned long long, start)
+ __field(unsigned long long, len)
),
TP_fast_assign(
@@ -522,10 +553,10 @@ TRACE_EVENT(netfs_collect,
TP_ARGS(wreq),
TP_STRUCT__entry(
- __field(unsigned int, wreq )
- __field(unsigned int, len )
- __field(unsigned long long, transferred )
- __field(unsigned long long, start )
+ __field(unsigned int, wreq)
+ __field(unsigned int, len)
+ __field(unsigned long long, transferred)
+ __field(unsigned long long, start)
),
TP_fast_assign(
@@ -548,12 +579,12 @@ TRACE_EVENT(netfs_collect_sreq,
TP_ARGS(wreq, subreq),
TP_STRUCT__entry(
- __field(unsigned int, wreq )
- __field(unsigned int, subreq )
- __field(unsigned int, stream )
- __field(unsigned int, len )
- __field(unsigned int, transferred )
- __field(unsigned long long, start )
+ __field(unsigned int, wreq)
+ __field(unsigned int, subreq)
+ __field(unsigned int, stream)
+ __field(unsigned int, len)
+ __field(unsigned int, transferred)
+ __field(unsigned long long, start)
),
TP_fast_assign(
@@ -579,11 +610,11 @@ TRACE_EVENT(netfs_collect_folio,
TP_ARGS(wreq, folio, fend, collected_to),
TP_STRUCT__entry(
- __field(unsigned int, wreq )
- __field(unsigned long, index )
- __field(unsigned long long, fend )
- __field(unsigned long long, cleaned_to )
- __field(unsigned long long, collected_to )
+ __field(unsigned int, wreq)
+ __field(unsigned long, index)
+ __field(unsigned long long, fend)
+ __field(unsigned long long, cleaned_to)
+ __field(unsigned long long, collected_to)
),
TP_fast_assign(
@@ -608,10 +639,10 @@ TRACE_EVENT(netfs_collect_state,
TP_ARGS(wreq, collected_to, notes),
TP_STRUCT__entry(
- __field(unsigned int, wreq )
- __field(unsigned int, notes )
- __field(unsigned long long, collected_to )
- __field(unsigned long long, cleaned_to )
+ __field(unsigned int, wreq)
+ __field(unsigned int, notes)
+ __field(unsigned long long, collected_to)
+ __field(unsigned long long, cleaned_to)
),
TP_fast_assign(
@@ -680,69 +711,27 @@ TRACE_EVENT(netfs_collect_stream,
__entry->collected_to, __entry->front)
);
-TRACE_EVENT(netfs_progress,
- TP_PROTO(const struct netfs_io_subrequest *subreq,
- unsigned long long start, size_t avail, size_t part),
+TRACE_EVENT(netfs_folioq,
+ TP_PROTO(const struct folio_queue *fq,
+ enum netfs_folioq_trace trace),
- TP_ARGS(subreq, start, avail, part),
+ TP_ARGS(fq, trace),
TP_STRUCT__entry(
__field(unsigned int, rreq)
- __field(unsigned int, subreq)
- __field(unsigned int, consumed)
- __field(unsigned int, transferred)
- __field(unsigned long long, f_start)
- __field(unsigned int, f_avail)
- __field(unsigned int, f_part)
- __field(unsigned char, slot)
+ __field(unsigned int, id)
+ __field(enum netfs_folioq_trace, trace)
),
TP_fast_assign(
- __entry->rreq = subreq->rreq->debug_id;
- __entry->subreq = subreq->debug_index;
- __entry->consumed = subreq->consumed;
- __entry->transferred = subreq->transferred;
- __entry->f_start = start;
- __entry->f_avail = avail;
- __entry->f_part = part;
- __entry->slot = subreq->curr_folioq_slot;
- ),
-
- TP_printk("R=%08x[%02x] s=%llx ct=%x/%x pa=%x/%x sl=%x",
- __entry->rreq, __entry->subreq, __entry->f_start,
- __entry->consumed, __entry->transferred,
- __entry->f_part, __entry->f_avail, __entry->slot)
- );
-
-TRACE_EVENT(netfs_donate,
- TP_PROTO(const struct netfs_io_request *rreq,
- const struct netfs_io_subrequest *from,
- const struct netfs_io_subrequest *to,
- size_t amount,
- enum netfs_donate_trace trace),
-
- TP_ARGS(rreq, from, to, amount, trace),
-
- TP_STRUCT__entry(
- __field(unsigned int, rreq)
- __field(unsigned int, from)
- __field(unsigned int, to)
- __field(unsigned int, amount)
- __field(enum netfs_donate_trace, trace)
- ),
-
- TP_fast_assign(
- __entry->rreq = rreq->debug_id;
- __entry->from = from->debug_index;
- __entry->to = to ? to->debug_index : -1;
- __entry->amount = amount;
+ __entry->rreq = fq ? fq->rreq_id : 0;
+ __entry->id = fq ? fq->debug_id : 0;
__entry->trace = trace;
),
- TP_printk("R=%08x[%02x] -> [%02x] %s am=%x",
- __entry->rreq, __entry->from, __entry->to,
- __print_symbolic(__entry->trace, netfs_donate_traces),
- __entry->amount)
+ TP_printk("R=%08x fq=%x %s",
+ __entry->rreq, __entry->id,
+ __print_symbolic(__entry->trace, netfs_folioq_traces))
);
#undef EM
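
Editor's note: the removed netfs_progress and netfs_donate events are replaced by a single
netfs_folioq lifetime event keyed on the folio_queue's rreq_id/debug_id. A minimal sketch of
how a caller would emit it; the helper below is hypothetical, only the generated
trace_netfs_folioq() call and the netfs_trace_folioq_* values come from the hunk above
(note the event tolerates a NULL fq, per the TP_fast_assign):

	/* hypothetical helper, illustrative only */
	static void netfs_free_folioq(struct folio_queue *fq)
	{
		trace_netfs_folioq(fq, netfs_trace_folioq_delete);
		kfree(fq);
	}
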
diff --git a/include/uapi/linux/fiemap.h b/include/uapi/linux/fiemap.h
index 24ca0c00cae3..9d9e8ae32b41 100644
--- a/include/uapi/linux/fiemap.h
+++ b/include/uapi/linux/fiemap.h
@@ -14,37 +14,56 @@
#include <linux/types.h>
+/**
+ * struct fiemap_extent - description of one fiemap extent
+ * @fe_logical: byte offset of the extent in the file
+ * @fe_physical: byte offset of extent on disk
+ * @fe_length: length in bytes for this extent
+ * @fe_flags: FIEMAP_EXTENT_* flags for this extent
+ */
struct fiemap_extent {
- __u64 fe_logical; /* logical offset in bytes for the start of
- * the extent from the beginning of the file */
- __u64 fe_physical; /* physical offset in bytes for the start
- * of the extent from the beginning of the disk */
- __u64 fe_length; /* length in bytes for this extent */
+ __u64 fe_logical;
+ __u64 fe_physical;
+ __u64 fe_length;
+ /* private: */
__u64 fe_reserved64[2];
- __u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */
+ /* public: */
+ __u32 fe_flags;
+ /* private: */
__u32 fe_reserved[3];
};
+/**
+ * struct fiemap - file extent mappings
+ * @fm_start: byte offset (inclusive) at which to start mapping (in)
+ * @fm_length: logical length of mapping which userspace wants (in)
+ * @fm_flags: FIEMAP_FLAG_* flags for request (in/out)
+ * @fm_mapped_extents: number of extents that were mapped (out)
+ * @fm_extent_count: size of fm_extents array (in)
+ * @fm_extents: array of mapped extents (out)
+ */
struct fiemap {
- __u64 fm_start; /* logical offset (inclusive) at
- * which to start mapping (in) */
- __u64 fm_length; /* logical length of mapping which
- * userspace wants (in) */
- __u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */
- __u32 fm_mapped_extents;/* number of extents that were mapped (out) */
- __u32 fm_extent_count; /* size of fm_extents array (in) */
+ __u64 fm_start;
+ __u64 fm_length;
+ __u32 fm_flags;
+ __u32 fm_mapped_extents;
+ __u32 fm_extent_count;
+ /* private: */
__u32 fm_reserved;
- struct fiemap_extent fm_extents[]; /* array of mapped extents (out) */
+ /* public: */
+ struct fiemap_extent fm_extents[];
};
#define FIEMAP_MAX_OFFSET (~0ULL)
+/* flags used in fm_flags: */
#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */
#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */
#define FIEMAP_FLAG_CACHE 0x00000004 /* request caching of the extents */
#define FIEMAP_FLAGS_COMPAT (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR)
+/* flags used in fe_flags: */
#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */
#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */
#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending.
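
Editor's note: the fiemap.h change is a kernel-doc conversion only; the ABI is unchanged. For
reference, a minimal userspace sketch of how the documented fields are consumed (error handling
trimmed; FS_IOC_FIEMAP is the ioctl number defined in linux/fs.h):

	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>
	#include <linux/fiemap.h>

	int main(int argc, char **argv)
	{
		struct fiemap *fm;
		unsigned int i;
		int fd;

		if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
			return 1;
		/* header plus room for up to 32 extents */
		fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
		if (!fm)
			return 1;
		fm->fm_start = 0;
		fm->fm_length = FIEMAP_MAX_OFFSET;
		fm->fm_flags = FIEMAP_FLAG_SYNC;
		fm->fm_extent_count = 32;
		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
			return 1;
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("extent %u: logical=%llu physical=%llu len=%llu flags=%#x\n", i,
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_physical,
			       (unsigned long long)fm->fm_extents[i].fe_length,
			       fm->fm_extents[i].fe_flags);
		return 0;
	}
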
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 753971770733..56a4f93a08f4 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -332,9 +332,13 @@ typedef int __bitwise __kernel_rwf_t;
/* Atomic Write */
#define RWF_ATOMIC ((__force __kernel_rwf_t)0x00000040)
+/* buffered IO that drops the cache after reading or writing data */
+#define RWF_DONTCACHE ((__force __kernel_rwf_t)0x00000080)
+
/* mask of flags supported by the kernel */
#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
- RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC)
+ RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC |\
+ RWF_DONTCACHE)
#define PROCFS_IOCTL_MAGIC 'f'
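
Editor's note: RWF_DONTCACHE is passed through preadv2()/pwritev2() like the other RWF_* flags.
A hedged sketch, assuming the glibc wrappers are available; the fallback #define carries the value
from the hunk above, and kernels that do not list the flag in RWF_SUPPORTED are expected to reject
the call (typically with EOPNOTSUPP):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <string.h>
	#include <sys/uio.h>
	#include <unistd.h>

	#ifndef RWF_DONTCACHE
	#define RWF_DONTCACHE 0x00000080	/* value from the hunk above */
	#endif

	int main(void)
	{
		char buf[4096];
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
		int fd = open("dontcache.dat", O_CREAT | O_WRONLY | O_TRUNC, 0644);

		if (fd < 0)
			return 1;
		memset(buf, 'x', sizeof(buf));
		/* buffered write whose page cache is dropped once the data is written back */
		if (pwritev2(fd, &iov, 1, 0, RWF_DONTCACHE) < 0)
			return 1;
		close(fd);
		return 0;
	}
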
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 887a25286441..f78ee3670dd5 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -98,43 +98,93 @@ struct statx_timestamp {
*/
struct statx {
/* 0x00 */
- __u32 stx_mask; /* What results were written [uncond] */
- __u32 stx_blksize; /* Preferred general I/O size [uncond] */
- __u64 stx_attributes; /* Flags conveying information about the file [uncond] */
+ /* What results were written [uncond] */
+ __u32 stx_mask;
+
+ /* Preferred general I/O size [uncond] */
+ __u32 stx_blksize;
+
+ /* Flags conveying information about the file [uncond] */
+ __u64 stx_attributes;
+
/* 0x10 */
- __u32 stx_nlink; /* Number of hard links */
- __u32 stx_uid; /* User ID of owner */
- __u32 stx_gid; /* Group ID of owner */
- __u16 stx_mode; /* File mode */
+ /* Number of hard links */
+ __u32 stx_nlink;
+
+ /* User ID of owner */
+ __u32 stx_uid;
+
+ /* Group ID of owner */
+ __u32 stx_gid;
+
+ /* File mode */
+ __u16 stx_mode;
__u16 __spare0[1];
+
/* 0x20 */
- __u64 stx_ino; /* Inode number */
- __u64 stx_size; /* File size */
- __u64 stx_blocks; /* Number of 512-byte blocks allocated */
- __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */
+ /* Inode number */
+ __u64 stx_ino;
+
+ /* File size */
+ __u64 stx_size;
+
+ /* Number of 512-byte blocks allocated */
+ __u64 stx_blocks;
+
+ /* Mask to show what's supported in stx_attributes */
+ __u64 stx_attributes_mask;
+
/* 0x40 */
- struct statx_timestamp stx_atime; /* Last access time */
- struct statx_timestamp stx_btime; /* File creation time */
- struct statx_timestamp stx_ctime; /* Last attribute change time */
- struct statx_timestamp stx_mtime; /* Last data modification time */
+ /* Last access time */
+ struct statx_timestamp stx_atime;
+
+ /* File creation time */
+ struct statx_timestamp stx_btime;
+
+ /* Last attribute change time */
+ struct statx_timestamp stx_ctime;
+
+ /* Last data modification time */
+ struct statx_timestamp stx_mtime;
+
/* 0x80 */
- __u32 stx_rdev_major; /* Device ID of special file [if bdev/cdev] */
+ /* Device ID of special file [if bdev/cdev] */
+ __u32 stx_rdev_major;
__u32 stx_rdev_minor;
- __u32 stx_dev_major; /* ID of device containing file [uncond] */
+
+ /* ID of device containing file [uncond] */
+ __u32 stx_dev_major;
__u32 stx_dev_minor;
+
/* 0x90 */
__u64 stx_mnt_id;
- __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
- __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
+
+ /* Memory buffer alignment for direct I/O */
+ __u32 stx_dio_mem_align;
+
+ /* File offset alignment for direct I/O */
+ __u32 stx_dio_offset_align;
+
/* 0xa0 */
- __u64 stx_subvol; /* Subvolume identifier */
- __u32 stx_atomic_write_unit_min; /* Min atomic write unit in bytes */
- __u32 stx_atomic_write_unit_max; /* Max atomic write unit in bytes */
+ /* Subvolume identifier */
+ __u64 stx_subvol;
+
+ /* Min atomic write unit in bytes */
+ __u32 stx_atomic_write_unit_min;
+
+ /* Max atomic write unit in bytes */
+ __u32 stx_atomic_write_unit_max;
+
/* 0xb0 */
- __u32 stx_atomic_write_segments_max; /* Max atomic write segment count */
- __u32 __spare1[1];
+ /* Max atomic write segment count */
+ __u32 stx_atomic_write_segments_max;
+
+ /* File offset alignment for direct I/O reads */
+ __u32 stx_dio_read_offset_align;
+
/* 0xb8 */
__u64 __spare3[9]; /* Spare space for future expansion */
+
/* 0x100 */
};
@@ -164,6 +214,7 @@ struct statx {
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */
#define STATX_WRITE_ATOMIC 0x00010000U /* Want/got atomic_write_* fields */
+#define STATX_DIO_READ_ALIGN 0x00020000U /* Want/got dio read alignment info */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
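
Editor's note: userspace opts into the new field with the STATX_DIO_READ_ALIGN mask bit and must
check stx_mask on return, since not every filesystem reports it. A sketch, assuming userspace
headers that already declare stx_dio_read_offset_align; the fallback #define carries the value
from the hunk above:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/stat.h>

	#ifndef STATX_DIO_READ_ALIGN
	#define STATX_DIO_READ_ALIGN 0x00020000U	/* value from the hunk above */
	#endif

	int main(int argc, char **argv)
	{
		struct statx stx;

		if (argc < 2 || statx(AT_FDCWD, argv[1], 0, STATX_DIO_READ_ALIGN, &stx))
			return 1;
		if (stx.stx_mask & STATX_DIO_READ_ALIGN)
			printf("dio read offset alignment: %u bytes\n",
			       stx.stx_dio_read_offset_align);
		else
			printf("dio read alignment not reported\n");
		return 0;
	}
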
diff --git a/io_uring/register.c b/io_uring/register.c
index fdd44914c39c..371aec87e078 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -405,8 +405,8 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
{
struct io_ring_ctx_rings o = { }, n = { }, *to_free = NULL;
size_t size, sq_array_offset;
+ unsigned i, tail, old_head;
struct io_uring_params p;
- unsigned i, tail;
void *ptr;
int ret;
@@ -449,10 +449,18 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
if (IS_ERR(n.rings))
return PTR_ERR(n.rings);
- n.rings->sq_ring_mask = p.sq_entries - 1;
- n.rings->cq_ring_mask = p.cq_entries - 1;
- n.rings->sq_ring_entries = p.sq_entries;
- n.rings->cq_ring_entries = p.cq_entries;
+ /*
+ * At this point n.rings is shared with userspace, just like o.rings
+ * is as well. While we don't expect userspace to modify it while
+ * a resize is in progress, and it's most likely that userspace will
+ * shoot itself in the foot if it does, we can't always assume good
+ * intent... Use read/write once helpers from here on to indicate the
+ * shared nature of it.
+ */
+ WRITE_ONCE(n.rings->sq_ring_mask, p.sq_entries - 1);
+ WRITE_ONCE(n.rings->cq_ring_mask, p.cq_entries - 1);
+ WRITE_ONCE(n.rings->sq_ring_entries, p.sq_entries);
+ WRITE_ONCE(n.rings->cq_ring_entries, p.cq_entries);
if (copy_to_user(arg, &p, sizeof(p))) {
io_register_free_rings(&p, &n);
@@ -509,20 +517,22 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
* rings can't hold what is already there, then fail the operation.
*/
n.sq_sqes = ptr;
- tail = o.rings->sq.tail;
- if (tail - o.rings->sq.head > p.sq_entries)
+ tail = READ_ONCE(o.rings->sq.tail);
+ old_head = READ_ONCE(o.rings->sq.head);
+ if (tail - old_head > p.sq_entries)
goto overflow;
- for (i = o.rings->sq.head; i < tail; i++) {
+ for (i = old_head; i < tail; i++) {
unsigned src_head = i & (ctx->sq_entries - 1);
- unsigned dst_head = i & n.rings->sq_ring_mask;
+ unsigned dst_head = i & (p.sq_entries - 1);
n.sq_sqes[dst_head] = o.sq_sqes[src_head];
}
- n.rings->sq.head = o.rings->sq.head;
- n.rings->sq.tail = o.rings->sq.tail;
+ WRITE_ONCE(n.rings->sq.head, READ_ONCE(o.rings->sq.head));
+ WRITE_ONCE(n.rings->sq.tail, READ_ONCE(o.rings->sq.tail));
- tail = o.rings->cq.tail;
- if (tail - o.rings->cq.head > p.cq_entries) {
+ tail = READ_ONCE(o.rings->cq.tail);
+ old_head = READ_ONCE(o.rings->cq.head);
+ if (tail - old_head > p.cq_entries) {
overflow:
/* restore old rings, and return -EOVERFLOW via cleanup path */
ctx->rings = o.rings;
@@ -531,21 +541,21 @@ overflow:
ret = -EOVERFLOW;
goto out;
}
- for (i = o.rings->cq.head; i < tail; i++) {
+ for (i = old_head; i < tail; i++) {
unsigned src_head = i & (ctx->cq_entries - 1);
- unsigned dst_head = i & n.rings->cq_ring_mask;
+ unsigned dst_head = i & (p.cq_entries - 1);
n.rings->cqes[dst_head] = o.rings->cqes[src_head];
}
- n.rings->cq.head = o.rings->cq.head;
- n.rings->cq.tail = o.rings->cq.tail;
+ WRITE_ONCE(n.rings->cq.head, READ_ONCE(o.rings->cq.head));
+ WRITE_ONCE(n.rings->cq.tail, READ_ONCE(o.rings->cq.tail));
/* invalidate cached cqe refill */
ctx->cqe_cached = ctx->cqe_sentinel = NULL;
- n.rings->sq_dropped = o.rings->sq_dropped;
- n.rings->sq_flags = o.rings->sq_flags;
- n.rings->cq_flags = o.rings->cq_flags;
- n.rings->cq_overflow = o.rings->cq_overflow;
+ WRITE_ONCE(n.rings->sq_dropped, READ_ONCE(o.rings->sq_dropped));
+ WRITE_ONCE(n.rings->sq_flags, READ_ONCE(o.rings->sq_flags));
+ WRITE_ONCE(n.rings->cq_flags, READ_ONCE(o.rings->cq_flags));
+ WRITE_ONCE(n.rings->cq_overflow, READ_ONCE(o.rings->cq_overflow));
/* all done, store old pointers and assign new ones */
if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
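
Editor's note: both the old and the new ring headers are mapped into userspace, so the resize path
now snapshots every shared field once with READ_ONCE() and stores through WRITE_ONCE(), and the
copy loops mask with the locally known p.sq_entries/p.cq_entries instead of re-reading a mask
userspace could have scribbled on. A condensed sketch of the same pattern; the struct and helper
are hypothetical, this is not the io_uring code itself:

	#include <linux/compiler.h>
	#include <linux/errno.h>

	struct ring_hdr {			/* hypothetical userspace-shared header */
		unsigned head;
		unsigned tail;
	};

	static int copy_ring_indices(struct ring_hdr *dst, const struct ring_hdr *src,
				     unsigned new_entries)
	{
		/* snapshot the shared values exactly once */
		unsigned tail = READ_ONCE(src->tail);
		unsigned head = READ_ONCE(src->head);

		/* validate the snapshot, never a fresh re-read */
		if (tail - head > new_entries)
			return -EOVERFLOW;

		WRITE_ONCE(dst->head, head);
		WRITE_ONCE(dst->tail, tail);
		return 0;
	}
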
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 077f84684c18..69937d0c94f9 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -997,7 +997,7 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
if (!dst_node) {
ret = -ENOMEM;
- goto out_put_free;
+ goto out_unlock;
}
refcount_inc(&src_node->buf->refs);
@@ -1033,14 +1033,6 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
mutex_lock(&src_ctx->uring_lock);
/* someone raced setting up buffers, dump ours */
ret = -EBUSY;
-out_put_free:
- i = data.nr;
- while (i--) {
- if (data.nodes[i]) {
- io_buffer_unmap(src_ctx, data.nodes[i]);
- kfree(data.nodes[i]);
- }
- }
out_unlock:
io_rsrc_data_free(ctx, &data);
mutex_unlock(&src_ctx->uring_lock);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b605334f8ee6..0509a9733745 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2179,7 +2179,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
},
[CPUHP_AP_HRTIMERS_DYING] = {
.name = "hrtimers:dying",
- .startup.single = NULL,
+ .startup.single = hrtimers_cpu_starting,
.teardown.single = hrtimers_cpu_dying,
},
[CPUHP_AP_TICK_DYING] = {
diff --git a/kernel/cred.c b/kernel/cred.c
index da7da250f7c8..9676965c0981 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -477,56 +477,6 @@ void abort_creds(struct cred *new)
EXPORT_SYMBOL(abort_creds);
/**
- * override_creds - Override the current process's subjective credentials
- * @new: The credentials to be assigned
- *
- * Install a set of temporary override subjective credentials on the current
- * process, returning the old set for later reversion.
- */
-const struct cred *override_creds(const struct cred *new)
-{
- const struct cred *old;
-
- kdebug("override_creds(%p{%ld})", new,
- atomic_long_read(&new->usage));
-
- /*
- * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
- *
- * That means that we do not clear the 'non_rcu' flag, since
- * we are only installing the cred into the thread-synchronous
- * '->cred' pointer, not the '->real_cred' pointer that is
- * visible to other threads under RCU.
- */
- get_new_cred((struct cred *)new);
- old = override_creds_light(new);
-
- kdebug("override_creds() = %p{%ld}", old,
- atomic_long_read(&old->usage));
- return old;
-}
-EXPORT_SYMBOL(override_creds);
-
-/**
- * revert_creds - Revert a temporary subjective credentials override
- * @old: The credentials to be restored
- *
- * Revert a temporary set of override subjective credentials to an old set,
- * discarding the override set.
- */
-void revert_creds(const struct cred *old)
-{
- const struct cred *override = current->cred;
-
- kdebug("revert_creds(%p{%ld})", old,
- atomic_long_read(&old->usage));
-
- revert_creds_light(old);
- put_cred(override);
-}
-EXPORT_SYMBOL(revert_creds);
-
-/**
* cred_fscmp - Compare two credentials with respect to filesystem access.
* @a: The first credential
* @b: The second credential
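
Editor's note: override_creds()/revert_creds() leave kernel/cred.c here; their replacements are
presumably lighter definitions elsewhere (e.g. inlines in <linux/cred.h>), which this hunk does not
show. The caller-side pairing they implement is the familiar one, sketched below for reference:

	#include <linux/cred.h>

	/* hypothetical caller; the pairing is unchanged by this cleanup */
	static void do_as_cred(const struct cred *new_cred)
	{
		const struct cred *old = override_creds(new_cred);

		/* ... temporarily act with new_cred's permissions ... */

		revert_creds(old);
	}
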
diff --git a/kernel/pid.c b/kernel/pid.c
index 115448e89c3e..3a10a7b6fcf8 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -43,6 +43,7 @@
#include <linux/sched/task.h>
#include <linux/idr.h>
#include <linux/pidfs.h>
+#include <linux/seqlock.h>
#include <net/sock.h>
#include <uapi/linux/pidfd.h>
@@ -60,15 +61,8 @@ struct pid init_struct_pid = {
}, }
};
-int pid_max = PID_MAX_DEFAULT;
-
-int pid_max_min = RESERVED_PIDS + 1;
-int pid_max_max = PID_MAX_LIMIT;
-/*
- * Pseudo filesystems start inode numbering after one. We use Reserved
- * PIDs as a natural offset.
- */
-static u64 pidfs_ino = RESERVED_PIDS;
+static int pid_max_min = RESERVED_PIDS + 1;
+static int pid_max_max = PID_MAX_LIMIT;
/*
* PID-map pages start out as NULL, they get allocated upon
@@ -87,6 +81,7 @@ struct pid_namespace init_pid_ns = {
#ifdef CONFIG_PID_NS
.ns.ops = &pidns_operations,
#endif
+ .pid_max = PID_MAX_DEFAULT,
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
.memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC,
#endif
@@ -108,6 +103,7 @@ EXPORT_SYMBOL_GPL(init_pid_ns);
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
+seqcount_spinlock_t pidmap_lock_seq = SEQCNT_SPINLOCK_ZERO(pidmap_lock_seq, &pidmap_lock);
void put_pid(struct pid *pid)
{
@@ -158,6 +154,7 @@ void free_pid(struct pid *pid)
idr_remove(&ns->idr, upid->nr);
}
+ pidfs_remove_pid(pid);
spin_unlock_irqrestore(&pidmap_lock, flags);
call_rcu(&pid->rcu, delayed_put_pid);
@@ -193,6 +190,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
for (i = ns->level; i >= 0; i--) {
int tid = 0;
+ int pid_max = READ_ONCE(tmp->pid_max);
if (set_tid_size) {
tid = set_tid[ns->level - i];
@@ -273,22 +271,24 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
INIT_HLIST_HEAD(&pid->inodes);
upid = pid->numbers + ns->level;
+ idr_preload(GFP_KERNEL);
spin_lock_irq(&pidmap_lock);
if (!(ns->pid_allocated & PIDNS_ADDING))
goto out_unlock;
- pid->stashed = NULL;
- pid->ino = ++pidfs_ino;
+ pidfs_add_pid(pid);
for ( ; upid >= pid->numbers; --upid) {
/* Make the PID visible to find_pid_ns. */
idr_replace(&upid->ns->idr, pid, upid->nr);
upid->ns->pid_allocated++;
}
spin_unlock_irq(&pidmap_lock);
+ idr_preload_end();
return pid;
out_unlock:
spin_unlock_irq(&pidmap_lock);
+ idr_preload_end();
put_pid_ns(ns);
out_free:
@@ -644,17 +644,118 @@ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
return fd;
}
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_set *pid_table_root_lookup(struct ctl_table_root *root)
+{
+ return &task_active_pid_ns(current)->set;
+}
+
+static int set_is_seen(struct ctl_table_set *set)
+{
+ return &task_active_pid_ns(current)->set == set;
+}
+
+static int pid_table_root_permissions(struct ctl_table_header *head,
+ const struct ctl_table *table)
+{
+ struct pid_namespace *pidns =
+ container_of(head->set, struct pid_namespace, set);
+ int mode = table->mode;
+
+ if (ns_capable(pidns->user_ns, CAP_SYS_ADMIN) ||
+ uid_eq(current_euid(), make_kuid(pidns->user_ns, 0)))
+ mode = (mode & S_IRWXU) >> 6;
+ else if (in_egroup_p(make_kgid(pidns->user_ns, 0)))
+ mode = (mode & S_IRWXG) >> 3;
+ else
+ mode = mode & S_IROTH;
+ return (mode << 6) | (mode << 3) | mode;
+}
+
+static void pid_table_root_set_ownership(struct ctl_table_header *head,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct pid_namespace *pidns =
+ container_of(head->set, struct pid_namespace, set);
+ kuid_t ns_root_uid;
+ kgid_t ns_root_gid;
+
+ ns_root_uid = make_kuid(pidns->user_ns, 0);
+ if (uid_valid(ns_root_uid))
+ *uid = ns_root_uid;
+
+ ns_root_gid = make_kgid(pidns->user_ns, 0);
+ if (gid_valid(ns_root_gid))
+ *gid = ns_root_gid;
+}
+
+static struct ctl_table_root pid_table_root = {
+ .lookup = pid_table_root_lookup,
+ .permissions = pid_table_root_permissions,
+ .set_ownership = pid_table_root_set_ownership,
+};
+
+static struct ctl_table pid_table[] = {
+ {
+ .procname = "pid_max",
+ .data = &init_pid_ns.pid_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &pid_max_min,
+ .extra2 = &pid_max_max,
+ },
+};
+#endif
+
+int register_pidns_sysctls(struct pid_namespace *pidns)
+{
+#ifdef CONFIG_SYSCTL
+ struct ctl_table *tbl;
+
+ setup_sysctl_set(&pidns->set, &pid_table_root, set_is_seen);
+
+ tbl = kmemdup(pid_table, sizeof(pid_table), GFP_KERNEL);
+ if (!tbl)
+ return -ENOMEM;
+ tbl->data = &pidns->pid_max;
+ pidns->pid_max = min(pid_max_max, max_t(int, pidns->pid_max,
+ PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
+
+ pidns->sysctls = __register_sysctl_table(&pidns->set, "kernel", tbl,
+ ARRAY_SIZE(pid_table));
+ if (!pidns->sysctls) {
+ kfree(tbl);
+ retire_sysctl_set(&pidns->set);
+ return -ENOMEM;
+ }
+#endif
+ return 0;
+}
+
+void unregister_pidns_sysctls(struct pid_namespace *pidns)
+{
+#ifdef CONFIG_SYSCTL
+ const struct ctl_table *tbl;
+
+ tbl = pidns->sysctls->ctl_table_arg;
+ unregister_sysctl_table(pidns->sysctls);
+ retire_sysctl_set(&pidns->set);
+ kfree(tbl);
+#endif
+}
+
void __init pid_idr_init(void)
{
/* Verify no one has done anything silly: */
BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING);
/* bump default and minimum pid_max based on number of cpus */
- pid_max = min(pid_max_max, max_t(int, pid_max,
- PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
+ init_pid_ns.pid_max = min(pid_max_max, max_t(int, init_pid_ns.pid_max,
+ PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
pid_max_min = max_t(int, pid_max_min,
PIDS_PER_CPU_MIN * num_possible_cpus());
- pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
+ pr_info("pid_max: default: %u minimum: %u\n", init_pid_ns.pid_max, pid_max_min);
idr_init(&init_pid_ns.idr);
@@ -665,6 +766,16 @@ void __init pid_idr_init(void)
NULL);
}
+static __init int pid_namespace_sysctl_init(void)
+{
+#ifdef CONFIG_SYSCTL
+ /* "kernel" directory will have already been initialized. */
+ BUG_ON(register_pidns_sysctls(&init_pid_ns));
+#endif
+ return 0;
+}
+subsys_initcall(pid_namespace_sysctl_init);
+
static struct file *__pidfd_fget(struct task_struct *task, int fd)
{
struct file *file;
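
Editor's note: pid_max moves into struct pid_namespace and is registered per namespace through
register_pidns_sysctls(); pid_table_root_lookup() above resolves the sysctl set against
task_active_pid_ns(current), so the value a task reads or writes at /proc/sys/kernel/pid_max is
its own namespace's limit rather than a single global. A trivial userspace sketch of reading it:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/pid_max", "r");
		int pid_max;

		if (!f || fscanf(f, "%d", &pid_max) != 1)
			return 1;
		/* with this series, this is the limit of the reader's pid namespace */
		printf("pid_max in this pid namespace: %d\n", pid_max);
		return 0;
	}
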
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index d70ab49d5b4a..f1ffa032fc32 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -70,6 +70,8 @@ static void dec_pid_namespaces(struct ucounts *ucounts)
dec_ucount(ucounts, UCOUNT_PID_NAMESPACES);
}
+static void destroy_pid_namespace_work(struct work_struct *work);
+
static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
struct pid_namespace *parent_pid_ns)
{
@@ -105,17 +107,27 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns
goto out_free_idr;
ns->ns.ops = &pidns_operations;
+ ns->pid_max = parent_pid_ns->pid_max;
+ err = register_pidns_sysctls(ns);
+ if (err)
+ goto out_free_inum;
+
refcount_set(&ns->ns.count, 1);
ns->level = level;
ns->parent = get_pid_ns(parent_pid_ns);
ns->user_ns = get_user_ns(user_ns);
ns->ucounts = ucounts;
ns->pid_allocated = PIDNS_ADDING;
+ INIT_WORK(&ns->work, destroy_pid_namespace_work);
+
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
ns->memfd_noexec_scope = pidns_memfd_noexec_scope(parent_pid_ns);
#endif
+
return ns;
+out_free_inum:
+ ns_free_inum(&ns->ns);
out_free_idr:
idr_destroy(&ns->idr);
kmem_cache_free(pid_ns_cachep, ns);
@@ -137,12 +149,28 @@ static void delayed_free_pidns(struct rcu_head *p)
static void destroy_pid_namespace(struct pid_namespace *ns)
{
+ unregister_pidns_sysctls(ns);
+
ns_free_inum(&ns->ns);
idr_destroy(&ns->idr);
call_rcu(&ns->rcu, delayed_free_pidns);
}
+static void destroy_pid_namespace_work(struct work_struct *work)
+{
+ struct pid_namespace *ns =
+ container_of(work, struct pid_namespace, work);
+
+ do {
+ struct pid_namespace *parent;
+
+ parent = ns->parent;
+ destroy_pid_namespace(ns);
+ ns = parent;
+ } while (ns != &init_pid_ns && refcount_dec_and_test(&ns->ns.count));
+}
+
struct pid_namespace *copy_pid_ns(unsigned long flags,
struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
@@ -155,15 +183,8 @@ struct pid_namespace *copy_pid_ns(unsigned long flags,
void put_pid_ns(struct pid_namespace *ns)
{
- struct pid_namespace *parent;
-
- while (ns != &init_pid_ns) {
- parent = ns->parent;
- if (!refcount_dec_and_test(&ns->ns.count))
- break;
- destroy_pid_namespace(ns);
- ns = parent;
- }
+ if (ns && ns != &init_pid_ns && refcount_dec_and_test(&ns->ns.count))
+ schedule_work(&ns->work);
}
EXPORT_SYMBOL_GPL(put_pid_ns);
@@ -274,6 +295,7 @@ static int pid_ns_ctl_handler(const struct ctl_table *table, int write,
next = idr_get_cursor(&pid_ns->idr) - 1;
tmp.data = &next;
+ tmp.extra2 = &pid_ns->pid_max;
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (!ret && write)
idr_set_cursor(&pid_ns->idr, next + 1);
@@ -281,7 +303,6 @@ static int pid_ns_ctl_handler(const struct ctl_table *table, int write,
return ret;
}
-extern int pid_max;
static struct ctl_table pid_ns_ctl_table[] = {
{
.procname = "ns_last_pid",
@@ -289,7 +310,7 @@ static struct ctl_table pid_ns_ctl_table[] = {
.mode = 0666, /* permissions are checked in the handler */
.proc_handler = pid_ns_ctl_handler,
.extra1 = SYSCTL_ZERO,
- .extra2 = &pid_max,
+ .extra2 = &init_pid_ns.pid_max,
},
};
#endif /* CONFIG_CHECKPOINT_RESTORE */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e9ca38512de..26958431deb7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -689,21 +689,16 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
*
* XXX could add max_slice to the augmented data to track this.
*/
-static s64 entity_lag(u64 avruntime, struct sched_entity *se)
+static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
s64 vlag, limit;
- vlag = avruntime - se->vruntime;
- limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
-
- return clamp(vlag, -limit, limit);
-}
-
-static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
SCHED_WARN_ON(!se->on_rq);
- se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
+ vlag = avg_vruntime(cfs_rq) - se->vruntime;
+ limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+
+ se->vlag = clamp(vlag, -limit, limit);
}
/*
@@ -3774,137 +3769,32 @@ static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
#endif
-static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
- unsigned long weight)
-{
- unsigned long old_weight = se->load.weight;
- s64 vlag, vslice;
-
- /*
- * VRUNTIME
- * --------
- *
- * COROLLARY #1: The virtual runtime of the entity needs to be
- * adjusted if re-weight at !0-lag point.
- *
- * Proof: For contradiction assume this is not true, so we can
- * re-weight without changing vruntime at !0-lag point.
- *
- * Weight VRuntime Avg-VRuntime
- * before w v V
- * after w' v' V'
- *
- * Since lag needs to be preserved through re-weight:
- *
- * lag = (V - v)*w = (V'- v')*w', where v = v'
- * ==> V' = (V - v)*w/w' + v (1)
- *
- * Let W be the total weight of the entities before reweight,
- * since V' is the new weighted average of entities:
- *
- * V' = (WV + w'v - wv) / (W + w' - w) (2)
- *
- * by using (1) & (2) we obtain:
- *
- * (WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v
- * ==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v
- * ==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v
- * ==> (V - v)*W/(W + w' - w) = (V - v)*w/w' (3)
- *
- * Since we are doing at !0-lag point which means V != v, we
- * can simplify (3):
- *
- * ==> W / (W + w' - w) = w / w'
- * ==> Ww' = Ww + ww' - ww
- * ==> W * (w' - w) = w * (w' - w)
- * ==> W = w (re-weight indicates w' != w)
- *
- * So the cfs_rq contains only one entity, hence vruntime of
- * the entity @v should always equal to the cfs_rq's weighted
- * average vruntime @V, which means we will always re-weight
- * at 0-lag point, thus breach assumption. Proof completed.
- *
- *
- * COROLLARY #2: Re-weight does NOT affect weighted average
- * vruntime of all the entities.
- *
- * Proof: According to corollary #1, Eq. (1) should be:
- *
- * (V - v)*w = (V' - v')*w'
- * ==> v' = V' - (V - v)*w/w' (4)
- *
- * According to the weighted average formula, we have:
- *
- * V' = (WV - wv + w'v') / (W - w + w')
- * = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w')
- * = (WV - wv + w'V' - Vw + wv) / (W - w + w')
- * = (WV + w'V' - Vw) / (W - w + w')
- *
- * ==> V'*(W - w + w') = WV + w'V' - Vw
- * ==> V' * (W - w) = (W - w) * V (5)
- *
- * If the entity is the only one in the cfs_rq, then reweight
- * always occurs at 0-lag point, so V won't change. Or else
- * there are other entities, hence W != w, then Eq. (5) turns
- * into V' = V. So V won't change in either case, proof done.
- *
- *
- * So according to corollary #1 & #2, the effect of re-weight
- * on vruntime should be:
- *
- * v' = V' - (V - v) * w / w' (4)
- * = V - (V - v) * w / w'
- * = V - vl * w / w'
- * = V - vl'
- */
- if (avruntime != se->vruntime) {
- vlag = entity_lag(avruntime, se);
- vlag = div_s64(vlag * old_weight, weight);
- se->vruntime = avruntime - vlag;
- }
-
- /*
- * DEADLINE
- * --------
- *
- * When the weight changes, the virtual time slope changes and
- * we should adjust the relative virtual deadline accordingly.
- *
- * d' = v' + (d - v)*w/w'
- * = V' - (V - v)*w/w' + (d - v)*w/w'
- * = V - (V - v)*w/w' + (d - v)*w/w'
- * = V + (d - V)*w/w'
- */
- vslice = (s64)(se->deadline - avruntime);
- vslice = div_s64(vslice * old_weight, weight);
- se->deadline = avruntime + vslice;
-}
+static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
{
bool curr = cfs_rq->curr == se;
- u64 avruntime;
if (se->on_rq) {
/* commit outstanding execution time */
update_curr(cfs_rq);
- avruntime = avg_vruntime(cfs_rq);
+ update_entity_lag(cfs_rq, se);
+ se->deadline -= se->vruntime;
+ se->rel_deadline = 1;
if (!curr)
__dequeue_entity(cfs_rq, se);
update_load_sub(&cfs_rq->load, se->load.weight);
}
dequeue_load_avg(cfs_rq, se);
- if (se->on_rq) {
- reweight_eevdf(se, avruntime, weight);
- } else {
- /*
- * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
- * we need to scale se->vlag when w_i changes.
- */
- se->vlag = div_s64(se->vlag * se->load.weight, weight);
- }
+ /*
+ * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
+ * we need to scale se->vlag when w_i changes.
+ */
+ se->vlag = div_s64(se->vlag * se->load.weight, weight);
+ if (se->rel_deadline)
+ se->deadline = div_s64(se->deadline * se->load.weight, weight);
update_load_set(&se->load, weight);
@@ -3919,6 +3809,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
enqueue_load_avg(cfs_rq, se);
if (se->on_rq) {
update_load_add(&cfs_rq->load, se->load.weight);
+ place_entity(cfs_rq, se, 0);
if (!curr)
__enqueue_entity(cfs_rq, se);
@@ -4065,7 +3956,11 @@ static void update_cfs_group(struct sched_entity *se)
struct cfs_rq *gcfs_rq = group_cfs_rq(se);
long shares;
- if (!gcfs_rq)
+ /*
+ * When a group becomes empty, preserve its weight. This matters for
+ * DELAY_DEQUEUE.
+ */
+ if (!gcfs_rq || !gcfs_rq->load.weight)
return;
if (throttled_hierarchy(gcfs_rq))
@@ -5359,7 +5254,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se->vruntime = vruntime - lag;
- if (sched_feat(PLACE_REL_DEADLINE) && se->rel_deadline) {
+ if (se->rel_deadline) {
se->deadline += se->vruntime;
se->rel_deadline = 0;
return;
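
Editor's note: reweight_eevdf() is folded away; on reweight the entity is dequeued with its lag and
relative deadline recorded, both are rescaled by old_weight/new_weight, and place_entity() re-inserts
it. A worked instance of that rescale, in the same ASCII notation as the removed comment block:
take an entity with se->vlag = 6 (six units of virtual time behind V) being reweighted from
w = 2048 to w' = 1024. The weighted lag lag_i = w_i*(V - v_i) must be preserved while only
vlag = V - v_i is stored, hence:

	lag    = w  * vlag        = 2048 * 6         = 12288
	vlag'  = vlag * w / w'    = 6 * 2048 / 1024  = 12
	check:   w' * vlag'       = 1024 * 12        = 12288   (weighted lag preserved)

The relative deadline held in se->deadline (while se->rel_deadline is set) is rescaled by the same
w/w' factor before place_entity() requeues the entity.
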
diff --git a/kernel/signal.c b/kernel/signal.c
index 989b1cc9116a..a2afd54303f0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2007,11 +2007,22 @@ void posixtimer_send_sigqueue(struct k_itimer *tmr)
if (!list_empty(&q->list)) {
/*
- * If task group is exiting with the signal already pending,
- * wait for __exit_signal() to do its job. Otherwise if
- * ignored, it's not supposed to be queued. Try to survive.
+ * The signal was ignored and blocked. The timer
+ * expiry queued it because blocked signals are
+ * queued independent of the ignored state.
+ *
+ * The unblocking set SIGPENDING, but the signal
+ * was not yet dequeued from the pending list.
+ * So prepare_signal() sees unblocked and ignored,
+ * which ends up here. Leave it queued like a
+ * regular signal.
+ *
+ * The same happens when the task group is exiting
+ * and the signal is already queued.
+ * prepare_signal() treats SIGNAL_GROUP_EXIT as
+ * ignored independent of its queued state. This
+ * gets cleaned up in __exit_signal().
*/
- WARN_ON_ONCE(!(t->signal->flags & SIGNAL_GROUP_EXIT));
goto out;
}
@@ -2046,17 +2057,25 @@ void posixtimer_send_sigqueue(struct k_itimer *tmr)
goto out;
}
- /* This should never happen and leaks a reference count */
- if (WARN_ON_ONCE(!hlist_unhashed(&tmr->ignored_list)))
- hlist_del_init(&tmr->ignored_list);
-
if (unlikely(!list_empty(&q->list))) {
/* This holds a reference count already */
result = TRACE_SIGNAL_ALREADY_PENDING;
goto out;
}
- posixtimer_sigqueue_getref(q);
+ /*
+ * If the signal is on the ignore list, it got blocked after it was
+ * ignored earlier. But nothing lifted the ignore. Move it back to
+ * the pending list to be consistent with the regular signal
+ * handling. This already holds a reference count.
+ *
+ * If it's not on the ignore list acquire a reference count.
+ */
+ if (likely(hlist_unhashed(&tmr->ignored_list)))
+ posixtimer_sigqueue_getref(q);
+ else
+ hlist_del_init(&tmr->ignored_list);
+
posixtimer_queue_sigqueue(q, t, tmr->it_pid_type);
result = TRACE_SIGNAL_DELIVERED;
out:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 5c9202cb8f59..7ae7a4136855 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1804,15 +1804,6 @@ static struct ctl_table kern_table[] = {
},
#endif
{
- .procname = "pid_max",
- .data = &pid_max,
- .maxlen = sizeof (int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &pid_max_min,
- .extra2 = &pid_max_max,
- },
- {
.procname = "panic_on_oops",
.data = &panic_on_oops,
.maxlen = sizeof(int),
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 80fe3749d2db..030426c8c944 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -2202,6 +2202,15 @@ int hrtimers_prepare_cpu(unsigned int cpu)
}
cpu_base->cpu = cpu;
+ hrtimer_cpu_base_init_expiry_lock(cpu_base);
+ return 0;
+}
+
+int hrtimers_cpu_starting(unsigned int cpu)
+{
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+
+ /* Clear out any left over state from a CPU down operation */
cpu_base->active_bases = 0;
cpu_base->hres_active = 0;
cpu_base->hang_detected = 0;
@@ -2210,7 +2219,6 @@ int hrtimers_prepare_cpu(unsigned int cpu)
cpu_base->expires_next = KTIME_MAX;
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->online = 1;
- hrtimer_cpu_base_init_expiry_lock(cpu_base);
return 0;
}
@@ -2286,5 +2294,6 @@ int hrtimers_cpu_dying(unsigned int dying_cpu)
void __init hrtimers_init(void)
{
hrtimers_prepare_cpu(smp_processor_id());
+ hrtimers_cpu_starting(smp_processor_id());
open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 8d57f7686bb0..066c9ddca4ec 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -534,8 +534,13 @@ static void __walk_groups(up_f up, struct tmigr_walk *data,
break;
child = group;
- group = group->parent;
+ /*
+ * Pairs with the store release on group connection
+ * to make sure group initialization is visible.
+ */
+ group = READ_ONCE(group->parent);
data->childmask = child->groupmask;
+ WARN_ON_ONCE(!data->childmask);
} while (group);
}
@@ -564,7 +569,7 @@ static struct tmigr_event *tmigr_next_groupevt(struct tmigr_group *group)
while ((node = timerqueue_getnext(&group->events))) {
evt = container_of(node, struct tmigr_event, nextevt);
- if (!evt->ignore) {
+ if (!READ_ONCE(evt->ignore)) {
WRITE_ONCE(group->next_expiry, evt->nextevt.expires);
return evt;
}
@@ -660,7 +665,7 @@ static bool tmigr_active_up(struct tmigr_group *group,
* lock is held while updating the ignore flag in idle path. So this
* state change will not be lost.
*/
- group->groupevt.ignore = true;
+ WRITE_ONCE(group->groupevt.ignore, true);
return walk_done;
}
@@ -721,6 +726,7 @@ bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
union tmigr_state childstate, groupstate;
bool remote = data->remote;
bool walk_done = false;
+ bool ignore;
u64 nextexp;
if (child) {
@@ -739,11 +745,19 @@ bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
nextexp = child->next_expiry;
evt = &child->groupevt;
- evt->ignore = (nextexp == KTIME_MAX) ? true : false;
+ /*
+ * This can race with concurrent idle exit (activate).
+ * If the current writer wins, a useless remote expiration may
+ * be scheduled. If the activate wins, the event is properly
+ * ignored.
+ */
+ ignore = (nextexp == KTIME_MAX) ? true : false;
+ WRITE_ONCE(evt->ignore, ignore);
} else {
nextexp = data->nextexp;
first_childevt = evt = data->evt;
+ ignore = evt->ignore;
/*
* Walking the hierarchy is required in any case when a
@@ -769,7 +783,7 @@ bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
* first event information of the group is updated properly and
* also handled properly, so skip this fast return path.
*/
- if (evt->ignore && !remote && group->parent)
+ if (ignore && !remote && group->parent)
return true;
raw_spin_lock(&group->lock);
@@ -783,7 +797,7 @@ bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
* queue when the expiry time changed only or when it could be ignored.
*/
if (timerqueue_node_queued(&evt->nextevt)) {
- if ((evt->nextevt.expires == nextexp) && !evt->ignore) {
+ if ((evt->nextevt.expires == nextexp) && !ignore) {
/* Make sure not to miss a new CPU event with the same expiry */
evt->cpu = first_childevt->cpu;
goto check_toplvl;
@@ -793,7 +807,7 @@ bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
WRITE_ONCE(group->next_expiry, KTIME_MAX);
}
- if (evt->ignore) {
+ if (ignore) {
/*
* When the next child event could be ignored (nextexp is
* KTIME_MAX) and there was no remote timer handling before or
@@ -1487,6 +1501,21 @@ static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
s.seq = 0;
atomic_set(&group->migr_state, s.state);
+ /*
+ * If this is a new top-level, prepare its groupmask in advance.
+ * This avoids accidents where yet another new top-level is
+ * created in the future and made visible before the current groupmask.
+ */
+ if (list_empty(&tmigr_level_list[lvl])) {
+ group->groupmask = BIT(0);
+ /*
+ * The previous top level has prepared its groupmask already,
+ * simply account it as the first child.
+ */
+ if (lvl > 0)
+ group->num_children = 1;
+ }
+
timerqueue_init_head(&group->events);
timerqueue_init(&group->groupevt.nextevt);
group->groupevt.nextevt.expires = KTIME_MAX;
@@ -1550,8 +1579,25 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
raw_spin_lock_irq(&child->lock);
raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
- child->parent = parent;
- child->groupmask = BIT(parent->num_children++);
+ if (activate) {
+ /*
+ * @child is the old top and @parent the new one. In this
+ * case groupmask is pre-initialized and @child already
+ * accounted, along with its new sibling corresponding to the
+ * CPU going up.
+ */
+ WARN_ON_ONCE(child->groupmask != BIT(0) || parent->num_children != 2);
+ } else {
+ /* Adding @child for the CPU going up to @parent. */
+ child->groupmask = BIT(parent->num_children++);
+ }
+
+ /*
+ * Make sure parent initialization is visible before publishing it to a
+ * racing CPU entering/exiting idle. This RELEASE barrier enforces an
+ * address dependency that pairs with the READ_ONCE() in __walk_groups().
+ */
+ smp_store_release(&child->parent, parent);
raw_spin_unlock(&parent->lock);
raw_spin_unlock_irq(&child->lock);
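
Editor's note: connecting a child to a new parent now publishes the parent pointer with a release
store, pairing with the READ_ONCE() added to __walk_groups(), so a walker can never observe a parent
whose initialization is still in flight. A stripped-down sketch of that publish/consume pattern;
the names are hypothetical and this is not the tmigr code:

	#include <linux/compiler.h>
	#include <asm/barrier.h>

	struct node {
		int ready_data;
		struct node *parent;
	};

	/* writer: finish initializing the parent, then publish the pointer */
	static void publish_parent(struct node *child, struct node *parent)
	{
		parent->ready_data = 1;				/* all prior init ...      */
		smp_store_release(&child->parent, parent);	/* ... ordered before this */
	}

	/* reader: one READ_ONCE() per step; the address dependency orders the loads */
	static int walk_up(struct node *n)
	{
		struct node *p = READ_ONCE(n->parent);

		return p ? p->ready_data : 0;
	}
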
diff --git a/kernel/trace/pid_list.c b/kernel/trace/pid_list.c
index 4966e6bbdf6f..c62b9b3cfb3d 100644
--- a/kernel/trace/pid_list.c
+++ b/kernel/trace/pid_list.c
@@ -414,7 +414,7 @@ struct trace_pid_list *trace_pid_list_alloc(void)
int i;
 /* According to linux/thread.h, pids can be no bigger than 30 bits */
- WARN_ON_ONCE(pid_max > (1 << 30));
+ WARN_ON_ONCE(init_pid_ns.pid_max > (1 << 30));
pid_list = kzalloc(sizeof(*pid_list), GFP_KERNEL);
if (!pid_list)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f8aebcb01e62..b6e40e8791fa 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4122,6 +4122,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
preempt_model_none() ? "server" :
preempt_model_voluntary() ? "desktop" :
preempt_model_full() ? "preempt" :
+ preempt_model_lazy() ? "lazy" :
preempt_model_rt() ? "preempt_rt" :
"unknown",
/* These are reserved for later use */
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9691b47b5f3d..179676db622e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -717,8 +717,6 @@ extern unsigned long tracing_thresh;
/* PID filtering */
-extern int pid_max;
-
bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index fce064e20570..a4e799c1e767 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -182,6 +182,7 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
struct trace_array_cpu *data;
unsigned long flags;
unsigned int trace_ctx;
+ u64 *calltime;
int ret;
if (ftrace_graph_ignore_func(gops, trace))
@@ -199,6 +200,12 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
if (!func_prolog_dec(tr, &data, &flags))
return 0;
+ calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+ if (!calltime)
+ return 0;
+
+ *calltime = trace_clock_local();
+
trace_ctx = tracing_gen_ctx_flags(flags);
ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
@@ -213,12 +220,19 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
struct trace_array_cpu *data;
unsigned long flags;
unsigned int trace_ctx;
+ u64 *calltime;
+ int size;
ftrace_graph_addr_finish(gops, trace);
if (!func_prolog_dec(tr, &data, &flags))
return;
+ calltime = fgraph_retrieve_data(gops->idx, &size);
+ if (!calltime)
+ return;
+ trace->calltime = *calltime;
+
trace_ctx = tracing_gen_ctx_flags(flags);
__trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 573b5d8e8a28..cb49f7279dc8 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -442,7 +442,7 @@ int trace_alloc_tgid_map(void)
if (tgid_map)
return 0;
- tgid_map_max = pid_max;
+ tgid_map_max = init_pid_ns.pid_max;
map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
GFP_KERNEL);
if (!map)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index d6c7f18daa15..c58292e424d5 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -118,6 +118,7 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
unsigned int trace_ctx;
+ u64 *calltime;
int ret = 0;
if (ftrace_graph_ignore_func(gops, trace))
@@ -135,6 +136,12 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return 0;
+ calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+ if (!calltime)
+ return 0;
+
+ *calltime = trace_clock_local();
+
ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
preempt_enable_notrace();
@@ -148,12 +155,19 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
unsigned int trace_ctx;
+ u64 *calltime;
+ int size;
ftrace_graph_addr_finish(gops, trace);
if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return;
+ calltime = fgraph_retrieve_data(gops->idx, &size);
+ if (!calltime)
+ return;
+ trace->calltime = *calltime;
+
__trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index 1895fbc32bcb..5267adeaa403 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -71,7 +71,7 @@ static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
bit /= WATCH_QUEUE_NOTE_SIZE;
page = buf->page;
- bit += page->index;
+ bit += page->private;
set_bit(bit, wqueue->notes_bitmap);
generic_pipe_buf_release(pipe, buf);
@@ -278,7 +278,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i])
goto error_p;
- pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
+ pages[i]->private = i * WATCH_QUEUE_NOTES_PER_PAGE;
}
bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 7dcebf118a3e..65e706e1bc19 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -195,6 +195,9 @@ void pgalloc_tag_swap(struct folio *new, struct folio *old)
union codetag_ref ref_old, ref_new;
struct alloc_tag *tag_old, *tag_new;
+ if (!mem_alloc_profiling_enabled())
+ return;
+
tag_old = pgalloc_tag_get(&old->page);
if (!tag_old)
return;
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 13e15687675a..10a560feb66e 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -392,7 +392,7 @@ static void __init iov_kunit_load_folioq(struct kunit *test,
if (folioq_full(p)) {
p->next = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
- folioq_init(p->next);
+ folioq_init(p->next, 0);
p->next->prev = p;
p = p->next;
}
@@ -409,7 +409,7 @@ static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
folioq = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
- folioq_init(folioq);
+ folioq_init(folioq, 0);
return folioq;
}
diff --git a/lib/lockref.c b/lib/lockref.c
index 2afe4c5d8919..5d8e3ef3860e 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -58,23 +58,22 @@ EXPORT_SYMBOL(lockref_get);
* @lockref: pointer to lockref structure
* Return: 1 if count updated successfully or 0 if count was zero
*/
-int lockref_get_not_zero(struct lockref *lockref)
+bool lockref_get_not_zero(struct lockref *lockref)
{
- int retval;
+ bool retval = false;
CMPXCHG_LOOP(
new.count++;
if (old.count <= 0)
- return 0;
+ return false;
,
- return 1;
+ return true;
);
spin_lock(&lockref->lock);
- retval = 0;
if (lockref->count > 0) {
lockref->count++;
- retval = 1;
+ retval = true;
}
spin_unlock(&lockref->lock);
return retval;
@@ -82,39 +81,11 @@ int lockref_get_not_zero(struct lockref *lockref)
EXPORT_SYMBOL(lockref_get_not_zero);
/**
- * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
- * @lockref: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count would become zero
- */
-int lockref_put_not_zero(struct lockref *lockref)
-{
- int retval;
-
- CMPXCHG_LOOP(
- new.count--;
- if (old.count <= 1)
- return 0;
- ,
- return 1;
- );
-
- spin_lock(&lockref->lock);
- retval = 0;
- if (lockref->count > 1) {
- lockref->count--;
- retval = 1;
- }
- spin_unlock(&lockref->lock);
- return retval;
-}
-EXPORT_SYMBOL(lockref_put_not_zero);
-
-/**
* lockref_put_return - Decrement reference count if possible
* @lockref: pointer to lockref structure
*
* Decrement the reference count and return the new value.
- * If the lockref was dead or locked, return an error.
+ * If the lockref was dead or locked, return -1.
*/
int lockref_put_return(struct lockref *lockref)
{
@@ -134,22 +105,22 @@ EXPORT_SYMBOL(lockref_put_return);
* @lockref: pointer to lockref structure
* Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
*/
-int lockref_put_or_lock(struct lockref *lockref)
+bool lockref_put_or_lock(struct lockref *lockref)
{
CMPXCHG_LOOP(
new.count--;
if (old.count <= 1)
break;
,
- return 1;
+ return true;
);
spin_lock(&lockref->lock);
if (lockref->count <= 1)
- return 0;
+ return false;
lockref->count--;
spin_unlock(&lockref->lock);
- return 1;
+ return true;
}
EXPORT_SYMBOL(lockref_put_or_lock);
@@ -169,23 +140,22 @@ EXPORT_SYMBOL(lockref_mark_dead);
* @lockref: pointer to lockref structure
* Return: 1 if count updated successfully or 0 if lockref was dead
*/
-int lockref_get_not_dead(struct lockref *lockref)
+bool lockref_get_not_dead(struct lockref *lockref)
{
- int retval;
+ bool retval = false;
CMPXCHG_LOOP(
new.count++;
if (old.count < 0)
- return 0;
+ return false;
,
- return 1;
+ return true;
);
spin_lock(&lockref->lock);
- retval = 0;
if (lockref->count >= 0) {
lockref->count++;
- retval = 1;
+ retval = true;
}
spin_unlock(&lockref->lock);
return retval;
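
Editor's note: the lockref getters/putters switch from int to bool (and lockref_put_not_zero() goes
away entirely) without changing the calling convention, so existing callers keep testing the return
value as before. A hedged sketch of the typical pattern; the helper name is hypothetical:

	#include <linux/dcache.h>
	#include <linux/lockref.h>

	static struct dentry *try_grab_dentry(struct dentry *dentry)
	{
		/* unchanged for callers whether the return type is int or bool */
		if (!lockref_get_not_dead(&dentry->d_lockref))
			return NULL;	/* already dead: no reference taken */
		return dentry;		/* count bumped; caller must drop it later */
	}
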
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 9d3dac38a3f4..56fe96319292 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -160,8 +160,7 @@ long long simple_strtoll(const char *cp, char **endp, unsigned int base)
}
EXPORT_SYMBOL(simple_strtoll);
-static noinline_for_stack
-int skip_atoi(const char **s)
+static inline int skip_atoi(const char **s)
{
int i = 0;
@@ -407,7 +406,7 @@ int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
return len + width;
}
-#define SIGN 1 /* unsigned/signed, must be 1 */
+#define SIGN 1 /* unsigned/signed */
#define LEFT 2 /* left justified */
#define PLUS 4 /* show plus */
#define SPACE 8 /* space if plus */
@@ -415,38 +414,26 @@ int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
-static_assert(SIGN == 1);
static_assert(ZEROPAD == ('0' - ' '));
static_assert(SMALL == ('a' ^ 'A'));
-enum format_type {
- FORMAT_TYPE_NONE, /* Just a string part */
- FORMAT_TYPE_WIDTH,
- FORMAT_TYPE_PRECISION,
- FORMAT_TYPE_CHAR,
- FORMAT_TYPE_STR,
- FORMAT_TYPE_PTR,
- FORMAT_TYPE_PERCENT_CHAR,
- FORMAT_TYPE_INVALID,
- FORMAT_TYPE_LONG_LONG,
- FORMAT_TYPE_ULONG,
- FORMAT_TYPE_LONG,
- FORMAT_TYPE_UBYTE,
- FORMAT_TYPE_BYTE,
- FORMAT_TYPE_USHORT,
- FORMAT_TYPE_SHORT,
- FORMAT_TYPE_UINT,
- FORMAT_TYPE_INT,
- FORMAT_TYPE_SIZE_T,
- FORMAT_TYPE_PTRDIFF
+enum format_state {
+ FORMAT_STATE_NONE, /* Just a string part */
+ FORMAT_STATE_NUM,
+ FORMAT_STATE_WIDTH,
+ FORMAT_STATE_PRECISION,
+ FORMAT_STATE_CHAR,
+ FORMAT_STATE_STR,
+ FORMAT_STATE_PTR,
+ FORMAT_STATE_PERCENT_CHAR,
+ FORMAT_STATE_INVALID,
};
struct printf_spec {
- unsigned int type:8; /* format_type enum */
- signed int field_width:24; /* width of output field */
- unsigned int flags:8; /* flags to number() */
- unsigned int base:8; /* number base, 8, 10 or 16 only */
- signed int precision:16; /* # of digits/chars */
+ unsigned char flags; /* flags to number() */
+ unsigned char base; /* number base, 8, 10 or 16 only */
+ short precision; /* # of digits/chars */
+ int field_width; /* width of output field */
} __packed;
static_assert(sizeof(struct printf_spec) == 8);
@@ -579,7 +566,6 @@ char *special_hex_number(char *buf, char *end, unsigned long long num, int size)
{
struct printf_spec spec;
- spec.type = FORMAT_TYPE_PTR;
spec.field_width = 2 + 2 * size; /* 0x + hex */
spec.flags = SPECIAL | SMALL | ZEROPAD;
spec.base = 16;
@@ -2530,6 +2516,26 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
}
}
+struct fmt {
+ const char *str;
+ unsigned char state; // enum format_state
+ unsigned char size; // size of numbers
+};
+
+#define SPEC_CHAR(x, flag) [(x)-32] = flag
+static unsigned char spec_flag(unsigned char c)
+{
+ static const unsigned char spec_flag_array[] = {
+ SPEC_CHAR(' ', SPACE),
+ SPEC_CHAR('#', SPECIAL),
+ SPEC_CHAR('+', PLUS),
+ SPEC_CHAR('-', LEFT),
+ SPEC_CHAR('0', ZEROPAD),
+ };
+ c -= 32;
+ return (c < sizeof(spec_flag_array)) ? spec_flag_array[c] : 0;
+}
+
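
For illustration, the spec_flag() helper above replaces a per-character switch with a small array indexed by (c - 32). A minimal standalone sketch of the same idea (flag_of, tbl and main are hypothetical names, not part of the kernel code):

#include <stdio.h>

#define SPACE 8
#define PLUS  4
#define LEFT  2

#define FLAG_CHAR(x, f) [(x) - 32] = (f)

static unsigned char flag_of(unsigned char c)
{
	static const unsigned char tbl[] = {
		FLAG_CHAR(' ', SPACE),
		FLAG_CHAR('+', PLUS),
		FLAG_CHAR('-', LEFT),
	};

	/* Characters below ' ' wrap around and fall outside the table. */
	c -= 32;
	return c < sizeof(tbl) ? tbl[c] : 0;
}

int main(void)
{
	/* '+' and '-' are flag characters, 'd' is not: prints "4 2 0" */
	printf("%d %d %d\n", flag_of('+'), flag_of('-'), flag_of('d'));
	return 0;
}
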
/*
* Helper function to decode printf style format.
* Each call decode a token from the format and return the
@@ -2552,181 +2558,141 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
* @qualifier: qualifier of a number (long, size_t, ...)
*/
static noinline_for_stack
-int format_decode(const char *fmt, struct printf_spec *spec)
+struct fmt format_decode(struct fmt fmt, struct printf_spec *spec)
{
- const char *start = fmt;
- char qualifier;
+ const char *start = fmt.str;
+ char flag;
/* we finished early by reading the field width */
- if (spec->type == FORMAT_TYPE_WIDTH) {
+ if (unlikely(fmt.state == FORMAT_STATE_WIDTH)) {
if (spec->field_width < 0) {
spec->field_width = -spec->field_width;
spec->flags |= LEFT;
}
- spec->type = FORMAT_TYPE_NONE;
+ fmt.state = FORMAT_STATE_NONE;
goto precision;
}
/* we finished early by reading the precision */
- if (spec->type == FORMAT_TYPE_PRECISION) {
+ if (unlikely(fmt.state == FORMAT_STATE_PRECISION)) {
if (spec->precision < 0)
spec->precision = 0;
- spec->type = FORMAT_TYPE_NONE;
+ fmt.state = FORMAT_STATE_NONE;
goto qualifier;
}
/* By default */
- spec->type = FORMAT_TYPE_NONE;
+ fmt.state = FORMAT_STATE_NONE;
- for (; *fmt ; ++fmt) {
- if (*fmt == '%')
+ for (; *fmt.str ; fmt.str++) {
+ if (*fmt.str == '%')
break;
}
/* Return the current non-format string */
- if (fmt != start || !*fmt)
- return fmt - start;
+ if (fmt.str != start || !*fmt.str)
+ return fmt;
- /* Process flags */
+ /* Process flags. This also skips the first '%' */
spec->flags = 0;
-
- while (1) { /* this also skips first '%' */
- bool found = true;
-
- ++fmt;
-
- switch (*fmt) {
- case '-': spec->flags |= LEFT; break;
- case '+': spec->flags |= PLUS; break;
- case ' ': spec->flags |= SPACE; break;
- case '#': spec->flags |= SPECIAL; break;
- case '0': spec->flags |= ZEROPAD; break;
- default: found = false;
- }
-
- if (!found)
- break;
- }
+ do {
+ /* this also skips first '%' */
+ flag = spec_flag(*++fmt.str);
+ spec->flags |= flag;
+ } while (flag);
/* get field width */
spec->field_width = -1;
- if (isdigit(*fmt))
- spec->field_width = skip_atoi(&fmt);
- else if (*fmt == '*') {
+ if (isdigit(*fmt.str))
+ spec->field_width = skip_atoi(&fmt.str);
+ else if (unlikely(*fmt.str == '*')) {
/* it's the next argument */
- spec->type = FORMAT_TYPE_WIDTH;
- return ++fmt - start;
+ fmt.state = FORMAT_STATE_WIDTH;
+ fmt.str++;
+ return fmt;
}
precision:
/* get the precision */
spec->precision = -1;
- if (*fmt == '.') {
- ++fmt;
- if (isdigit(*fmt)) {
- spec->precision = skip_atoi(&fmt);
+ if (unlikely(*fmt.str == '.')) {
+ fmt.str++;
+ if (isdigit(*fmt.str)) {
+ spec->precision = skip_atoi(&fmt.str);
if (spec->precision < 0)
spec->precision = 0;
- } else if (*fmt == '*') {
+ } else if (*fmt.str == '*') {
/* it's the next argument */
- spec->type = FORMAT_TYPE_PRECISION;
- return ++fmt - start;
+ fmt.state = FORMAT_STATE_PRECISION;
+ fmt.str++;
+ return fmt;
}
}
qualifier:
- /* get the conversion qualifier */
- qualifier = 0;
- if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
- *fmt == 'z' || *fmt == 't') {
- qualifier = *fmt++;
- if (unlikely(qualifier == *fmt)) {
- if (qualifier == 'l') {
- qualifier = 'L';
- ++fmt;
- } else if (qualifier == 'h') {
- qualifier = 'H';
- ++fmt;
- }
- }
- }
-
- /* default base */
+ /* Set up default numeric format */
spec->base = 10;
- switch (*fmt) {
- case 'c':
- spec->type = FORMAT_TYPE_CHAR;
- return ++fmt - start;
-
- case 's':
- spec->type = FORMAT_TYPE_STR;
- return ++fmt - start;
-
- case 'p':
- spec->type = FORMAT_TYPE_PTR;
- return ++fmt - start;
-
- case '%':
- spec->type = FORMAT_TYPE_PERCENT_CHAR;
- return ++fmt - start;
-
- /* integer number formats - set up the flags and "break" */
- case 'o':
- spec->base = 8;
- break;
-
- case 'x':
- spec->flags |= SMALL;
- fallthrough;
-
- case 'X':
- spec->base = 16;
- break;
+ fmt.state = FORMAT_STATE_NUM;
+ fmt.size = sizeof(int);
+ static const struct format_state {
+ unsigned char state;
+ unsigned char size;
+ unsigned char flags_or_double_size;
+ unsigned char base;
+ } lookup_state[256] = {
+ // Length
+ ['l'] = { 0, sizeof(long), sizeof(long long) },
+ ['L'] = { 0, sizeof(long long) },
+ ['h'] = { 0, sizeof(short), sizeof(char) },
+ ['H'] = { 0, sizeof(char) }, // Questionable historical
+ ['z'] = { 0, sizeof(size_t) },
+ ['t'] = { 0, sizeof(ptrdiff_t) },
+
+ // Non-numeric formats
+ ['c'] = { FORMAT_STATE_CHAR },
+ ['s'] = { FORMAT_STATE_STR },
+ ['p'] = { FORMAT_STATE_PTR },
+ ['%'] = { FORMAT_STATE_PERCENT_CHAR },
+
+ // Numerics
+ ['o'] = { FORMAT_STATE_NUM, 0, 0, 8 },
+ ['x'] = { FORMAT_STATE_NUM, 0, SMALL, 16 },
+ ['X'] = { FORMAT_STATE_NUM, 0, 0, 16 },
+ ['d'] = { FORMAT_STATE_NUM, 0, SIGN, 10 },
+ ['i'] = { FORMAT_STATE_NUM, 0, SIGN, 10 },
+ ['u'] = { FORMAT_STATE_NUM, 0, 0, 10, },
- case 'd':
- case 'i':
- spec->flags |= SIGN;
- break;
- case 'u':
- break;
-
- case 'n':
/*
* Since %n poses a greater security risk than
* utility, treat it as any other invalid or
* unsupported format specifier.
*/
- fallthrough;
+ };
- default:
- WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt);
- spec->type = FORMAT_TYPE_INVALID;
- return fmt - start;
- }
-
- if (qualifier == 'L')
- spec->type = FORMAT_TYPE_LONG_LONG;
- else if (qualifier == 'l') {
- BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG);
- spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN);
- } else if (qualifier == 'z') {
- spec->type = FORMAT_TYPE_SIZE_T;
- } else if (qualifier == 't') {
- spec->type = FORMAT_TYPE_PTRDIFF;
- } else if (qualifier == 'H') {
- BUILD_BUG_ON(FORMAT_TYPE_UBYTE + SIGN != FORMAT_TYPE_BYTE);
- spec->type = FORMAT_TYPE_UBYTE + (spec->flags & SIGN);
- } else if (qualifier == 'h') {
- BUILD_BUG_ON(FORMAT_TYPE_USHORT + SIGN != FORMAT_TYPE_SHORT);
- spec->type = FORMAT_TYPE_USHORT + (spec->flags & SIGN);
- } else {
- BUILD_BUG_ON(FORMAT_TYPE_UINT + SIGN != FORMAT_TYPE_INT);
- spec->type = FORMAT_TYPE_UINT + (spec->flags & SIGN);
+ const struct format_state *p = lookup_state + (u8)*fmt.str;
+ if (p->size) {
+ fmt.size = p->size;
+ if (p->flags_or_double_size && fmt.str[0] == fmt.str[1]) {
+ fmt.size = p->flags_or_double_size;
+ fmt.str++;
+ }
+ fmt.str++;
+ p = lookup_state + *fmt.str;
+ }
+ if (p->state) {
+ if (p->base)
+ spec->base = p->base;
+ spec->flags |= p->flags_or_double_size;
+ fmt.state = p->state;
+ fmt.str++;
+ return fmt;
}
- return ++fmt - start;
+ WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt.str);
+ fmt.state = FORMAT_STATE_INVALID;
+ return fmt;
}
static void
@@ -2747,11 +2713,27 @@ set_precision(struct printf_spec *spec, int prec)
}
}
+/*
+ * Turn a 1/2/4-byte value into a 64-bit one for printing: truncate
+ * as necessary and deal with signedness.
+ *
+ * 'size' is the size of the value in bytes.
+ */
+static unsigned long long convert_num_spec(unsigned int val, int size, struct printf_spec spec)
+{
+ unsigned int shift = 32 - size*8;
+
+ val <<= shift;
+ if (!(spec.flags & SIGN))
+ return val >> shift;
+ return (int)val >> shift;
+}
+
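
For illustration, a standalone sketch of the shift trick convert_num_spec() uses to truncate a sub-int value and then zero- or sign-extend it (convert and main are hypothetical test names; the arithmetic right shift of a negative int is relied on here the same way the kernel relies on it):

#include <stdio.h>

static unsigned long long convert(unsigned int val, int size, int is_signed)
{
	unsigned int shift = 32 - size * 8;

	val <<= shift;			/* drop the unused high bytes */
	if (!is_signed)
		return val >> shift;	/* logical shift: zero-extend */
	return (int)val >> shift;	/* arithmetic shift: sign-extend */
}

int main(void)
{
	/* 0xFFFF as a signed short is -1, as an unsigned short is 65535 */
	printf("%lld %llu\n",
	       (long long)convert(0xFFFF, 2, 1),	/* -1 */
	       convert(0xFFFF, 2, 0));			/* 65535 */
	return 0;
}
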
/**
* vsnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
- * @fmt: The format string to use
+ * @fmt_str: The format string to use
* @args: Arguments for the format string
*
* This function generally follows C99 vsnprintf, but has some
@@ -2775,11 +2757,14 @@ set_precision(struct printf_spec *spec, int prec)
*
* If you're not already dealing with a va_list consider using snprintf().
*/
-int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+int vsnprintf(char *buf, size_t size, const char *fmt_str, va_list args)
{
- unsigned long long num;
char *str, *end;
struct printf_spec spec = {0};
+ struct fmt fmt = {
+ .str = fmt_str,
+ .state = FORMAT_STATE_NONE,
+ };
/* Reject out-of-range values early. Large positive sizes are
used for unknown buffer sizes. */
@@ -2795,33 +2780,43 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
size = end - buf;
}
- while (*fmt) {
- const char *old_fmt = fmt;
- int read = format_decode(fmt, &spec);
+ while (*fmt.str) {
+ const char *old_fmt = fmt.str;
- fmt += read;
+ fmt = format_decode(fmt, &spec);
- switch (spec.type) {
- case FORMAT_TYPE_NONE: {
- int copy = read;
+ switch (fmt.state) {
+ case FORMAT_STATE_NONE: {
+ int read = fmt.str - old_fmt;
if (str < end) {
+ int copy = read;
if (copy > end - str)
copy = end - str;
memcpy(str, old_fmt, copy);
}
str += read;
- break;
+ continue;
}
- case FORMAT_TYPE_WIDTH:
+ case FORMAT_STATE_NUM: {
+ unsigned long long num;
+ if (fmt.size <= sizeof(int))
+ num = convert_num_spec(va_arg(args, int), fmt.size, spec);
+ else
+ num = va_arg(args, long long);
+ str = number(str, end, num, spec);
+ continue;
+ }
+
+ case FORMAT_STATE_WIDTH:
set_field_width(&spec, va_arg(args, int));
- break;
+ continue;
- case FORMAT_TYPE_PRECISION:
+ case FORMAT_STATE_PRECISION:
set_precision(&spec, va_arg(args, int));
- break;
+ continue;
- case FORMAT_TYPE_CHAR: {
+ case FORMAT_STATE_CHAR: {
char c;
if (!(spec.flags & LEFT)) {
@@ -2841,27 +2836,27 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
*str = ' ';
++str;
}
- break;
+ continue;
}
- case FORMAT_TYPE_STR:
+ case FORMAT_STATE_STR:
str = string(str, end, va_arg(args, char *), spec);
- break;
+ continue;
- case FORMAT_TYPE_PTR:
- str = pointer(fmt, str, end, va_arg(args, void *),
+ case FORMAT_STATE_PTR:
+ str = pointer(fmt.str, str, end, va_arg(args, void *),
spec);
- while (isalnum(*fmt))
- fmt++;
- break;
+ while (isalnum(*fmt.str))
+ fmt.str++;
+ continue;
- case FORMAT_TYPE_PERCENT_CHAR:
+ case FORMAT_STATE_PERCENT_CHAR:
if (str < end)
*str = '%';
++str;
- break;
+ continue;
- case FORMAT_TYPE_INVALID:
+ default:
/*
* Presumably the arguments passed gcc's type
* checking, but there is no safe or sane way
@@ -2871,47 +2866,6 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
* sync.
*/
goto out;
-
- default:
- switch (spec.type) {
- case FORMAT_TYPE_LONG_LONG:
- num = va_arg(args, long long);
- break;
- case FORMAT_TYPE_ULONG:
- num = va_arg(args, unsigned long);
- break;
- case FORMAT_TYPE_LONG:
- num = va_arg(args, long);
- break;
- case FORMAT_TYPE_SIZE_T:
- if (spec.flags & SIGN)
- num = va_arg(args, ssize_t);
- else
- num = va_arg(args, size_t);
- break;
- case FORMAT_TYPE_PTRDIFF:
- num = va_arg(args, ptrdiff_t);
- break;
- case FORMAT_TYPE_UBYTE:
- num = (unsigned char) va_arg(args, int);
- break;
- case FORMAT_TYPE_BYTE:
- num = (signed char) va_arg(args, int);
- break;
- case FORMAT_TYPE_USHORT:
- num = (unsigned short) va_arg(args, int);
- break;
- case FORMAT_TYPE_SHORT:
- num = (short) va_arg(args, int);
- break;
- case FORMAT_TYPE_INT:
- num = (int) va_arg(args, int);
- break;
- default:
- num = va_arg(args, unsigned int);
- }
-
- str = number(str, end, num, spec);
}
}
@@ -3067,7 +3021,7 @@ EXPORT_SYMBOL(sprintf);
* vbin_printf - Parse a format string and place args' binary value in a buffer
* @bin_buf: The buffer to place args' binary value
* @size: The size of the buffer(by words(32bits), not characters)
- * @fmt: The format string to use
+ * @fmt_str: The format string to use
* @args: Arguments for the format string
*
* The format follows C99 vsnprintf, except %n is ignored, and its argument
@@ -3080,8 +3034,12 @@ EXPORT_SYMBOL(sprintf);
* If the return value is greater than @size, the resulting bin_buf is NOT
* valid for bstr_printf().
*/
-int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
+int vbin_printf(u32 *bin_buf, size_t size, const char *fmt_str, va_list args)
{
+ struct fmt fmt = {
+ .str = fmt_str,
+ .state = FORMAT_STATE_NONE,
+ };
struct printf_spec spec = {0};
char *str, *end;
int width;
@@ -3113,31 +3071,29 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
value; \
})
- while (*fmt) {
- int read = format_decode(fmt, &spec);
+ while (*fmt.str) {
+ fmt = format_decode(fmt, &spec);
- fmt += read;
-
- switch (spec.type) {
- case FORMAT_TYPE_NONE:
- case FORMAT_TYPE_PERCENT_CHAR:
+ switch (fmt.state) {
+ case FORMAT_STATE_NONE:
+ case FORMAT_STATE_PERCENT_CHAR:
break;
- case FORMAT_TYPE_INVALID:
+ case FORMAT_STATE_INVALID:
goto out;
- case FORMAT_TYPE_WIDTH:
- case FORMAT_TYPE_PRECISION:
+ case FORMAT_STATE_WIDTH:
+ case FORMAT_STATE_PRECISION:
width = (int)save_arg(int);
/* Pointers may require the width */
- if (*fmt == 'p')
+ if (*fmt.str == 'p')
set_field_width(&spec, width);
break;
- case FORMAT_TYPE_CHAR:
+ case FORMAT_STATE_CHAR:
save_arg(char);
break;
- case FORMAT_TYPE_STR: {
+ case FORMAT_STATE_STR: {
const char *save_str = va_arg(args, char *);
const char *err_msg;
size_t len;
@@ -3153,9 +3109,9 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
break;
}
- case FORMAT_TYPE_PTR:
+ case FORMAT_STATE_PTR:
/* Dereferenced pointers must be done now */
- switch (*fmt) {
+ switch (*fmt.str) {
/* Dereference of functions is still OK */
case 'S':
case 's':
@@ -3165,11 +3121,11 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
save_arg(void *);
break;
default:
- if (!isalnum(*fmt)) {
+ if (!isalnum(*fmt.str)) {
save_arg(void *);
break;
}
- str = pointer(fmt, str, end, va_arg(args, void *),
+ str = pointer(fmt.str, str, end, va_arg(args, void *),
spec);
if (str + 1 < end)
*str++ = '\0';
@@ -3177,35 +3133,14 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
end[-1] = '\0'; /* Must be nul terminated */
}
/* skip all alphanumeric pointer suffixes */
- while (isalnum(*fmt))
- fmt++;
+ while (isalnum(*fmt.str))
+ fmt.str++;
break;
- default:
- switch (spec.type) {
-
- case FORMAT_TYPE_LONG_LONG:
+ case FORMAT_STATE_NUM:
+ if (fmt.size > sizeof(int)) {
save_arg(long long);
- break;
- case FORMAT_TYPE_ULONG:
- case FORMAT_TYPE_LONG:
- save_arg(unsigned long);
- break;
- case FORMAT_TYPE_SIZE_T:
- save_arg(size_t);
- break;
- case FORMAT_TYPE_PTRDIFF:
- save_arg(ptrdiff_t);
- break;
- case FORMAT_TYPE_UBYTE:
- case FORMAT_TYPE_BYTE:
- save_arg(char);
- break;
- case FORMAT_TYPE_USHORT:
- case FORMAT_TYPE_SHORT:
- save_arg(short);
- break;
- default:
+ } else {
save_arg(int);
}
}
@@ -3221,7 +3156,7 @@ EXPORT_SYMBOL_GPL(vbin_printf);
* bstr_printf - Format a string from binary arguments and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
- * @fmt: The format string to use
+ * @fmt_str: The format string to use
* @bin_buf: Binary arguments for the format string
*
* This function like C99 vsnprintf, but the difference is that vsnprintf gets
@@ -3239,8 +3174,12 @@ EXPORT_SYMBOL_GPL(vbin_printf);
* return is greater than or equal to @size, the resulting
* string is truncated.
*/
-int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+int bstr_printf(char *buf, size_t size, const char *fmt_str, const u32 *bin_buf)
{
+ struct fmt fmt = {
+ .str = fmt_str,
+ .state = FORMAT_STATE_NONE,
+ };
struct printf_spec spec = {0};
char *str, *end;
const char *args = (const char *)bin_buf;
@@ -3272,33 +3211,33 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
size = end - buf;
}
- while (*fmt) {
- const char *old_fmt = fmt;
- int read = format_decode(fmt, &spec);
-
- fmt += read;
+ while (*fmt.str) {
+ const char *old_fmt = fmt.str;
+ unsigned long long num;
- switch (spec.type) {
- case FORMAT_TYPE_NONE: {
- int copy = read;
+ fmt = format_decode(fmt, &spec);
+ switch (fmt.state) {
+ case FORMAT_STATE_NONE: {
+ int read = fmt.str - old_fmt;
if (str < end) {
+ int copy = read;
if (copy > end - str)
copy = end - str;
memcpy(str, old_fmt, copy);
}
str += read;
- break;
+ continue;
}
- case FORMAT_TYPE_WIDTH:
+ case FORMAT_STATE_WIDTH:
set_field_width(&spec, get_arg(int));
- break;
+ continue;
- case FORMAT_TYPE_PRECISION:
+ case FORMAT_STATE_PRECISION:
set_precision(&spec, get_arg(int));
- break;
+ continue;
- case FORMAT_TYPE_CHAR: {
+ case FORMAT_STATE_CHAR: {
char c;
if (!(spec.flags & LEFT)) {
@@ -3317,21 +3256,21 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
*str = ' ';
++str;
}
- break;
+ continue;
}
- case FORMAT_TYPE_STR: {
+ case FORMAT_STATE_STR: {
const char *str_arg = args;
args += strlen(str_arg) + 1;
str = string(str, end, (char *)str_arg, spec);
- break;
+ continue;
}
- case FORMAT_TYPE_PTR: {
+ case FORMAT_STATE_PTR: {
bool process = false;
int copy, len;
/* Non function dereferences were already done */
- switch (*fmt) {
+ switch (*fmt.str) {
case 'S':
case 's':
case 'x':
@@ -3340,7 +3279,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
process = true;
break;
default:
- if (!isalnum(*fmt)) {
+ if (!isalnum(*fmt.str)) {
process = true;
break;
}
@@ -3355,63 +3294,32 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
}
}
if (process)
- str = pointer(fmt, str, end, get_arg(void *), spec);
+ str = pointer(fmt.str, str, end, get_arg(void *), spec);
- while (isalnum(*fmt))
- fmt++;
- break;
+ while (isalnum(*fmt.str))
+ fmt.str++;
+ continue;
}
- case FORMAT_TYPE_PERCENT_CHAR:
+ case FORMAT_STATE_PERCENT_CHAR:
if (str < end)
*str = '%';
++str;
- break;
+ continue;
- case FORMAT_TYPE_INVALID:
+ case FORMAT_STATE_INVALID:
goto out;
- default: {
- unsigned long long num;
-
- switch (spec.type) {
-
- case FORMAT_TYPE_LONG_LONG:
+ case FORMAT_STATE_NUM:
+ if (fmt.size > sizeof(int)) {
num = get_arg(long long);
- break;
- case FORMAT_TYPE_ULONG:
- case FORMAT_TYPE_LONG:
- num = get_arg(unsigned long);
- break;
- case FORMAT_TYPE_SIZE_T:
- num = get_arg(size_t);
- break;
- case FORMAT_TYPE_PTRDIFF:
- num = get_arg(ptrdiff_t);
- break;
- case FORMAT_TYPE_UBYTE:
- num = get_arg(unsigned char);
- break;
- case FORMAT_TYPE_BYTE:
- num = get_arg(signed char);
- break;
- case FORMAT_TYPE_USHORT:
- num = get_arg(unsigned short);
- break;
- case FORMAT_TYPE_SHORT:
- num = get_arg(short);
- break;
- case FORMAT_TYPE_UINT:
- num = get_arg(unsigned int);
- break;
- default:
- num = get_arg(int);
+ } else {
+ num = convert_num_spec(get_arg(int), fmt.size, spec);
}
-
str = number(str, end, num, spec);
- } /* default: */
- } /* switch(spec.type) */
- } /* while(*fmt) */
+ continue;
+ }
+ } /* while(*fmt.str) */
out:
if (size > 0) {
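
For illustration, a cut-down standalone model of the length-modifier half of the lookup_state table that format_decode() uses above: a table entry carries the size for a single modifier character plus the size used when the character is doubled ("ll", "hh"). The names arg_size, len_entry and main are hypothetical, and only 'l' and 'h' are modelled:

#include <stdio.h>

struct len_entry {
	unsigned char size;		/* size for a single modifier char */
	unsigned char double_size;	/* size for a doubled one ("ll", "hh") */
};

static int arg_size(const char **s)
{
	static const struct len_entry tbl[256] = {
		['l'] = { sizeof(long),  sizeof(long long) },
		['h'] = { sizeof(short), sizeof(char) },
	};
	const struct len_entry *e = &tbl[(unsigned char)**s];
	int size = sizeof(int);

	if (e->size) {
		size = e->size;
		if (e->double_size && (*s)[0] == (*s)[1]) {
			size = e->double_size;
			(*s)++;
		}
		(*s)++;
	}
	return size;
}

int main(void)
{
	const char *a = "ld", *b = "lld", *c = "hhd", *d = "d";

	/* on LP64 this prints "8 8 1 4" */
	printf("%d %d %d %d\n",
	       arg_size(&a), arg_size(&b), arg_size(&c), arg_size(&d));
	return 0;
}
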
diff --git a/mm/filemap.c b/mm/filemap.c
index 33b60d448fca..4f476411a9a2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1523,7 +1523,7 @@ void folio_end_read(struct folio *folio, bool success)
/* Must be in bottom byte for x86 to work */
BUILD_BUG_ON(PG_uptodate > 7);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);
+ VM_BUG_ON_FOLIO(success && folio_test_uptodate(folio), folio);
if (likely(success))
mask |= 1 << PG_uptodate;
@@ -2996,7 +2996,7 @@ static inline loff_t folio_seek_hole_data(struct xa_state *xas,
if (ops->is_partially_uptodate(folio, offset, bsz) ==
seek_data)
break;
- start = (start + bsz) & ~(bsz - 1);
+ start = (start + bsz) & ~((u64)bsz - 1);
offset += bsz;
} while (offset < folio_size(folio));
unlock:
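
For illustration, a standalone sketch of why the (u64) cast in the folio_seek_hole_data() hunk above matters: with a 32-bit bsz the inverted mask is only 32 bits wide and zero-extends, wiping the upper half of a 64-bit offset (main, bad and good are hypothetical names):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start = 0x100000000ULL;	/* an offset past 4 GiB */
	uint32_t bsz = 4096;

	/* 32-bit mask, zero-extended: clears everything above bit 31 */
	uint64_t bad = (start + bsz) & ~(bsz - 1);

	/* 64-bit mask: only the low 12 bits are cleared */
	uint64_t good = (start + bsz) & ~((uint64_t)bsz - 1);

	/* prints "1000 100001000" */
	printf("%llx %llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}
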
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e53d83b3e5cf..db64116a4f84 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2206,6 +2206,16 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
return pmd;
}
+static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
+{
+ if (pmd_present(pmd))
+ pmd = pmd_clear_uffd_wp(pmd);
+ else if (is_swap_pmd(pmd))
+ pmd = pmd_swp_clear_uffd_wp(pmd);
+
+ return pmd;
+}
+
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
@@ -2244,6 +2254,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
}
pmd = move_soft_dirty_pmd(pmd);
+ if (vma_has_uffd_without_event_remap(vma))
+ pmd = clear_uffd_wp_pmd(pmd);
set_pmd_at(mm, new_addr, new_pmd, pmd);
if (force_flush)
flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c498874a7170..eaaec19caa7c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5402,6 +5402,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
unsigned long sz)
{
+ bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
struct hstate *h = hstate_vma(vma);
struct mm_struct *mm = vma->vm_mm;
spinlock_t *src_ptl, *dst_ptl;
@@ -5418,7 +5419,18 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
- set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
+
+ if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+ huge_pte_clear(mm, new_addr, dst_pte, sz);
+ else {
+ if (need_clear_uffd_wp) {
+ if (pte_present(pte))
+ pte = huge_pte_clear_uffd_wp(pte);
+ else if (is_swap_pte(pte))
+ pte = pte_swp_clear_uffd_wp(pte);
+ }
+ set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
+ }
if (src_ptl != dst_ptl)
spin_unlock(src_ptl);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 653dbb1ff05c..bad1e130eda8 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2422,7 +2422,7 @@ skip:
VM_BUG_ON(khugepaged_scan.address < hstart ||
khugepaged_scan.address + HPAGE_PMD_SIZE >
hend);
- if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+ if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
struct file *file = get_file(vma->vm_file);
pgoff_t pgoff = linear_page_index(vma,
khugepaged_scan.address);
@@ -2768,7 +2768,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
mmap_assert_locked(mm);
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
- if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+ if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
struct file *file = get_file(vma->vm_file);
pgoff_t pgoff = linear_page_index(vma, addr);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 737af23f4f4e..820ba3b5cbfc 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1093,7 +1093,7 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
- create_object_percpu((__force unsigned long)ptr, size, 0, gfp);
+ create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 04f35659717a..162407fbf2bc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2268,7 +2268,8 @@ struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
page = __alloc_pages_noprof(gfp, order, nid, nodemask);
- if (unlikely(pol->mode == MPOL_INTERLEAVE) && page) {
+ if (unlikely(pol->mode == MPOL_INTERLEAVE ||
+ pol->mode == MPOL_WEIGHTED_INTERLEAVE) && page) {
/* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */
if (static_branch_likely(&vm_numa_stat_key) &&
page_to_nid(page) == nid) {
diff --git a/mm/mremap.c b/mm/mremap.c
index 60473413836b..cff7f552f909 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -138,6 +138,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
unsigned long new_addr, bool need_rmap_locks)
{
+ bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
pmd_t dummy_pmdval;
@@ -216,7 +217,18 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
force_flush = true;
pte = move_pte(pte, old_addr, new_addr);
pte = move_soft_dirty_pte(pte);
- set_pte_at(mm, new_addr, new_pte, pte);
+
+ if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+ pte_clear(mm, new_addr, new_pte);
+ else {
+ if (need_clear_uffd_wp) {
+ if (pte_present(pte))
+ pte = pte_clear_uffd_wp(pte);
+ else if (is_swap_pte(pte))
+ pte = pte_swp_clear_uffd_wp(pte);
+ }
+ set_pte_at(mm, new_addr, new_pte, pte);
+ }
}
arch_leave_lazy_mmu_mode();
@@ -278,6 +290,15 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
return false;
+ /* If this pmd belongs to a uffd vma with remap events disabled, we need
+ * to ensure that the uffd-wp state is cleared from all pgtables. This
+ * means recursing into lower page tables in move_page_tables(), and we
+ * can reuse the existing code if we simply treat the entry as "not
+ * moved".
+ */
+ if (vma_has_uffd_without_event_remap(vma))
+ return false;
+
/*
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
@@ -333,6 +354,15 @@ static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
if (WARN_ON_ONCE(!pud_none(*new_pud)))
return false;
+ /* If this pud belongs to a uffd vma with remap events disabled, we need
+ * to ensure that the uffd-wp state is cleared from all pgtables. This
+ * means recursing into lower page tables in move_page_tables(), and we
+ * can reuse the existing code if we simply treat the entry as "not
+ * moved".
+ */
+ if (vma_has_uffd_without_event_remap(vma))
+ return false;
+
/*
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_lock prevents deadlock.
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d213ead95675..d9861e42b2bd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -692,6 +692,8 @@ static unsigned long bdi_ratio_from_pages(unsigned long pages)
unsigned long ratio;
global_dirty_limits(&background_thresh, &dirty_thresh);
+ if (!dirty_thresh)
+ return -EINVAL;
ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh);
return ratio;
@@ -790,13 +792,15 @@ int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes)
{
int ret;
unsigned long pages = min_bytes >> PAGE_SHIFT;
- unsigned long min_ratio;
+ long min_ratio;
ret = bdi_check_pages_limit(pages);
if (ret)
return ret;
min_ratio = bdi_ratio_from_pages(pages);
+ if (min_ratio < 0)
+ return min_ratio;
return __bdi_set_min_ratio(bdi, min_ratio);
}
@@ -809,13 +813,15 @@ int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes)
{
int ret;
unsigned long pages = max_bytes >> PAGE_SHIFT;
- unsigned long max_ratio;
+ long max_ratio;
ret = bdi_check_pages_limit(pages);
if (ret)
return ret;
max_ratio = bdi_ratio_from_pages(pages);
+ if (max_ratio < 0)
+ return max_ratio;
return __bdi_set_max_ratio(bdi, max_ratio);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cae7b93864c2..01eab25edf89 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5692,10 +5692,13 @@ __meminit void zone_pcp_init(struct zone *zone)
zone->present_pages, zone_batchsize(zone));
}
+static void setup_per_zone_lowmem_reserve(void);
+
void adjust_managed_page_count(struct page *page, long count)
{
atomic_long_add(count, &page_zone(page)->managed_pages);
totalram_pages_add(count);
+ setup_per_zone_lowmem_reserve();
}
EXPORT_SYMBOL(adjust_managed_page_count);
diff --git a/mm/shmem.c b/mm/shmem.c
index ac58d4fb2e6f..532afd8e049c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3821,7 +3821,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
- if (!simple_offset_empty(dentry))
+ if (!simple_empty(dentry))
return -ENOTEMPTY;
drop_nlink(d_inode(dentry));
@@ -3878,7 +3878,7 @@ static int shmem_rename2(struct mnt_idmap *idmap,
return simple_offset_rename_exchange(old_dir, old_dentry,
new_dir, new_dentry);
- if (!simple_offset_empty(new_dentry))
+ if (!simple_empty(new_dentry))
return -ENOTEMPTY;
if (flags & RENAME_WHITEOUT) {
@@ -3917,6 +3917,7 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
int len;
struct inode *inode;
struct folio *folio;
+ char *link;
len = strlen(symname) + 1;
if (len > PAGE_SIZE)
@@ -3938,12 +3939,13 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
inode->i_size = len-1;
if (len <= SHORT_SYMLINK_LEN) {
- inode->i_link = kmemdup(symname, len, GFP_KERNEL);
- if (!inode->i_link) {
+ link = kmemdup(symname, len, GFP_KERNEL);
+ if (!link) {
error = -ENOMEM;
goto out_remove_offset;
}
inode->i_op = &shmem_short_symlink_operations;
+ inode_set_cached_link(inode, link, len - 1);
} else {
inode_nohighmem(inode);
inode->i_mapping->a_ops = &shmem_aops;
@@ -4368,7 +4370,7 @@ static int shmem_parse_opt_casefold(struct fs_context *fc, struct fs_parameter *
bool latest_version)
{
struct shmem_options *ctx = fc->fs_private;
- unsigned int version = UTF8_LATEST;
+ int version = UTF8_LATEST;
struct unicode_map *encoding;
char *version_str = param->string + 5;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a859b7d18d7..b1ec5ece067e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4642,6 +4642,9 @@ retry:
reset_batch_size(walk);
}
+ __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(),
+ stat.nr_demoted);
+
item = PGSTEAL_KSWAPD + reclaimer_offset();
if (!cgroup_reclaim(sc))
__count_vm_events(item, reclaimed);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4d016314a56c..16bfe1c694dd 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -2122,10 +2122,20 @@ static void __init start_shepherd_timer(void)
{
int cpu;
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
vmstat_update);
+ /*
+ * For secondary CPUs during CPU hotplug scenarios,
+ * vmstat_cpu_online() will enable the work.
+ * mm/vmstat:online enables and disables vmstat_work
+ * symmetrically during CPU hotplug events.
+ */
+ if (!cpu_online(cpu))
+ disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
+ }
+
schedule_delayed_work(&shepherd,
round_jiffies_relative(sysctl_stat_interval));
}
@@ -2148,13 +2158,14 @@ static int vmstat_cpu_online(unsigned int cpu)
if (!node_state(cpu_to_node(cpu), N_CPU)) {
node_set_state(cpu_to_node(cpu), N_CPU);
}
+ enable_delayed_work(&per_cpu(vmstat_work, cpu));
return 0;
}
static int vmstat_cpu_down_prep(unsigned int cpu)
{
- cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
+ disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
return 0;
}
diff --git a/mm/zswap.c b/mm/zswap.c
index 5a27af8d86ea..b84c20d889b1 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -251,7 +251,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
struct zswap_pool *pool;
char name[38]; /* 'zswap' + 32 char (max) num + \0 */
gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
- int ret;
+ int ret, cpu;
if (!zswap_has_pool) {
/* if either are unset, pool initialization failed, and we
@@ -285,6 +285,9 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
goto error;
}
+ for_each_possible_cpu(cpu)
+ mutex_init(&per_cpu_ptr(pool->acomp_ctx, cpu)->mutex);
+
ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
&pool->node);
if (ret)
@@ -817,36 +820,41 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
- struct crypto_acomp *acomp;
- struct acomp_req *req;
+ struct crypto_acomp *acomp = NULL;
+ struct acomp_req *req = NULL;
+ u8 *buffer = NULL;
int ret;
- mutex_init(&acomp_ctx->mutex);
-
- acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
- if (!acomp_ctx->buffer)
- return -ENOMEM;
+ buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto fail;
+ }
acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
if (IS_ERR(acomp)) {
pr_err("could not alloc crypto acomp %s : %ld\n",
pool->tfm_name, PTR_ERR(acomp));
ret = PTR_ERR(acomp);
- goto acomp_fail;
+ goto fail;
}
- acomp_ctx->acomp = acomp;
- acomp_ctx->is_sleepable = acomp_is_async(acomp);
- req = acomp_request_alloc(acomp_ctx->acomp);
+ req = acomp_request_alloc(acomp);
if (!req) {
pr_err("could not alloc crypto acomp_request %s\n",
pool->tfm_name);
ret = -ENOMEM;
- goto req_fail;
+ goto fail;
}
- acomp_ctx->req = req;
+ /*
+ * Only hold the mutex after completing allocations, otherwise we may
+ * recurse into zswap through reclaim and attempt to hold the mutex
+ * again resulting in a deadlock.
+ */
+ mutex_lock(&acomp_ctx->mutex);
crypto_init_wait(&acomp_ctx->wait);
+
/*
* if the backend of acomp is async zip, crypto_req_done() will wakeup
* crypto_wait_req(); if the backend of acomp is scomp, the callback
@@ -855,12 +863,17 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &acomp_ctx->wait);
+ acomp_ctx->buffer = buffer;
+ acomp_ctx->acomp = acomp;
+ acomp_ctx->is_sleepable = acomp_is_async(acomp);
+ acomp_ctx->req = req;
+ mutex_unlock(&acomp_ctx->mutex);
return 0;
-req_fail:
- crypto_free_acomp(acomp_ctx->acomp);
-acomp_fail:
- kfree(acomp_ctx->buffer);
+fail:
+ if (acomp)
+ crypto_free_acomp(acomp);
+ kfree(buffer);
return ret;
}
@@ -869,27 +882,43 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+ mutex_lock(&acomp_ctx->mutex);
if (!IS_ERR_OR_NULL(acomp_ctx)) {
if (!IS_ERR_OR_NULL(acomp_ctx->req))
acomp_request_free(acomp_ctx->req);
+ acomp_ctx->req = NULL;
if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
crypto_free_acomp(acomp_ctx->acomp);
kfree(acomp_ctx->buffer);
}
+ mutex_unlock(&acomp_ctx->mutex);
return 0;
}
-/* Prevent CPU hotplug from freeing up the per-CPU acomp_ctx resources */
-static struct crypto_acomp_ctx *acomp_ctx_get_cpu(struct crypto_acomp_ctx __percpu *acomp_ctx)
+static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
{
- cpus_read_lock();
- return raw_cpu_ptr(acomp_ctx);
+ struct crypto_acomp_ctx *acomp_ctx;
+
+ for (;;) {
+ acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
+ mutex_lock(&acomp_ctx->mutex);
+ if (likely(acomp_ctx->req))
+ return acomp_ctx;
+ /*
+ * It is possible that we were migrated to a different CPU after
+ * getting the per-CPU ctx but before the mutex was acquired. If
+ * the old CPU got offlined, zswap_cpu_comp_dead() could have
+ * already freed ctx->req (among other things) and set it to
+ * NULL. Just try again on the new CPU that we ended up on.
+ */
+ mutex_unlock(&acomp_ctx->mutex);
+ }
}
-static void acomp_ctx_put_cpu(void)
+static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
{
- cpus_read_unlock();
+ mutex_unlock(&acomp_ctx->mutex);
}
static bool zswap_compress(struct page *page, struct zswap_entry *entry,
@@ -905,9 +934,7 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
gfp_t gfp;
u8 *dst;
- acomp_ctx = acomp_ctx_get_cpu(pool->acomp_ctx);
- mutex_lock(&acomp_ctx->mutex);
-
+ acomp_ctx = acomp_ctx_get_cpu_lock(pool);
dst = acomp_ctx->buffer;
sg_init_table(&input, 1);
sg_set_page(&input, page, PAGE_SIZE, 0);
@@ -960,8 +987,7 @@ unlock:
else if (alloc_ret)
zswap_reject_alloc_fail++;
- mutex_unlock(&acomp_ctx->mutex);
- acomp_ctx_put_cpu();
+ acomp_ctx_put_unlock(acomp_ctx);
return comp_ret == 0 && alloc_ret == 0;
}
@@ -972,9 +998,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
struct crypto_acomp_ctx *acomp_ctx;
u8 *src;
- acomp_ctx = acomp_ctx_get_cpu(entry->pool->acomp_ctx);
- mutex_lock(&acomp_ctx->mutex);
-
+ acomp_ctx = acomp_ctx_get_cpu_lock(entry->pool);
src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
/*
* If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer
@@ -998,11 +1022,10 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
- mutex_unlock(&acomp_ctx->mutex);
if (src != acomp_ctx->buffer)
zpool_unmap_handle(zpool, entry->handle);
- acomp_ctx_put_cpu();
+ acomp_ctx_put_unlock(acomp_ctx);
}
/*********************************
diff --git a/net/core/filter.c b/net/core/filter.c
index 834614071727..2fb45a86f3dd 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -11251,6 +11251,7 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
struct sock_reuseport *reuse;
struct sock *selected_sk;
+ int err;
selected_sk = map->ops->map_lookup_elem(map, key);
if (!selected_sk)
@@ -11258,10 +11259,6 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
if (!reuse) {
- /* Lookup in sock_map can return TCP ESTABLISHED sockets. */
- if (sk_is_refcounted(selected_sk))
- sock_put(selected_sk);
-
/* reuseport_array has only sk with non NULL sk_reuseport_cb.
* The only (!reuse) case here is - the sk has already been
* unhashed (e.g. by close()), so treat it as -ENOENT.
@@ -11269,24 +11266,33 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
* Other maps (e.g. sock_map) do not provide this guarantee and
* the sk may never be in the reuseport group to begin with.
*/
- return is_sockarray ? -ENOENT : -EINVAL;
+ err = is_sockarray ? -ENOENT : -EINVAL;
+ goto error;
}
if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
struct sock *sk = reuse_kern->sk;
- if (sk->sk_protocol != selected_sk->sk_protocol)
- return -EPROTOTYPE;
- else if (sk->sk_family != selected_sk->sk_family)
- return -EAFNOSUPPORT;
-
- /* Catch all. Likely bound to a different sockaddr. */
- return -EBADFD;
+ if (sk->sk_protocol != selected_sk->sk_protocol) {
+ err = -EPROTOTYPE;
+ } else if (sk->sk_family != selected_sk->sk_family) {
+ err = -EAFNOSUPPORT;
+ } else {
+ /* Catch all. Likely bound to a different sockaddr. */
+ err = -EBADFD;
+ }
+ goto error;
}
reuse_kern->selected_sk = selected_sk;
return 0;
+error:
+ /* Lookup in sock_map can return TCP ESTABLISHED sockets. */
+ if (sk_is_refcounted(selected_sk))
+ sock_put(selected_sk);
+
+ return err;
}
static const struct bpf_func_proto sk_select_reuseport_proto = {
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index a89cbd8d87c3..996ac6a449eb 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -197,6 +197,16 @@ static const struct genl_multicast_group netdev_nl_mcgrps[] = {
[NETDEV_NLGRP_PAGE_POOL] = { "page-pool", },
};
+static void __netdev_nl_sock_priv_init(void *priv)
+{
+ netdev_nl_sock_priv_init(priv);
+}
+
+static void __netdev_nl_sock_priv_destroy(void *priv)
+{
+ netdev_nl_sock_priv_destroy(priv);
+}
+
struct genl_family netdev_nl_family __ro_after_init = {
.name = NETDEV_FAMILY_NAME,
.version = NETDEV_FAMILY_VERSION,
@@ -208,6 +218,6 @@ struct genl_family netdev_nl_family __ro_after_init = {
.mcgrps = netdev_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(netdev_nl_mcgrps),
.sock_priv_size = sizeof(struct list_head),
- .sock_priv_init = (void *)netdev_nl_sock_priv_init,
- .sock_priv_destroy = (void *)netdev_nl_sock_priv_destroy,
+ .sock_priv_init = __netdev_nl_sock_priv_init,
+ .sock_priv_destroy = __netdev_nl_sock_priv_destroy,
};
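
For illustration, the change above swaps a function-pointer cast for thin wrappers whose signatures match what the genetlink core expects, presumably because an indirect call through a mismatched function-pointer type is undefined behaviour (and a kCFI violation). A minimal standalone sketch of the pattern, with hypothetical names item, item_init and __item_init:

#include <stdio.h>

struct item { int v; };

static void item_init(struct item *it)
{
	it->v = 42;
}

/* Wrapper with the generic signature the caller expects. */
static void __item_init(void *priv)
{
	item_init(priv);
}

typedef void (*init_fn)(void *);

int main(void)
{
	struct item it;
	init_fn fn = __item_init;	/* rather than (init_fn)item_init */

	fn(&it);
	printf("%d\n", it.v);		/* 42 */
	return 0;
}
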
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2e459b9d88eb..96a6ed37d4cc 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -627,6 +627,8 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
const struct net_device_ops *ops;
int err;
+ skb_queue_head_init(&np->skb_pool);
+
if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
np_err(np, "%s doesn't support polling, aborting\n",
ndev->name);
@@ -662,6 +664,9 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
strscpy(np->dev_name, ndev->name, IFNAMSIZ);
npinfo->netpoll = np;
+ /* fill up the skb queue */
+ refill_skbs(np);
+
/* last thing to do is link it to the net device structure */
rcu_assign_pointer(ndev->npinfo, npinfo);
@@ -681,8 +686,6 @@ int netpoll_setup(struct netpoll *np)
struct in_device *in_dev;
int err;
- skb_queue_head_init(&np->skb_pool);
-
rtnl_lock();
if (np->dev_name[0]) {
struct net *net = current->nsproxy->net_ns;
@@ -782,9 +785,6 @@ put_noaddr:
}
}
- /* fill up the skb queue */
- refill_skbs(np);
-
err = __netpoll_setup(np, ndev);
if (err)
goto flush;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 7e23cacbe66e..4cb547fae91f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -851,6 +851,9 @@ static ssize_t get_imix_entries(const char __user *buffer,
unsigned long weight;
unsigned long size;
+ if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES)
+ return -E2BIG;
+
len = num_arg(&buffer[i], max_digits, &size);
if (len < 0)
return len;
@@ -880,9 +883,6 @@ static ssize_t get_imix_entries(const char __user *buffer,
i++;
pkt_dev->n_imix_entries++;
-
- if (pkt_dev->n_imix_entries > MAX_IMIX_ENTRIES)
- return -E2BIG;
} while (c == ' ');
return i;
diff --git a/net/core/xdp.c b/net/core/xdp.c
index bcc5551c6424..2315feed94ef 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -186,7 +186,6 @@ int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
xdp_rxq_info_init(xdp_rxq);
xdp_rxq->dev = dev;
xdp_rxq->queue_index = queue_index;
- xdp_rxq->napi_id = napi_id;
xdp_rxq->frag_size = frag_size;
xdp_rxq->reg_state = REG_STATE_REGISTERED;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0fbec3509618..e1564b95fab0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2445,6 +2445,7 @@ martian_destination:
net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
&daddr, &saddr, dev->name);
#endif
+ goto out;
e_nobufs:
reason = SKB_DROP_REASON_NOMEM;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e8953e88efef..86d282618515 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -533,7 +533,7 @@ begin:
return NULL;
}
-/* In hash4, rehash can happen in connect(), where hash4_cnt keeps unchanged. */
+/* udp_rehash4() only checks hslot4, and hash4_cnt is not processed. */
static void udp_rehash4(struct udp_table *udptable, struct sock *sk,
u16 newhash4)
{
@@ -582,15 +582,13 @@ void udp_lib_hash4(struct sock *sk, u16 hash)
struct net *net = sock_net(sk);
struct udp_table *udptable;
- /* Connected udp socket can re-connect to another remote address,
- * so rehash4 is needed.
+ /* Connected udp socket can re-connect to another remote address, which
+ * will be handled by rehash. Thus no need to redo hash4 here.
*/
- udptable = net->ipv4.udp_table;
- if (udp_hashed4(sk)) {
- udp_rehash4(udptable, sk, hash);
+ if (udp_hashed4(sk))
return;
- }
+ udptable = net->ipv4.udp_table;
hslot = udp_hashslot(udptable, net, udp_sk(sk)->udp_port_hash);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
hslot4 = udp_hashslot4(udptable, hash);
@@ -2173,14 +2171,14 @@ void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
struct udp_table *udptable = udp_get_table_prot(sk);
struct udp_hslot *hslot, *hslot2, *nhslot2;
+ hslot = udp_hashslot(udptable, sock_net(sk),
+ udp_sk(sk)->udp_port_hash);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
nhslot2 = udp_hashslot2(udptable, newhash);
udp_sk(sk)->udp_portaddr_hash = newhash;
if (hslot2 != nhslot2 ||
rcu_access_pointer(sk->sk_reuseport_cb)) {
- hslot = udp_hashslot(udptable, sock_net(sk),
- udp_sk(sk)->udp_port_hash);
/* we must lock primary chain too */
spin_lock_bh(&hslot->lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
@@ -2199,19 +2197,29 @@ void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
spin_unlock(&nhslot2->lock);
}
- if (udp_hashed4(sk)) {
- udp_rehash4(udptable, sk, newhash4);
+ spin_unlock_bh(&hslot->lock);
+ }
+
+ /* Now process hash4 if necessary:
+ * (1) update hslot4;
+ * (2) update hslot2->hash4_cnt.
+ * Note that hslot2/hslot4 should be checked separately, as
+ * either of them may change with the other unchanged.
+ */
+ if (udp_hashed4(sk)) {
+ spin_lock_bh(&hslot->lock);
- if (hslot2 != nhslot2) {
- spin_lock(&hslot2->lock);
- udp_hash4_dec(hslot2);
- spin_unlock(&hslot2->lock);
+ udp_rehash4(udptable, sk, newhash4);
+ if (hslot2 != nhslot2) {
+ spin_lock(&hslot2->lock);
+ udp_hash4_dec(hslot2);
+ spin_unlock(&hslot2->lock);
- spin_lock(&nhslot2->lock);
- udp_hash4_inc(nhslot2);
- spin_unlock(&nhslot2->lock);
- }
+ spin_lock(&nhslot2->lock);
+ udp_hash4_inc(nhslot2);
+ spin_unlock(&nhslot2->lock);
}
+
spin_unlock_bh(&hslot->lock);
}
}
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index a62bc874bf1e..123f3f297284 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -607,7 +607,6 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
}
opts->ext_copy.use_ack = 1;
opts->suboptions = OPTION_MPTCP_DSS;
- WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk));
/* Add kind/length/subtype/flag overhead if mapping is not populated */
if (dss_size == 0)
@@ -1288,7 +1287,7 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
}
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT);
}
- return;
+ goto update_wspace;
}
if (rcv_wnd_new != rcv_wnd_old) {
@@ -1313,6 +1312,9 @@ raise_win:
th->window = htons(new_win);
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDSHARED);
}
+
+update_wspace:
+ WRITE_ONCE(msk->old_wspace, tp->rcv_wnd);
}
__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index a93e661ef5c4..73526f1d768f 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -760,10 +760,15 @@ static inline u64 mptcp_data_avail(const struct mptcp_sock *msk)
static inline bool mptcp_epollin_ready(const struct sock *sk)
{
+ u64 data_avail = mptcp_data_avail(mptcp_sk(sk));
+
+ if (!data_avail)
+ return false;
+
/* mptcp doesn't have to deal with small skbs in the receive queue,
- * at it can always coalesce them
+ * as it can always coalesce them
*/
- return (mptcp_data_avail(mptcp_sk(sk)) >= sk->sk_rcvlowat) ||
+ return (data_avail >= sk->sk_rcvlowat) ||
(mem_cgroup_sockets_enabled && sk->sk_memcg &&
mem_cgroup_under_socket_pressure(sk->sk_memcg)) ||
READ_ONCE(tcp_memory_pressure);
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index ef0f8f73826f..4e0842df5234 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -289,6 +289,7 @@ enum {
ncsi_dev_state_config_sp = 0x0301,
ncsi_dev_state_config_cis,
ncsi_dev_state_config_oem_gma,
+ ncsi_dev_state_config_apply_mac,
ncsi_dev_state_config_clear_vids,
ncsi_dev_state_config_svf,
ncsi_dev_state_config_ev,
@@ -322,6 +323,7 @@ struct ncsi_dev_priv {
#define NCSI_DEV_RESHUFFLE 4
#define NCSI_DEV_RESET 8 /* Reset state of NC */
unsigned int gma_flag; /* OEM GMA flag */
+ struct sockaddr pending_mac; /* MAC address received from GMA */
spinlock_t lock; /* Protect the NCSI device */
unsigned int package_probe_id;/* Current ID during probe */
unsigned int package_num; /* Number of packages */
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 5cf55bde366d..bf276eaf9330 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -1038,7 +1038,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
: ncsi_dev_state_config_clear_vids;
break;
case ncsi_dev_state_config_oem_gma:
- nd->state = ncsi_dev_state_config_clear_vids;
+ nd->state = ncsi_dev_state_config_apply_mac;
nca.package = np->id;
nca.channel = nc->id;
@@ -1050,10 +1050,22 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
nca.type = NCSI_PKT_CMD_OEM;
ret = ncsi_gma_handler(&nca, nc->version.mf_id);
}
- if (ret < 0)
+ if (ret < 0) {
+ nd->state = ncsi_dev_state_config_clear_vids;
schedule_work(&ndp->work);
+ }
break;
+ case ncsi_dev_state_config_apply_mac:
+ rtnl_lock();
+ ret = dev_set_mac_address(dev, &ndp->pending_mac, NULL);
+ rtnl_unlock();
+ if (ret < 0)
+ netdev_warn(dev, "NCSI: 'Writing MAC address to device failed\n");
+
+ nd->state = ncsi_dev_state_config_clear_vids;
+
+ fallthrough;
case ncsi_dev_state_config_clear_vids:
case ncsi_dev_state_config_svf:
case ncsi_dev_state_config_ev:
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index e28be33bdf2c..14bd66909ca4 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -628,16 +628,14 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr)
static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
{
struct ncsi_dev_priv *ndp = nr->ndp;
+ struct sockaddr *saddr = &ndp->pending_mac;
struct net_device *ndev = ndp->ndev.dev;
struct ncsi_rsp_oem_pkt *rsp;
- struct sockaddr saddr;
u32 mac_addr_off = 0;
- int ret = 0;
/* Get the response header */
rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
- saddr.sa_family = ndev->type;
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
if (mfr_id == NCSI_OEM_MFR_BCM_ID)
mac_addr_off = BCM_MAC_ADDR_OFFSET;
@@ -646,22 +644,17 @@ static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id)
else if (mfr_id == NCSI_OEM_MFR_INTEL_ID)
mac_addr_off = INTEL_MAC_ADDR_OFFSET;
- memcpy(saddr.sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
+ saddr->sa_family = ndev->type;
+ memcpy(saddr->sa_data, &rsp->data[mac_addr_off], ETH_ALEN);
if (mfr_id == NCSI_OEM_MFR_BCM_ID || mfr_id == NCSI_OEM_MFR_INTEL_ID)
- eth_addr_inc((u8 *)saddr.sa_data);
- if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
+ eth_addr_inc((u8 *)saddr->sa_data);
+ if (!is_valid_ether_addr((const u8 *)saddr->sa_data))
return -ENXIO;
/* Set the flag for GMA command which should only be called once */
ndp->gma_flag = 1;
- rtnl_lock();
- ret = dev_set_mac_address(ndev, &saddr, NULL);
- rtnl_unlock();
- if (ret < 0)
- netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
-
- return ret;
+ return 0;
}
/* Response handler for Mellanox card */
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 16e260014684..704c858cf209 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -934,7 +934,9 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
{
struct vport *vport = ovs_vport_rcu(dp, out_port);
- if (likely(vport && netif_carrier_ok(vport->dev))) {
+ if (likely(vport &&
+ netif_running(vport->dev) &&
+ netif_carrier_ok(vport->dev))) {
u16 mru = OVS_CB(skb)->mru;
u32 cutlen = OVS_CB(skb)->cutlen;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 5cf8109f672a..fa9d1b49599b 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -491,6 +491,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
*/
vsk->transport->release(vsk);
vsock_deassign_transport(vsk);
+
+ /* transport's release() and destruct() can touch some socket
+ * state, since we are reassigning the socket to a new transport
+ * during vsock_connect(), let's reset these fields to have a
+ * clean state.
+ */
+ sock_reset_flag(sk, SOCK_DONE);
+ sk->sk_state = TCP_CLOSE;
+ vsk->peer_shutdown = 0;
}
/* We increase the module refcnt to prevent the transport unloading
@@ -870,6 +879,9 @@ EXPORT_SYMBOL_GPL(vsock_create_connected);
s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
+ if (WARN_ON(!vsk->transport))
+ return 0;
+
return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);
@@ -878,6 +890,9 @@ s64 vsock_connectible_has_data(struct vsock_sock *vsk)
{
struct sock *sk = sk_vsock(vsk);
+ if (WARN_ON(!vsk->transport))
+ return 0;
+
if (sk->sk_type == SOCK_SEQPACKET)
return vsk->transport->seqpacket_has_data(vsk);
else
@@ -887,6 +902,9 @@ EXPORT_SYMBOL_GPL(vsock_connectible_has_data);
s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
+ if (WARN_ON(!vsk->transport))
+ return 0;
+
return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 9acc13ab3f82..7f7de6d88096 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -26,6 +26,9 @@
/* Threshold for detecting small packets to copy */
#define GOOD_COPY_LEN 128
+static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
+ bool cancel_timeout);
+
static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
@@ -1109,6 +1112,8 @@ void virtio_transport_destruct(struct vsock_sock *vsk)
{
struct virtio_vsock_sock *vvs = vsk->trans;
+ virtio_transport_cancel_close_work(vsk, true);
+
kfree(vvs);
vsk->trans = NULL;
}
@@ -1204,17 +1209,11 @@ static void virtio_transport_wait_close(struct sock *sk, long timeout)
}
}
-static void virtio_transport_do_close(struct vsock_sock *vsk,
- bool cancel_timeout)
+static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
+ bool cancel_timeout)
{
struct sock *sk = sk_vsock(vsk);
- sock_set_flag(sk, SOCK_DONE);
- vsk->peer_shutdown = SHUTDOWN_MASK;
- if (vsock_stream_has_data(vsk) <= 0)
- sk->sk_state = TCP_CLOSING;
- sk->sk_state_change(sk);
-
if (vsk->close_work_scheduled &&
(!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
vsk->close_work_scheduled = false;
@@ -1226,6 +1225,20 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
}
}
+static void virtio_transport_do_close(struct vsock_sock *vsk,
+ bool cancel_timeout)
+{
+ struct sock *sk = sk_vsock(vsk);
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ if (vsock_stream_has_data(vsk) <= 0)
+ sk->sk_state = TCP_CLOSING;
+ sk->sk_state_change(sk);
+
+ virtio_transport_cancel_close_work(vsk, cancel_timeout);
+}
+
static void virtio_transport_close_timeout(struct work_struct *work)
{
struct vsock_sock *vsk =
@@ -1628,8 +1641,11 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
lock_sock(sk);
- /* Check if sk has been closed before lock_sock */
- if (sock_flag(sk, SOCK_DONE)) {
+ /* Check if sk has been closed or assigned to another transport before
+ * lock_sock (note: listener sockets are not assigned to any transport)
+ */
+ if (sock_flag(sk, SOCK_DONE) ||
+ (sk->sk_state != TCP_LISTEN && vsk->transport != &t->transport)) {
(void)virtio_transport_reset_no_sock(t, skb);
release_sock(sk);
sock_put(sk);
diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c
index 4aa6e74ec295..f201d9eca1df 100644
--- a/net/vmw_vsock/vsock_bpf.c
+++ b/net/vmw_vsock/vsock_bpf.c
@@ -77,6 +77,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
size_t len, int flags, int *addr_len)
{
struct sk_psock *psock;
+ struct vsock_sock *vsk;
int copied;
psock = sk_psock_get(sk);
@@ -84,6 +85,13 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
return __vsock_recvmsg(sk, msg, len, flags);
lock_sock(sk);
+ vsk = vsock_sk(sk);
+
+ if (!vsk->transport) {
+ copied = -ENODEV;
+ goto out;
+ }
+
if (vsock_has_data(sk, psock) && sk_psock_queue_empty(psock)) {
release_sock(sk);
sk_psock_put(sk, psock);
@@ -108,6 +116,7 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
}
+out:
release_sock(sk);
sk_psock_put(sk, psock);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 3fa70286c846..89d2bef96469 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -322,7 +322,6 @@ static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
return -ENOSPC;
}
- sk_mark_napi_id_once_xdp(&xs->sk, xdp);
return 0;
}
@@ -908,11 +907,8 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
if (unlikely(!xs->tx))
return -ENOBUFS;
- if (sk_can_busy_loop(sk)) {
- if (xs->zc)
- __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
+ if (sk_can_busy_loop(sk))
sk_busy_loop(sk, 1); /* only support non-blocking sockets */
- }
if (xs->zc && xsk_no_wakeup(sk))
return 0;
@@ -1298,6 +1294,14 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
xs->queue_id = qid;
xp_add_xsk(xs->pool, xs);
+ if (xs->zc && qid < dev->real_num_rx_queues) {
+ struct netdev_rx_queue *rxq;
+
+ rxq = __netif_get_rx_queue(dev, qid);
+ if (rxq->napi)
+ __sk_mark_napi_id_once(sk, rxq->napi->napi_id);
+ }
+
out_unlock:
if (err) {
dev_put(dev);
diff --git a/samples/vfs/.gitignore b/samples/vfs/.gitignore
index 79212d91285b..8708341bc082 100644
--- a/samples/vfs/.gitignore
+++ b/samples/vfs/.gitignore
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
/test-fsmount
+/test-list-all-mounts
/test-statx
+/mountinfo
diff --git a/samples/vfs/Makefile b/samples/vfs/Makefile
index 6377a678134a..6554b73a75c8 100644
--- a/samples/vfs/Makefile
+++ b/samples/vfs/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-userprogs-always-y += test-fsmount test-statx
+userprogs-always-y += test-fsmount test-statx mountinfo test-list-all-mounts
userccflags += -I usr/include
diff --git a/samples/vfs/mountinfo.c b/samples/vfs/mountinfo.c
new file mode 100644
index 000000000000..bc78275cac69
--- /dev/null
+++ b/samples/vfs/mountinfo.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * Use pidfds, nsfds, listmount() and statmount() to mimic the
+ * contents of /proc/self/mountinfo.
+ */
+#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <alloca.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <errno.h>
+
+#include "samples-vfs.h"
+
+/* max mounts per listmount call */
+#define MAXMOUNTS 1024
+
+/* size of struct statmount (including trailing string buffer) */
+#define STATMOUNT_BUFSIZE 4096
+
+static bool ext_format;
+
+#ifndef __NR_pidfd_open
+#define __NR_pidfd_open -1
+#endif
+
+/*
+ * There are no bindings in glibc for listmount() and statmount() (yet),
+ * so make our own here.
+ */
+static int statmount(__u64 mnt_id, __u64 mnt_ns_id, __u64 mask,
+ struct statmount *buf, size_t bufsize,
+ unsigned int flags)
+{
+ struct mnt_id_req req = {
+ .size = MNT_ID_REQ_SIZE_VER0,
+ .mnt_id = mnt_id,
+ .param = mask,
+ };
+
+ if (mnt_ns_id) {
+ req.size = MNT_ID_REQ_SIZE_VER1;
+ req.mnt_ns_id = mnt_ns_id;
+ }
+
+ return syscall(__NR_statmount, &req, buf, bufsize, flags);
+}
+
+static ssize_t listmount(__u64 mnt_id, __u64 mnt_ns_id, __u64 last_mnt_id,
+ __u64 list[], size_t num, unsigned int flags)
+{
+ struct mnt_id_req req = {
+ .size = MNT_ID_REQ_SIZE_VER0,
+ .mnt_id = mnt_id,
+ .param = last_mnt_id,
+ };
+
+ if (mnt_ns_id) {
+ req.size = MNT_ID_REQ_SIZE_VER1;
+ req.mnt_ns_id = mnt_ns_id;
+ }
+
+ return syscall(__NR_listmount, &req, list, num, flags);
+}
+
+static void show_mnt_attrs(__u64 flags)
+{
+ printf("%s", flags & MOUNT_ATTR_RDONLY ? "ro" : "rw");
+
+ if (flags & MOUNT_ATTR_NOSUID)
+ printf(",nosuid");
+ if (flags & MOUNT_ATTR_NODEV)
+ printf(",nodev");
+ if (flags & MOUNT_ATTR_NOEXEC)
+ printf(",noexec");
+
+ switch (flags & MOUNT_ATTR__ATIME) {
+ case MOUNT_ATTR_RELATIME:
+ printf(",relatime");
+ break;
+ case MOUNT_ATTR_NOATIME:
+ printf(",noatime");
+ break;
+ case MOUNT_ATTR_STRICTATIME:
+ /* print nothing */
+ break;
+ }
+
+ if (flags & MOUNT_ATTR_NODIRATIME)
+ printf(",nodiratime");
+ if (flags & MOUNT_ATTR_NOSYMFOLLOW)
+ printf(",nosymfollow");
+ if (flags & MOUNT_ATTR_IDMAP)
+ printf(",idmapped");
+}
+
+static void show_propagation(struct statmount *sm)
+{
+ if (sm->mnt_propagation & MS_SHARED)
+ printf(" shared:%llu", sm->mnt_peer_group);
+ if (sm->mnt_propagation & MS_SLAVE) {
+ printf(" master:%llu", sm->mnt_master);
+ if (sm->propagate_from && sm->propagate_from != sm->mnt_master)
+ printf(" propagate_from:%llu", sm->propagate_from);
+ }
+ if (sm->mnt_propagation & MS_UNBINDABLE)
+ printf(" unbindable");
+}
+
+static void show_sb_flags(__u64 flags)
+{
+ printf("%s", flags & MS_RDONLY ? "ro" : "rw");
+ if (flags & MS_SYNCHRONOUS)
+ printf(",sync");
+ if (flags & MS_DIRSYNC)
+ printf(",dirsync");
+ if (flags & MS_MANDLOCK)
+ printf(",mand");
+ if (flags & MS_LAZYTIME)
+ printf(",lazytime");
+}
+
+static int dump_mountinfo(__u64 mnt_id, __u64 mnt_ns_id)
+{
+ int ret;
+ struct statmount *buf = alloca(STATMOUNT_BUFSIZE);
+ const __u64 mask = STATMOUNT_SB_BASIC | STATMOUNT_MNT_BASIC |
+ STATMOUNT_PROPAGATE_FROM | STATMOUNT_FS_TYPE |
+ STATMOUNT_MNT_ROOT | STATMOUNT_MNT_POINT |
+ STATMOUNT_MNT_OPTS | STATMOUNT_FS_SUBTYPE |
+ STATMOUNT_SB_SOURCE;
+
+ ret = statmount(mnt_id, mnt_ns_id, mask, buf, STATMOUNT_BUFSIZE, 0);
+ if (ret < 0) {
+ perror("statmount");
+ return 1;
+ }
+
+ if (ext_format)
+ printf("0x%llx 0x%llx 0x%llx ", mnt_ns_id, mnt_id, buf->mnt_parent_id);
+
+ printf("%u %u %u:%u %s %s ", buf->mnt_id_old, buf->mnt_parent_id_old,
+ buf->sb_dev_major, buf->sb_dev_minor,
+ &buf->str[buf->mnt_root],
+ &buf->str[buf->mnt_point]);
+ show_mnt_attrs(buf->mnt_attr);
+ show_propagation(buf);
+
+ printf(" - %s", &buf->str[buf->fs_type]);
+ if (buf->mask & STATMOUNT_FS_SUBTYPE)
+ printf(".%s", &buf->str[buf->fs_subtype]);
+ if (buf->mask & STATMOUNT_SB_SOURCE)
+ printf(" %s ", &buf->str[buf->sb_source]);
+ else
+ printf(" :none ");
+
+ show_sb_flags(buf->sb_flags);
+ if (buf->mask & STATMOUNT_MNT_OPTS)
+ printf(",%s", &buf->str[buf->mnt_opts]);
+ printf("\n");
+ return 0;
+}
+
+static int dump_mounts(__u64 mnt_ns_id)
+{
+ __u64 mntid[MAXMOUNTS];
+ __u64 last_mnt_id = 0;
+ ssize_t count;
+ int i;
+
+ /*
+ * Get a list of all mntids in mnt_ns_id. If it returns MAXMOUNTS
+ * mounts, then go again until we get everything.
+ */
+ do {
+ count = listmount(LSMT_ROOT, mnt_ns_id, last_mnt_id, mntid, MAXMOUNTS, 0);
+ if (count < 0 || count > MAXMOUNTS) {
+ errno = count < 0 ? errno : count;
+ perror("listmount");
+ return 1;
+ }
+
+ /* Walk the returned mntids and print info about each */
+ for (i = 0; i < count; ++i) {
+ int ret = dump_mountinfo(mntid[i], mnt_ns_id);
+
+ if (ret != 0)
+ return ret;
+ }
+ /* Set up last_mnt_id to pick up where we left off */
+ last_mnt_id = mntid[count - 1];
+ } while (count == MAXMOUNTS);
+ return 0;
+}
+
+static void usage(const char * const prog)
+{
+ printf("Usage:\n");
+ printf("%s [-e] [-p pid] [-r] [-h]\n", prog);
+ printf(" -e: extended format\n");
+ printf(" -h: print usage message\n");
+ printf(" -p: get mount namespace from given pid\n");
+ printf(" -r: recursively print all mounts in all child namespaces\n");
+}
+
+int main(int argc, char * const *argv)
+{
+ struct mnt_ns_info mni = { .size = MNT_NS_INFO_SIZE_VER0 };
+ int pidfd, mntns, ret, opt;
+ pid_t pid = getpid();
+ bool recursive = false;
+
+ while ((opt = getopt(argc, argv, "ehp:r")) != -1) {
+ switch (opt) {
+ case 'e':
+ ext_format = true;
+ break;
+ case 'h':
+ usage(argv[0]);
+ return 0;
+ case 'p':
+ pid = atoi(optarg);
+ break;
+ case 'r':
+ recursive = true;
+ break;
+ }
+ }
+
+ /* Get a pidfd for pid */
+ pidfd = syscall(__NR_pidfd_open, pid, 0);
+ if (pidfd < 0) {
+ perror("pidfd_open");
+ return 1;
+ }
+
+ /* Get the mnt namespace for pidfd */
+ mntns = ioctl(pidfd, PIDFD_GET_MNT_NAMESPACE, NULL);
+ if (mntns < 0) {
+ perror("PIDFD_GET_MNT_NAMESPACE");
+ return 1;
+ }
+ close(pidfd);
+
+ /* get info about mntns. In particular, the mnt_ns_id */
+ ret = ioctl(mntns, NS_MNT_GET_INFO, &mni);
+ if (ret < 0) {
+ perror("NS_MNT_GET_INFO");
+ return 1;
+ }
+
+ do {
+ int ret;
+
+ ret = dump_mounts(mni.mnt_ns_id);
+ if (ret)
+ return ret;
+
+ if (!recursive)
+ break;
+
+ /* get the next mntns (and overwrite the old mount ns info) */
+ ret = ioctl(mntns, NS_MNT_GET_NEXT, &mni);
+ close(mntns);
+ mntns = ret;
+ } while (mntns >= 0);
+
+ return 0;
+}
diff --git a/samples/vfs/samples-vfs.h b/samples/vfs/samples-vfs.h
new file mode 100644
index 000000000000..103e1e7c4cec
--- /dev/null
+++ b/samples/vfs/samples-vfs.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __SAMPLES_VFS_H
+#define __SAMPLES_VFS_H
+
+#include <errno.h>
+#include <linux/types.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+
+#define die_errno(format, ...) \
+ do { \
+ fprintf(stderr, "%m | %s: %d: %s: " format "\n", __FILE__, \
+ __LINE__, __func__, ##__VA_ARGS__); \
+ exit(EXIT_FAILURE); \
+ } while (0)
+
+struct statmount {
+ __u32 size; /* Total size, including strings */
+ __u32 mnt_opts; /* [str] Options (comma separated, escaped) */
+ __u64 mask; /* What results were written */
+ __u32 sb_dev_major; /* Device ID */
+ __u32 sb_dev_minor;
+ __u64 sb_magic; /* ..._SUPER_MAGIC */
+ __u32 sb_flags; /* SB_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+ __u32 fs_type; /* [str] Filesystem type */
+ __u64 mnt_id; /* Unique ID of mount */
+ __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */
+ __u32 mnt_id_old; /* Reused IDs used in proc/.../mountinfo */
+ __u32 mnt_parent_id_old;
+ __u64 mnt_attr; /* MOUNT_ATTR_... */
+ __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+ __u64 mnt_peer_group; /* ID of shared peer group */
+ __u64 mnt_master; /* Mount receives propagation from this ID */
+ __u64 propagate_from; /* Propagation from in current namespace */
+ __u32 mnt_root; /* [str] Root of mount relative to root of fs */
+ __u32 mnt_point; /* [str] Mountpoint relative to current root */
+ __u64 mnt_ns_id; /* ID of the mount namespace */
+ __u32 fs_subtype; /* [str] Subtype of fs_type (if any) */
+ __u32 sb_source; /* [str] Source string of the mount */
+ __u32 opt_num; /* Number of fs options */
+ __u32 opt_array; /* [str] Array of nul terminated fs options */
+ __u32 opt_sec_num; /* Number of security options */
+ __u32 opt_sec_array; /* [str] Array of nul terminated security options */
+ __u64 __spare2[46];
+ char str[]; /* Variable size part containing strings */
+};
+
+struct mnt_id_req {
+ __u32 size;
+ __u32 spare;
+ __u64 mnt_id;
+ __u64 param;
+ __u64 mnt_ns_id;
+};
+
+#ifndef MNT_ID_REQ_SIZE_VER0
+#define MNT_ID_REQ_SIZE_VER0 24 /* sizeof first published struct */
+#endif
+
+#ifndef MNT_ID_REQ_SIZE_VER1
+#define MNT_ID_REQ_SIZE_VER1 32 /* sizeof second published struct */
+#endif
+
+/* Get the id for a mount namespace */
+#ifndef NS_GET_MNTNS_ID
+#define NS_GET_MNTNS_ID _IO(0xb7, 0x5)
+#endif
+
+struct mnt_ns_info {
+ __u32 size;
+ __u32 nr_mounts;
+ __u64 mnt_ns_id;
+};
+
+#ifndef MNT_NS_INFO_SIZE_VER0
+#define MNT_NS_INFO_SIZE_VER0 16 /* size of first published struct */
+#endif
+
+#ifndef NS_MNT_GET_INFO
+#define NS_MNT_GET_INFO _IOR(0xb7, 10, struct mnt_ns_info)
+#endif
+
+#ifndef NS_MNT_GET_NEXT
+#define NS_MNT_GET_NEXT _IOR(0xb7, 11, struct mnt_ns_info)
+#endif
+
+#ifndef NS_MNT_GET_PREV
+#define NS_MNT_GET_PREV _IOR(0xb7, 12, struct mnt_ns_info)
+#endif
+
+#ifndef PIDFD_GET_MNT_NAMESPACE
+#define PIDFD_GET_MNT_NAMESPACE _IO(0xFF, 3)
+#endif
+
+#ifndef __NR_listmount
+#define __NR_listmount 458
+#endif
+
+#ifndef __NR_statmount
+#define __NR_statmount 457
+#endif
+
+#ifndef LSMT_ROOT
+#define LSMT_ROOT 0xffffffffffffffff /* root mount */
+#endif
+
+/* @mask bits for statmount(2) */
+#ifndef STATMOUNT_SB_BASIC
+#define STATMOUNT_SB_BASIC 0x00000001U /* Want/got sb_... */
+#endif
+
+#ifndef STATMOUNT_MNT_BASIC
+#define STATMOUNT_MNT_BASIC 0x00000002U /* Want/got mnt_... */
+#endif
+
+#ifndef STATMOUNT_PROPAGATE_FROM
+#define STATMOUNT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */
+#endif
+
+#ifndef STATMOUNT_MNT_ROOT
+#define STATMOUNT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
+#endif
+
+#ifndef STATMOUNT_MNT_POINT
+#define STATMOUNT_MNT_POINT 0x00000010U /* Want/got mnt_point */
+#endif
+
+#ifndef STATMOUNT_FS_TYPE
+#define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */
+#endif
+
+#ifndef STATMOUNT_MNT_NS_ID
+#define STATMOUNT_MNT_NS_ID 0x00000040U /* Want/got mnt_ns_id */
+#endif
+
+#ifndef STATMOUNT_MNT_OPTS
+#define STATMOUNT_MNT_OPTS 0x00000080U /* Want/got mnt_opts */
+#endif
+
+#ifndef STATMOUNT_FS_SUBTYPE
+#define STATMOUNT_FS_SUBTYPE 0x00000100U /* Want/got fs_subtype */
+#endif
+
+#ifndef STATMOUNT_SB_SOURCE
+#define STATMOUNT_SB_SOURCE 0x00000200U /* Want/got sb_source */
+#endif
+
+#ifndef STATMOUNT_OPT_ARRAY
+#define STATMOUNT_OPT_ARRAY 0x00000400U /* Want/got opt_... */
+#endif
+
+#ifndef STATMOUNT_OPT_SEC_ARRAY
+#define STATMOUNT_OPT_SEC_ARRAY 0x00000800U /* Want/got opt_sec... */
+#endif
+
+#ifndef STATX_MNT_ID_UNIQUE
+#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
+#endif
+
+#ifndef MOUNT_ATTR_RDONLY
+#define MOUNT_ATTR_RDONLY 0x00000001 /* Mount read-only */
+#endif
+
+#ifndef MOUNT_ATTR_NOSUID
+#define MOUNT_ATTR_NOSUID 0x00000002 /* Ignore suid and sgid bits */
+#endif
+
+#ifndef MOUNT_ATTR_NODEV
+#define MOUNT_ATTR_NODEV 0x00000004 /* Disallow access to device special files */
+#endif
+
+#ifndef MOUNT_ATTR_NOEXEC
+#define MOUNT_ATTR_NOEXEC 0x00000008 /* Disallow program execution */
+#endif
+
+#ifndef MOUNT_ATTR__ATIME
+#define MOUNT_ATTR__ATIME 0x00000070 /* Setting on how atime should be updated */
+#endif
+
+#ifndef MOUNT_ATTR_RELATIME
+#define MOUNT_ATTR_RELATIME 0x00000000 /* - Update atime relative to mtime/ctime. */
+#endif
+
+#ifndef MOUNT_ATTR_NOATIME
+#define MOUNT_ATTR_NOATIME 0x00000010 /* - Do not update access times. */
+#endif
+
+#ifndef MOUNT_ATTR_STRICTATIME
+#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */
+#endif
+
+#ifndef MOUNT_ATTR_NODIRATIME
+#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */
+#endif
+
+#ifndef MOUNT_ATTR_IDMAP
+#define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
+#endif
+
+#ifndef MOUNT_ATTR_NOSYMFOLLOW
+#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */
+#endif
+
+#ifndef MS_RDONLY
+#define MS_RDONLY 1 /* Mount read-only */
+#endif
+
+#ifndef MS_SYNCHRONOUS
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#endif
+
+#ifndef MS_MANDLOCK
+#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#endif
+
+#ifndef MS_DIRSYNC
+#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
+#endif
+
+#ifndef MS_UNBINDABLE
+#define MS_UNBINDABLE (1<<17) /* change to unbindable */
+#endif
+
+#ifndef MS_PRIVATE
+#define MS_PRIVATE (1<<18) /* change to private */
+#endif
+
+#ifndef MS_SLAVE
+#define MS_SLAVE (1<<19) /* change to slave */
+#endif
+
+#ifndef MS_SHARED
+#define MS_SHARED (1<<20) /* change to shared */
+#endif
+
+#ifndef MS_LAZYTIME
+#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
+#endif
+
+#endif /* __SAMPLES_VFS_H */
diff --git a/samples/vfs/test-list-all-mounts.c b/samples/vfs/test-list-all-mounts.c
new file mode 100644
index 000000000000..1a02ea4593e3
--- /dev/null
+++ b/samples/vfs/test-list-all-mounts.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Christian Brauner <brauner@kernel.org>
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <limits.h>
+#include <linux/types.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "../../tools/testing/selftests/pidfd/pidfd.h"
+#include "samples-vfs.h"
+
+static int __statmount(__u64 mnt_id, __u64 mnt_ns_id, __u64 mask,
+ struct statmount *stmnt, size_t bufsize,
+ unsigned int flags)
+{
+ struct mnt_id_req req = {
+ .size = MNT_ID_REQ_SIZE_VER1,
+ .mnt_id = mnt_id,
+ .param = mask,
+ .mnt_ns_id = mnt_ns_id,
+ };
+
+ return syscall(__NR_statmount, &req, stmnt, bufsize, flags);
+}
+
+static struct statmount *sys_statmount(__u64 mnt_id, __u64 mnt_ns_id,
+ __u64 mask, unsigned int flags)
+{
+ size_t bufsize = 1 << 15;
+ struct statmount *stmnt = NULL, *tmp = NULL;
+ int ret;
+
+ for (;;) {
+ tmp = realloc(stmnt, bufsize);
+ if (!tmp)
+ goto out;
+
+ stmnt = tmp;
+ ret = __statmount(mnt_id, mnt_ns_id, mask, stmnt, bufsize, flags);
+ if (!ret)
+ return stmnt;
+
+ if (errno != EOVERFLOW)
+ goto out;
+
+ bufsize <<= 1;
+ if (bufsize >= UINT_MAX / 2)
+ goto out;
+ }
+
+out:
+ free(stmnt);
+ return NULL;
+}
+
+static ssize_t sys_listmount(__u64 mnt_id, __u64 last_mnt_id, __u64 mnt_ns_id,
+ __u64 list[], size_t num, unsigned int flags)
+{
+ struct mnt_id_req req = {
+ .size = MNT_ID_REQ_SIZE_VER1,
+ .mnt_id = mnt_id,
+ .param = last_mnt_id,
+ .mnt_ns_id = mnt_ns_id,
+ };
+
+ return syscall(__NR_listmount, &req, list, num, flags);
+}
+
+int main(int argc, char *argv[])
+{
+#define LISTMNT_BUFFER 10
+ __u64 list[LISTMNT_BUFFER], last_mnt_id = 0;
+ int ret, pidfd, fd_mntns;
+ struct mnt_ns_info info = {};
+
+ pidfd = sys_pidfd_open(getpid(), 0);
+ if (pidfd < 0)
+ die_errno("pidfd_open failed");
+
+ fd_mntns = ioctl(pidfd, PIDFD_GET_MNT_NAMESPACE, 0);
+ if (fd_mntns < 0)
+ die_errno("ioctl(PIDFD_GET_MNT_NAMESPACE) failed");
+
+ ret = ioctl(fd_mntns, NS_MNT_GET_INFO, &info);
+ if (ret < 0)
+ die_errno("ioctl(NS_GET_MNTNS_ID) failed");
+
+ printf("Listing %u mounts for mount namespace %" PRIu64 "\n",
+ info.nr_mounts, (uint64_t)info.mnt_ns_id);
+ for (;;) {
+ ssize_t nr_mounts;
+next:
+ nr_mounts = sys_listmount(LSMT_ROOT, last_mnt_id,
+ info.mnt_ns_id, list, LISTMNT_BUFFER,
+ 0);
+ if (nr_mounts <= 0) {
+ int fd_mntns_next;
+
+ printf("Finished listing %u mounts for mount namespace %" PRIu64 "\n\n",
+ info.nr_mounts, (uint64_t)info.mnt_ns_id);
+ fd_mntns_next = ioctl(fd_mntns, NS_MNT_GET_NEXT, &info);
+ if (fd_mntns_next < 0) {
+ if (errno == ENOENT) {
+ printf("Finished listing all mount namespaces\n");
+ exit(0);
+ }
+ die_errno("ioctl(NS_MNT_GET_NEXT) failed");
+ }
+ close(fd_mntns);
+ fd_mntns = fd_mntns_next;
+ last_mnt_id = 0;
+ printf("Listing %u mounts for mount namespace %" PRIu64 "\n",
+ info.nr_mounts, (uint64_t)info.mnt_ns_id);
+ goto next;
+ }
+
+ for (size_t cur = 0; cur < nr_mounts; cur++) {
+ struct statmount *stmnt;
+
+ last_mnt_id = list[cur];
+
+ stmnt = sys_statmount(last_mnt_id, info.mnt_ns_id,
+ STATMOUNT_SB_BASIC |
+ STATMOUNT_MNT_BASIC |
+ STATMOUNT_MNT_ROOT |
+ STATMOUNT_MNT_POINT |
+ STATMOUNT_MNT_NS_ID |
+ STATMOUNT_MNT_OPTS |
+ STATMOUNT_FS_TYPE, 0);
+ if (!stmnt) {
+ printf("Failed to statmount(%" PRIu64 ") in mount namespace(%" PRIu64 ")\n",
+ (uint64_t)last_mnt_id, (uint64_t)info.mnt_ns_id);
+ continue;
+ }
+
+ printf("mnt_id:\t\t%" PRIu64 "\nmnt_parent_id:\t%" PRIu64 "\nfs_type:\t%s\nmnt_root:\t%s\nmnt_point:\t%s\nmnt_opts:\t%s\n\n",
+ (uint64_t)stmnt->mnt_id,
+ (uint64_t)stmnt->mnt_parent_id,
+ stmnt->str + stmnt->fs_type,
+ stmnt->str + stmnt->mnt_root,
+ stmnt->str + stmnt->mnt_point,
+ stmnt->str + stmnt->mnt_opts);
+ free(stmnt);
+ }
+ }
+
+ exit(0);
+}
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 46fa18b80fc1..17abc4e7a985 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -286,6 +286,18 @@ handle_line() {
last=$(( $last - 1 ))
fi
+ # Extract info after the symbol if present. E.g.:
+ # func_name+0x54/0x80 (P)
+ # ^^^
+ # The regex assumes only uppercase letters will be used. To be
+ # extended if needed.
+ local info_str=""
+ if [[ ${words[$last]} =~ \([A-Z]*\) ]]; then
+ info_str=${words[$last]}
+ unset words[$last]
+ last=$(( $last - 1 ))
+ fi
+
if [[ ${words[$last]} =~ \[([^]]+)\] ]]; then
module=${words[$last]}
# some traces format is "(%pS)", which like "(foo+0x0/0x1 [bar])"
@@ -313,9 +325,9 @@ handle_line() {
# Add up the line number to the symbol
if [[ -z ${module} ]]
then
- echo "${words[@]}" "$symbol"
+ echo "${words[@]}" "$symbol ${info_str}"
else
- echo "${words[@]}" "$symbol $module"
+ echo "${words[@]}" "$symbol $module ${info_str}"
fi
}
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 2c0185ebc900..c07d150685d7 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -2612,7 +2612,7 @@ static int policy_readlink(struct dentry *dentry, char __user *buffer,
res = snprintf(name, sizeof(name), "%s:[%lu]", AAFS_NAME,
d_inode(dentry)->i_ino);
if (res > 0 && res < sizeof(name))
- res = readlink_copy(buffer, buflen, name);
+ res = readlink_copy(buffer, buflen, name, strlen(name));
else
res = -ENOENT;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b74b566f675e..ad66378d7321 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -10641,6 +10641,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1e1f, "ASUS Vivobook 15 X1504VAP", ALC2XX_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e5e, "ASUS ROG Strix G513", ALC294_FIXUP_ASUS_G513_PINS),
+ SND_PCI_QUIRK(0x1043, 0x1e63, "ASUS H7606W", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
+ SND_PCI_QUIRK(0x1043, 0x1e83, "ASUS GA605W", ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1eb3, "ASUS Ally RCLA72", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x1ed3, "ASUS HN7306W", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10930,8 +10932,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x38e0, "Yoga Y990 Intel VECO Dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38f8, "Yoga Book 9i", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38df, "Y990 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
- SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x38f9, "Thinkbook 16P Gen5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x38fa, "Thinkbook 16P Gen5", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x38fd, "ThinkBook plus Gen5 Hybrid", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC),
@@ -10995,6 +10997,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13),
SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 0f2df7c91e18..0b9e87dc2b6c 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -2451,6 +2451,7 @@ config SND_SOC_WM8993
config SND_SOC_WM8994
tristate
+ depends on MFD_WM8994
config SND_SOC_WM8995
tristate
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 4236f78beec0..83c21c17fb80 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -2404,6 +2404,7 @@ static int cs42l43_codec_runtime_resume(struct device *dev)
static const struct dev_pm_ops cs42l43_codec_pm_ops = {
RUNTIME_PM_OPS(NULL, cs42l43_codec_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
static const struct platform_device_id cs42l43_codec_id_table[] = {
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
index 61729e5b50a8..f508df01145b 100644
--- a/sound/soc/codecs/es8316.c
+++ b/sound/soc/codecs/es8316.c
@@ -39,7 +39,9 @@ struct es8316_priv {
struct snd_soc_jack *jack;
int irq;
unsigned int sysclk;
- unsigned int allowed_rates[ARRAY_SIZE(supported_mclk_lrck_ratios)];
+ /* ES83xx supports halving the MCLK so it supports twice as many rates
+ */
+ unsigned int allowed_rates[ARRAY_SIZE(supported_mclk_lrck_ratios) * 2];
struct snd_pcm_hw_constraint_list sysclk_constraints;
bool jd_inverted;
};
@@ -386,6 +388,12 @@ static int es8316_set_dai_sysclk(struct snd_soc_dai *codec_dai,
if (freq % ratio == 0)
es8316->allowed_rates[count++] = freq / ratio;
+
+ /* We also check if the halved MCLK produces a valid rate
+ * since the codec supports halving the MCLK.
+ */
+ if ((freq / ratio) % 2 == 0)
+ es8316->allowed_rates[count++] = freq / ratio / 2;
}
if (count) {
diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
index a5603b617688..b06eead7e0f6 100644
--- a/sound/soc/codecs/es8326.c
+++ b/sound/soc/codecs/es8326.c
@@ -616,7 +616,7 @@ static int es8326_mute(struct snd_soc_dai *dai, int mute, int direction)
0x0F, 0x0F);
if (es8326->version > ES8326_VERSION_B) {
regmap_update_bits(es8326->regmap, ES8326_VMIDSEL, 0x40, 0x40);
- regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x10);
+ regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x30);
}
}
} else {
@@ -631,6 +631,8 @@ static int es8326_mute(struct snd_soc_dai *dai, int mute, int direction)
regmap_write(es8326->regmap, ES8326_HPR_OFFSET_INI, offset_r);
es8326->calibrated = true;
}
+ regmap_update_bits(es8326->regmap, ES8326_CLK_INV, 0xc0, 0x00);
+ regmap_update_bits(es8326->regmap, ES8326_CLK_MUX, 0x80, 0x00);
if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
regmap_update_bits(es8326->regmap, ES8326_DAC_DSM, 0x01, 0x01);
usleep_range(1000, 5000);
@@ -645,7 +647,7 @@ static int es8326_mute(struct snd_soc_dai *dai, int mute, int direction)
} else {
msleep(300);
if (es8326->version > ES8326_VERSION_B) {
- regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x50);
+ regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x70);
regmap_update_bits(es8326->regmap, ES8326_VMIDSEL, 0x40, 0x00);
}
regmap_update_bits(es8326->regmap, ES8326_ADC_MUTE,
@@ -676,6 +678,10 @@ static int es8326_set_bias_level(struct snd_soc_component *codec,
regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x00);
regmap_update_bits(es8326->regmap, ES8326_CLK_CTL, 0x20, 0x20);
regmap_update_bits(es8326->regmap, ES8326_RESET, 0x02, 0x00);
+ if (es8326->version > ES8326_VERSION_B) {
+ regmap_update_bits(es8326->regmap, ES8326_VMIDSEL, 0x40, 0x40);
+ regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x30);
+ }
break;
case SND_SOC_BIAS_PREPARE:
break;
@@ -683,6 +689,12 @@ static int es8326_set_bias_level(struct snd_soc_component *codec,
regmap_write(es8326->regmap, ES8326_ANA_PDN, 0x3b);
regmap_update_bits(es8326->regmap, ES8326_CLK_CTL, 0x20, 0x00);
regmap_write(es8326->regmap, ES8326_SDINOUT1_IO, ES8326_IO_INPUT);
+ if (es8326->version > ES8326_VERSION_B) {
+ regmap_update_bits(es8326->regmap, ES8326_VMIDSEL, 0x40, 0x40);
+ regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x70, 0x10);
+ }
+ regmap_update_bits(es8326->regmap, ES8326_CLK_INV, 0xc0, 0xc0);
+ regmap_update_bits(es8326->regmap, ES8326_CLK_MUX, 0x80, 0x80);
break;
case SND_SOC_BIAS_OFF:
clk_disable_unprepare(es8326->mclk);
@@ -773,7 +785,10 @@ static void es8326_jack_button_handler(struct work_struct *work)
case 0x6f:
case 0x4b:
/* button volume up */
- cur_button = SND_JACK_BTN_1;
+ if ((iface == 0x6f) && (es8326->version > ES8326_VERSION_B))
+ cur_button = SND_JACK_BTN_0;
+ else
+ cur_button = SND_JACK_BTN_1;
break;
case 0x27:
/* button volume down */
@@ -1082,7 +1097,7 @@ static void es8326_init(struct snd_soc_component *component)
regmap_write(es8326->regmap, ES8326_ADC2_SRC, 0x66);
es8326_disable_micbias(es8326->component);
if (es8326->version > ES8326_VERSION_B) {
- regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x73, 0x13);
+ regmap_update_bits(es8326->regmap, ES8326_ANA_MICBIAS, 0x73, 0x10);
regmap_update_bits(es8326->regmap, ES8326_VMIDSEL, 0x40, 0x40);
}
diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
index fb8cd2284fe8..728bf78ae71f 100644
--- a/sound/soc/codecs/tas2781-i2c.c
+++ b/sound/soc/codecs/tas2781-i2c.c
@@ -78,7 +78,7 @@ static const struct bulk_reg_val tas2781_cali_start_reg[] = {
X2781_CL_STT_VAL(TAS2781_PRM_INT_MASK_REG, 0xfe, false),
X2781_CL_STT_VAL(TAS2781_PRM_CLK_CFG_REG, 0xdd, false),
X2781_CL_STT_VAL(TAS2781_PRM_RSVD_REG, 0x20, false),
- X2781_CL_STT_VAL(TAS2781_PRM_TEST_57_REG, 0x14, false),
+ X2781_CL_STT_VAL(TAS2781_PRM_TEST_57_REG, 0x14, true),
X2781_CL_STT_VAL(TAS2781_PRM_TEST_62_REG, 0x45, true),
X2781_CL_STT_VAL(TAS2781_PRM_PVDD_UVLO_REG, 0x03, false),
X2781_CL_STT_VAL(TAS2781_PRM_CHNL_0_REG, 0xa8, false),
diff --git a/sound/soc/renesas/rcar/adg.c b/sound/soc/renesas/rcar/adg.c
index 0f190abf00e7..191f212d338c 100644
--- a/sound/soc/renesas/rcar/adg.c
+++ b/sound/soc/renesas/rcar/adg.c
@@ -374,12 +374,12 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
return 0;
}
-void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
+int rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
{
struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
struct clk *clk;
- int i;
+ int ret = 0, i;
if (enable) {
rsnd_mod_bset(adg_mod, BRGCKR, 0x80770000, adg->ckr);
@@ -389,18 +389,33 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
for_each_rsnd_clkin(clk, adg, i) {
if (enable) {
- clk_prepare_enable(clk);
+ ret = clk_prepare_enable(clk);
/*
* We shouldn't use clk_get_rate() under
* atomic context. Let's keep it when
* rsnd_adg_clk_enable() was called
*/
+ if (ret < 0)
+ break;
+
adg->clkin_rate[i] = clk_get_rate(clk);
} else {
- clk_disable_unprepare(clk);
+ if (adg->clkin_rate[i])
+ clk_disable_unprepare(clk);
+
+ adg->clkin_rate[i] = 0;
}
}
+
+ /*
+ * rsnd_adg_clk_enable() might return an error (_disable() will not).
+ * We need to roll back in such a case.
+ */
+ if (ret < 0)
+ rsnd_adg_clk_disable(priv);
+
+ return ret;
}
static struct clk *rsnd_adg_create_null_clk(struct rsnd_priv *priv,
@@ -753,7 +768,10 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
if (ret)
return ret;
- rsnd_adg_clk_enable(priv);
+ ret = rsnd_adg_clk_enable(priv);
+ if (ret)
+ return ret;
+
rsnd_adg_clk_dbg_info(priv, NULL);
return 0;
diff --git a/sound/soc/renesas/rcar/core.c b/sound/soc/renesas/rcar/core.c
index e2234928c9e8..d3709fd0409e 100644
--- a/sound/soc/renesas/rcar/core.c
+++ b/sound/soc/renesas/rcar/core.c
@@ -2086,9 +2086,7 @@ static int __maybe_unused rsnd_resume(struct device *dev)
{
struct rsnd_priv *priv = dev_get_drvdata(dev);
- rsnd_adg_clk_enable(priv);
-
- return 0;
+ return rsnd_adg_clk_enable(priv);
}
static const struct dev_pm_ops rsnd_pm_ops = {
diff --git a/sound/soc/renesas/rcar/rsnd.h b/sound/soc/renesas/rcar/rsnd.h
index 3c164d8e3b16..a5f54b65313c 100644
--- a/sound/soc/renesas/rcar/rsnd.h
+++ b/sound/soc/renesas/rcar/rsnd.h
@@ -608,7 +608,7 @@ int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *cmd_mod,
struct rsnd_dai_stream *io);
#define rsnd_adg_clk_enable(priv) rsnd_adg_clk_control(priv, 1)
#define rsnd_adg_clk_disable(priv) rsnd_adg_clk_control(priv, 0)
-void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable);
+int rsnd_adg_clk_control(struct rsnd_priv *priv, int enable);
void rsnd_adg_clk_dbg_info(struct rsnd_priv *priv, struct seq_file *m);
/*
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 4b1ea7b2c796..60b4b7b75215 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -127,8 +127,9 @@ config SND_SOC_SAMSUNG_TM2_WM5110
config SND_SOC_SAMSUNG_ARIES_WM8994
tristate "SoC I2S Audio support for WM8994 on Aries"
- depends on SND_SOC_SAMSUNG && MFD_WM8994 && IIO && EXTCON
+ depends on SND_SOC_SAMSUNG && I2C && IIO && EXTCON
select SND_SOC_BT_SCO
+ select MFD_WM8994
select SND_SOC_WM8994
select SND_SAMSUNG_I2S
help
@@ -140,8 +141,9 @@ config SND_SOC_SAMSUNG_ARIES_WM8994
config SND_SOC_SAMSUNG_MIDAS_WM1811
tristate "SoC I2S Audio support for Midas boards"
- depends on SND_SOC_SAMSUNG && IIO
+ depends on SND_SOC_SAMSUNG && I2C && IIO
select SND_SAMSUNG_I2S
+ select MFD_WM8994
select SND_SOC_WM8994
help
Say Y if you want to add support for SoC audio on the Midas boards.
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
index d8201c4b1520..6750fdb42564 100755
--- a/tools/net/ynl/ynl-gen-c.py
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -2384,6 +2384,17 @@ def print_kernel_family_struct_src(family, cw):
if not kernel_can_gen_family_struct(family):
return
+ if 'sock-priv' in family.kernel_family:
+ # Generate "trampolines" to make CFI happy
+ cw.write_func("static void", f"__{family.c_name}_nl_sock_priv_init",
+ [f"{family.c_name}_nl_sock_priv_init(priv);"],
+ ["void *priv"])
+ cw.nl()
+ cw.write_func("static void", f"__{family.c_name}_nl_sock_priv_destroy",
+ [f"{family.c_name}_nl_sock_priv_destroy(priv);"],
+ ["void *priv"])
+ cw.nl()
+
cw.block_start(f"struct genl_family {family.ident_name}_nl_family __ro_after_init =")
cw.p('.name\t\t= ' + family.fam_key + ',')
cw.p('.version\t= ' + family.ver_key + ',')
@@ -2401,9 +2412,8 @@ def print_kernel_family_struct_src(family, cw):
cw.p(f'.n_mcgrps\t= ARRAY_SIZE({family.c_name}_nl_mcgrps),')
if 'sock-priv' in family.kernel_family:
cw.p(f'.sock_priv_size\t= sizeof({family.kernel_family["sock-priv"]}),')
- # Force cast here, actual helpers take pointer to the real type.
- cw.p(f'.sock_priv_init\t= (void *){family.c_name}_nl_sock_priv_init,')
- cw.p(f'.sock_priv_destroy = (void *){family.c_name}_nl_sock_priv_destroy,')
+ cw.p(f'.sock_priv_init\t= __{family.c_name}_nl_sock_priv_init,')
+ cw.p(f'.sock_priv_destroy = __{family.c_name}_nl_sock_priv_destroy,')
cw.block_end(';')
diff --git a/tools/testing/selftests/coredump/Makefile b/tools/testing/selftests/coredump/Makefile
new file mode 100644
index 000000000000..ed210037b29d
--- /dev/null
+++ b/tools/testing/selftests/coredump/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+CFLAGS = $(KHDR_INCLUDES)
+
+TEST_GEN_PROGS := stackdump_test
+TEST_FILES := stackdump
+
+include ../lib.mk
diff --git a/tools/testing/selftests/coredump/README.rst b/tools/testing/selftests/coredump/README.rst
new file mode 100644
index 000000000000..164a7aa181c8
--- /dev/null
+++ b/tools/testing/selftests/coredump/README.rst
@@ -0,0 +1,50 @@
+coredump selftest
+=================
+
+Background context
+------------------
+
+`coredump` is a feature which dumps a process's memory space when the process terminates
+unexpectedly (e.g. due to a segmentation fault), which can be useful for debugging. By default,
+`coredump` dumps the memory to a file named `core`, but this behavior can be changed by writing a
+different file name to `/proc/sys/kernel/core_pattern`. Furthermore, the coredump can be piped to a
+user-space program by writing the pipe symbol (`|`) followed by the command to be executed to
+`/proc/sys/kernel/core_pattern`. For the full description, see `man 5 core`.
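+
+As a rough illustration (the handler path below is hypothetical and not part of this test), piping
+core dumps to a helper program can be set up from a root shell like this::
+
+    # pass the crashing PID in the initial namespace (%P) to a hypothetical helper program
+    echo '|/usr/local/bin/dump-handler %P' > /proc/sys/kernel/core_pattern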
+
+The piped user program may be interested in reading the stack pointers of the crashed process. The
+crashed process's stack pointers can be read from `procfs`: they are exposed as the `kstkesp` field
+in `/proc/$PID/stat`. See `man 5 proc` for all the details.
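+
+As a quick sketch (assuming the crashed process has PID 1234), the field can be read with::
+
+    # kstkesp is the 29th space-separated field of /proc/$PID/stat
+    awk '{print $29}' /proc/1234/stat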
+
+The problem
+-----------
+While a thread is active, the stack pointer is unsafe to read and therefore the `kstkesp` field
+reads zero. But when the thread is dead (e.g. during a coredump), this field should have a valid
+value.
+
+However, this was broken in the past and `kstkesp` was zero even during coredump:
+
+* commit 0a1eb2d474ed ("fs/proc: Stop reporting eip and esp in /proc/PID/stat") changed kstkesp to
+ always be zero
+
+* commit fd7d56270b52 ("fs/proc: Report eip/esp in /prod/PID/stat for coredumping") fixed it for the
+ coredumping thread. However, other threads in a coredumping process still had the problem.
+
+* commit cb8f381f1613 ("fs/proc/array.c: allow reporting eip/esp for all coredumping threads") fixed
+ it for all threads in a coredumping process.
+
+* commit 92307383082d ("coredump: Don't perform any cleanups before dumping core") broke it again
+ for the other threads in a coredumping process.
+
+The problem has been fixed now, but considering the history, it may appear again in the future.
+
+The goal of this test
+---------------------
+This test detects problems with reading `kstkesp` during a coredump by doing the following:
+
+#. Tell the kernel to execute the "stackdump" script when a coredump happens. This script
+ reads the stack pointers of all threads of the crashed process.
+
+#. Spawn a child process that creates some threads and then crashes.
+
+#. Read the output from the "stackdump" script, and make sure all stack pointer values are
+ non-zero.
diff --git a/tools/testing/selftests/coredump/stackdump b/tools/testing/selftests/coredump/stackdump
new file mode 100755
index 000000000000..96714ce42d12
--- /dev/null
+++ b/tools/testing/selftests/coredump/stackdump
@@ -0,0 +1,14 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+CRASH_PROGRAM_ID=$1
+STACKDUMP_FILE=$2
+
+TMP=$(mktemp)
+
+for t in /proc/$CRASH_PROGRAM_ID/task/*; do
+ tid=$(basename $t)
+ cat /proc/$tid/stat | awk '{print $29}' >> $TMP
+done
+
+mv $TMP $STACKDUMP_FILE
diff --git a/tools/testing/selftests/coredump/stackdump_test.c b/tools/testing/selftests/coredump/stackdump_test.c
new file mode 100644
index 000000000000..137b2364a082
--- /dev/null
+++ b/tools/testing/selftests/coredump/stackdump_test.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <fcntl.h>
+#include <libgen.h>
+#include <linux/limits.h>
+#include <pthread.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <unistd.h>
+
+#include "../kselftest_harness.h"
+
+#define STACKDUMP_FILE "stack_values"
+#define STACKDUMP_SCRIPT "stackdump"
+#define NUM_THREAD_SPAWN 128
+
+static void *do_nothing(void *)
+{
+ while (1)
+ pause();
+}
+
+static void crashing_child(void)
+{
+ pthread_t thread;
+ int i;
+
+ for (i = 0; i < NUM_THREAD_SPAWN; ++i)
+ pthread_create(&thread, NULL, do_nothing, NULL);
+
+ /* crash on purpose */
+ i = *(int *)NULL;
+}
+
+FIXTURE(coredump)
+{
+ char original_core_pattern[256];
+};
+
+FIXTURE_SETUP(coredump)
+{
+ char buf[PATH_MAX];
+ FILE *file;
+ char *dir;
+ int ret;
+
+ file = fopen("/proc/sys/kernel/core_pattern", "r");
+ ASSERT_NE(NULL, file);
+
+ ret = fread(self->original_core_pattern, 1, sizeof(self->original_core_pattern), file);
+ ASSERT_TRUE(ret || feof(file));
+ ASSERT_LT(ret, sizeof(self->original_core_pattern));
+
+ self->original_core_pattern[ret] = '\0';
+
+ ret = fclose(file);
+ ASSERT_EQ(0, ret);
+}
+
+FIXTURE_TEARDOWN(coredump)
+{
+ const char *reason;
+ FILE *file;
+ int ret;
+
+ unlink(STACKDUMP_FILE);
+
+ file = fopen("/proc/sys/kernel/core_pattern", "w");
+ if (!file) {
+ reason = "Unable to open core_pattern";
+ goto fail;
+ }
+
+ ret = fprintf(file, "%s", self->original_core_pattern);
+ if (ret < 0) {
+ reason = "Unable to write to core_pattern";
+ goto fail;
+ }
+
+ ret = fclose(file);
+ if (ret) {
+ reason = "Unable to close core_pattern";
+ goto fail;
+ }
+
+ return;
+fail:
+ /* This should never happen */
+ fprintf(stderr, "Failed to cleanup stackdump test: %s\n", reason);
+}
+
+TEST_F(coredump, stackdump)
+{
+ struct sigaction action = {};
+ unsigned long long stack;
+ char *test_dir, *line;
+ size_t line_length;
+ char buf[PATH_MAX];
+ int ret, i;
+ FILE *file;
+ pid_t pid;
+
+ /*
+ * Step 1: Setup core_pattern so that the stackdump script is executed when the child
+ * process crashes
+ */
+ ret = readlink("/proc/self/exe", buf, sizeof(buf));
+ ASSERT_NE(-1, ret);
+ ASSERT_LT(ret, sizeof(buf));
+ buf[ret] = '\0';
+
+ test_dir = dirname(buf);
+
+ file = fopen("/proc/sys/kernel/core_pattern", "w");
+ ASSERT_NE(NULL, file);
+
+ ret = fprintf(file, "|%1$s/%2$s %%P %1$s/%3$s", test_dir, STACKDUMP_SCRIPT, STACKDUMP_FILE);
+ ASSERT_LT(0, ret);
+
+ ret = fclose(file);
+ ASSERT_EQ(0, ret);
+
+ /* Step 2: Create a process that spawns some threads and then crashes */
+ pid = fork();
+ ASSERT_TRUE(pid >= 0);
+ if (pid == 0)
+ crashing_child();
+
+ /*
+ * Step 3: Wait for the stackdump script to write the stack pointers to the stackdump file
+ */
+ for (i = 0; i < 10; ++i) {
+ file = fopen(STACKDUMP_FILE, "r");
+ if (file)
+ break;
+ sleep(1);
+ }
+ ASSERT_NE(file, NULL);
+
+ /* Step 4: Make sure all stack pointer values are non-zero */
+ for (i = 0; -1 != getline(&line, &line_length, file); ++i) {
+ stack = strtoull(line, NULL, 10);
+ ASSERT_NE(stack, 0);
+ }
+
+ ASSERT_EQ(i, 1 + NUM_THREAD_SPAWN);
+
+ fclose(file);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh b/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh
index fd13c8cfb7a8..b411fe66510f 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh
@@ -58,9 +58,12 @@ for root in mq mqprio; do
ethtool -L $NDEV combined 4
n_child_assert 4 "One real queue, rest default"
- # Graft some
- tcq replace parent 100:1 handle 204:
- n_child_assert 3 "Grafted"
+ # Remove real one
+ tcq del parent 100:4 handle 204:
+
+ # Replace default with pfifo
+ tcq replace parent 100:1 handle 205: pfifo limit 1000
+ n_child_assert 3 "Deleting real one, replacing default one with pfifo"
ethtool -L $NDEV combined 1
n_child_assert 1 "Grafted, one"
diff --git a/tools/testing/selftests/nsfs/.gitignore b/tools/testing/selftests/filesystems/nsfs/.gitignore
index ed79ebdf286e..92a8249006d1 100644
--- a/tools/testing/selftests/nsfs/.gitignore
+++ b/tools/testing/selftests/filesystems/nsfs/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
owner
pidns
+iterate_mntns
diff --git a/tools/testing/selftests/nsfs/Makefile b/tools/testing/selftests/filesystems/nsfs/Makefile
index dd9bd50b7b93..231aaa7dfd95 100644
--- a/tools/testing/selftests/nsfs/Makefile
+++ b/tools/testing/selftests/filesystems/nsfs/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-TEST_GEN_PROGS := owner pidns
+TEST_GEN_PROGS := owner pidns iterate_mntns
CFLAGS := -Wall -Werror
-include ../lib.mk
+include ../../lib.mk
diff --git a/tools/testing/selftests/nsfs/config b/tools/testing/selftests/filesystems/nsfs/config
index 598d0a225fc9..598d0a225fc9 100644
--- a/tools/testing/selftests/nsfs/config
+++ b/tools/testing/selftests/filesystems/nsfs/config
diff --git a/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c b/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
new file mode 100644
index 000000000000..457cf76f3c5f
--- /dev/null
+++ b/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Christian Brauner <brauner@kernel.org>
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <unistd.h>
+
+#include "../../kselftest_harness.h"
+
+#define MNT_NS_COUNT 11
+#define MNT_NS_LAST_INDEX 10
+
+struct mnt_ns_info {
+ __u32 size;
+ __u32 nr_mounts;
+ __u64 mnt_ns_id;
+};
+
+#define MNT_NS_INFO_SIZE_VER0 16 /* size of first published struct */
+
+/* Get information about namespace. */
+#define NS_MNT_GET_INFO _IOR(0xb7, 10, struct mnt_ns_info)
+/* Get next namespace. */
+#define NS_MNT_GET_NEXT _IOR(0xb7, 11, struct mnt_ns_info)
+/* Get previous namespace. */
+#define NS_MNT_GET_PREV _IOR(0xb7, 12, struct mnt_ns_info)
+
+FIXTURE(iterate_mount_namespaces) {
+ int fd_mnt_ns[MNT_NS_COUNT];
+ __u64 mnt_ns_id[MNT_NS_COUNT];
+};
+
+FIXTURE_SETUP(iterate_mount_namespaces)
+{
+ for (int i = 0; i < MNT_NS_COUNT; i++)
+ self->fd_mnt_ns[i] = -EBADF;
+
+ /*
+ * Creating a new user namespace lets us guarantee that we only see
+ * mount namespaces that we actually created.
+ */
+ ASSERT_EQ(unshare(CLONE_NEWUSER), 0);
+
+ for (int i = 0; i < MNT_NS_COUNT; i++) {
+ struct mnt_ns_info info = {};
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ self->fd_mnt_ns[i] = open("/proc/self/ns/mnt", O_RDONLY | O_CLOEXEC);
+ ASSERT_GE(self->fd_mnt_ns[i], 0);
+ ASSERT_EQ(ioctl(self->fd_mnt_ns[i], NS_MNT_GET_INFO, &info), 0);
+ self->mnt_ns_id[i] = info.mnt_ns_id;
+ }
+}
+
+FIXTURE_TEARDOWN(iterate_mount_namespaces)
+{
+ for (int i = 0; i < MNT_NS_COUNT; i++) {
+ if (self->fd_mnt_ns[i] < 0)
+ continue;
+ ASSERT_EQ(close(self->fd_mnt_ns[i]), 0);
+ }
+}
+
+TEST_F(iterate_mount_namespaces, iterate_all_forward)
+{
+ int fd_mnt_ns_cur, count = 0;
+
+ fd_mnt_ns_cur = fcntl(self->fd_mnt_ns[0], F_DUPFD_CLOEXEC);
+ ASSERT_GE(fd_mnt_ns_cur, 0);
+
+ for (;; count++) {
+ struct mnt_ns_info info = {};
+ int fd_mnt_ns_next;
+
+ fd_mnt_ns_next = ioctl(fd_mnt_ns_cur, NS_MNT_GET_NEXT, &info);
+ if (fd_mnt_ns_next < 0 && errno == ENOENT)
+ break;
+ ASSERT_GE(fd_mnt_ns_next, 0);
+ ASSERT_EQ(close(fd_mnt_ns_cur), 0);
+ fd_mnt_ns_cur = fd_mnt_ns_next;
+ }
+ ASSERT_EQ(count, MNT_NS_LAST_INDEX);
+}
+
+TEST_F(iterate_mount_namespaces, iterate_all_backwards)
+{
+ int fd_mnt_ns_cur, count = 0;
+
+ fd_mnt_ns_cur = fcntl(self->fd_mnt_ns[MNT_NS_LAST_INDEX], F_DUPFD_CLOEXEC);
+ ASSERT_GE(fd_mnt_ns_cur, 0);
+
+ for (;; count++) {
+ struct mnt_ns_info info = {};
+ int fd_mnt_ns_prev;
+
+ fd_mnt_ns_prev = ioctl(fd_mnt_ns_cur, NS_MNT_GET_PREV, &info);
+ if (fd_mnt_ns_prev < 0 && errno == ENOENT)
+ break;
+ ASSERT_GE(fd_mnt_ns_prev, 0);
+ ASSERT_EQ(close(fd_mnt_ns_cur), 0);
+ fd_mnt_ns_cur = fd_mnt_ns_prev;
+ }
+ ASSERT_EQ(count, MNT_NS_LAST_INDEX);
+}
+
+TEST_F(iterate_mount_namespaces, iterate_forward)
+{
+ int fd_mnt_ns_cur;
+
+ ASSERT_EQ(setns(self->fd_mnt_ns[0], CLONE_NEWNS), 0);
+
+ fd_mnt_ns_cur = self->fd_mnt_ns[0];
+ for (int i = 1; i < MNT_NS_COUNT; i++) {
+ struct mnt_ns_info info = {};
+ int fd_mnt_ns_next;
+
+ fd_mnt_ns_next = ioctl(fd_mnt_ns_cur, NS_MNT_GET_NEXT, &info);
+ ASSERT_GE(fd_mnt_ns_next, 0);
+ ASSERT_EQ(close(fd_mnt_ns_cur), 0);
+ fd_mnt_ns_cur = fd_mnt_ns_next;
+ ASSERT_EQ(info.mnt_ns_id, self->mnt_ns_id[i]);
+ }
+}
+
+TEST_F(iterate_mount_namespaces, iterate_backward)
+{
+ int fd_mnt_ns_cur;
+
+ ASSERT_EQ(setns(self->fd_mnt_ns[MNT_NS_LAST_INDEX], CLONE_NEWNS), 0);
+
+ fd_mnt_ns_cur = self->fd_mnt_ns[MNT_NS_LAST_INDEX];
+ for (int i = MNT_NS_LAST_INDEX - 1; i >= 0; i--) {
+ struct mnt_ns_info info = {};
+ int fd_mnt_ns_prev;
+
+ fd_mnt_ns_prev = ioctl(fd_mnt_ns_cur, NS_MNT_GET_PREV, &info);
+ ASSERT_GE(fd_mnt_ns_prev, 0);
+ ASSERT_EQ(close(fd_mnt_ns_cur), 0);
+ fd_mnt_ns_cur = fd_mnt_ns_prev;
+ ASSERT_EQ(info.mnt_ns_id, self->mnt_ns_id[i]);
+ }
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/nsfs/owner.c b/tools/testing/selftests/filesystems/nsfs/owner.c
index 96a976c74550..96a976c74550 100644
--- a/tools/testing/selftests/nsfs/owner.c
+++ b/tools/testing/selftests/filesystems/nsfs/owner.c
diff --git a/tools/testing/selftests/nsfs/pidns.c b/tools/testing/selftests/filesystems/nsfs/pidns.c
index e3c772c6a7c7..e3c772c6a7c7 100644
--- a/tools/testing/selftests/nsfs/pidns.c
+++ b/tools/testing/selftests/filesystems/nsfs/pidns.c
diff --git a/tools/testing/selftests/filesystems/statmount/Makefile b/tools/testing/selftests/filesystems/statmount/Makefile
index 3af3136e35a4..14ee91a41650 100644
--- a/tools/testing/selftests/filesystems/statmount/Makefile
+++ b/tools/testing/selftests/filesystems/statmount/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES)
-TEST_GEN_PROGS := statmount_test statmount_test_ns
+TEST_GEN_PROGS := statmount_test statmount_test_ns listmount_test
include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/statmount/listmount_test.c b/tools/testing/selftests/filesystems/statmount/listmount_test.c
new file mode 100644
index 000000000000..15f0834f7557
--- /dev/null
+++ b/tools/testing/selftests/filesystems/statmount/listmount_test.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Christian Brauner <brauner@kernel.org>
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <unistd.h>
+
+#include "statmount.h"
+#include "../../kselftest_harness.h"
+
+#ifndef LISTMOUNT_REVERSE
+#define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */
+#endif
+
+#define LISTMNT_BUFFER 10
+
+/* Check that all mount ids are in increasing order. */
+TEST(listmount_forward)
+{
+ uint64_t list[LISTMNT_BUFFER], last_mnt_id = 0;
+
+ for (;;) {
+ ssize_t nr_mounts;
+
+ nr_mounts = listmount(LSMT_ROOT, 0, last_mnt_id,
+ list, LISTMNT_BUFFER, 0);
+ ASSERT_GE(nr_mounts, 0);
+ if (nr_mounts == 0)
+ break;
+
+ for (size_t cur = 0; cur < nr_mounts; cur++) {
+ if (cur < nr_mounts - 1)
+ ASSERT_LT(list[cur], list[cur + 1]);
+ last_mnt_id = list[cur];
+ }
+ }
+}
+
+/* Check that all mount ids are in decreasing order. */
+TEST(listmount_backward)
+{
+ uint64_t list[LISTMNT_BUFFER], last_mnt_id = 0;
+
+ for (;;) {
+ ssize_t nr_mounts;
+
+ nr_mounts = listmount(LSMT_ROOT, 0, last_mnt_id,
+ list, LISTMNT_BUFFER, LISTMOUNT_REVERSE);
+ ASSERT_GE(nr_mounts, 0);
+ if (nr_mounts == 0)
+ break;
+
+ for (size_t cur = 0; cur < nr_mounts; cur++) {
+ if (cur < nr_mounts - 1)
+ ASSERT_GT(list[cur], list[cur + 1]);
+ last_mnt_id = list[cur];
+ }
+ }
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
index 32c6ccc2a6be..1238e1c5aae1 100644
--- a/tools/testing/selftests/mm/cow.c
+++ b/tools/testing/selftests/mm/cow.c
@@ -758,7 +758,7 @@ static void do_run_with_base_page(test_fn fn, bool swapout)
}
/* Populate a base page. */
- memset(mem, 0, pagesize);
+ memset(mem, 1, pagesize);
if (swapout) {
madvise(mem, pagesize, MADV_PAGEOUT);
@@ -824,12 +824,12 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize)
* Try to populate a THP. Touch the first sub-page and test if
* we get the last sub-page populated automatically.
*/
- mem[0] = 0;
+ mem[0] = 1;
if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) {
ksft_test_result_skip("Did not get a THP populated\n");
goto munmap;
}
- memset(mem, 0, thpsize);
+ memset(mem, 1, thpsize);
size = thpsize;
switch (thp_run) {
@@ -1012,7 +1012,7 @@ static void run_with_hugetlb(test_fn fn, const char *desc, size_t hugetlbsize)
}
/* Populate an huge page. */
- memset(mem, 0, hugetlbsize);
+ memset(mem, 1, hugetlbsize);
/*
* We need a total of two hugetlb pages to handle COW/unsharing
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index 4209b9569039..414addef9a45 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -25,6 +25,8 @@
#include <sys/types.h>
#include <sys/mman.h>
+#include <arpa/inet.h>
+
#include <netdb.h>
#include <netinet/in.h>
@@ -1211,23 +1213,42 @@ static void parse_setsock_options(const char *name)
exit(1);
}
-void xdisconnect(int fd, int addrlen)
+void xdisconnect(int fd)
{
- struct sockaddr_storage empty;
+ socklen_t addrlen = sizeof(struct sockaddr_storage);
+ struct sockaddr_storage addr, empty;
int msec_sleep = 10;
- int queued = 1;
- int i;
+ void *raw_addr;
+ int i, cmdlen;
+ char cmd[128];
+
+ /* get the local address and convert it to string */
+ if (getsockname(fd, (struct sockaddr *)&addr, &addrlen) < 0)
+ xerror("getsockname");
+
+ if (addr.ss_family == AF_INET)
+ raw_addr = &(((struct sockaddr_in *)&addr)->sin_addr);
+ else if (addr.ss_family == AF_INET6)
+ raw_addr = &(((struct sockaddr_in6 *)&addr)->sin6_addr);
+ else
+ xerror("bad family");
+
+ strcpy(cmd, "ss -M | grep -q ");
+ cmdlen = strlen(cmd);
+ if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen],
+ sizeof(cmd) - cmdlen))
+ xerror("inet_ntop");
shutdown(fd, SHUT_WR);
- /* while until the pending data is completely flushed, the later
+ /*
+ * wait until the pending data is completely flushed and all
+ * the MPTCP sockets have reached the closed status. The later
* disconnect will bypass/ignore/drop any pending data.
*/
for (i = 0; ; i += msec_sleep) {
- if (ioctl(fd, SIOCOUTQ, &queued) < 0)
- xerror("can't query out socket queue: %d", errno);
-
- if (!queued)
+ /* closed socket are not listed by 'ss' */
+ if (system(cmd) != 0)
break;
if (i > poll_timeout)
@@ -1281,9 +1302,9 @@ again:
return ret;
if (cfg_truncate > 0) {
- xdisconnect(fd, peer->ai_addrlen);
+ xdisconnect(fd);
} else if (--cfg_repeat > 0) {
- xdisconnect(fd, peer->ai_addrlen);
+ xdisconnect(fd);
/* the socket could be unblocking at this point, we need the
* connect to be blocking
diff --git a/tools/testing/selftests/pid_namespace/.gitignore b/tools/testing/selftests/pid_namespace/.gitignore
index 93ab9d7e5b7e..5118f0f3edf4 100644
--- a/tools/testing/selftests/pid_namespace/.gitignore
+++ b/tools/testing/selftests/pid_namespace/.gitignore
@@ -1 +1,2 @@
+pid_max
regression_enomem
diff --git a/tools/testing/selftests/pid_namespace/Makefile b/tools/testing/selftests/pid_namespace/Makefile
index 9286a1d22cd3..b972f55d07ae 100644
--- a/tools/testing/selftests/pid_namespace/Makefile
+++ b/tools/testing/selftests/pid_namespace/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += -g $(KHDR_INCLUDES)
-TEST_GEN_PROGS = regression_enomem
+TEST_GEN_PROGS = regression_enomem pid_max
LOCAL_HDRS += $(selfdir)/pidfd/pidfd.h
diff --git a/tools/testing/selftests/pid_namespace/pid_max.c b/tools/testing/selftests/pid_namespace/pid_max.c
new file mode 100644
index 000000000000..51c414faabb0
--- /dev/null
+++ b/tools/testing/selftests/pid_namespace/pid_max.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#define _GNU_SOURCE
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/types.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/wait.h>
+
+#include "../kselftest_harness.h"
+#include "../pidfd/pidfd.h"
+
+#define __STACK_SIZE (8 * 1024 * 1024)
+static pid_t do_clone(int (*fn)(void *), void *arg, int flags)
+{
+ char *stack;
+ pid_t ret;
+
+ stack = malloc(__STACK_SIZE);
+ if (!stack)
+ return -ENOMEM;
+
+#ifdef __ia64__
+ ret = __clone2(fn, stack, __STACK_SIZE, flags | SIGCHLD, arg);
+#else
+ ret = clone(fn, stack + __STACK_SIZE, flags | SIGCHLD, arg);
+#endif
+ free(stack);
+ return ret;
+}
+
+static int pid_max_cb(void *data)
+{
+ int fd, ret;
+ pid_t pid;
+
+ ret = mount("", "/", NULL, MS_PRIVATE | MS_REC, 0);
+ if (ret) {
+ fprintf(stderr, "%m - Failed to make rootfs private mount\n");
+ return -1;
+ }
+
+ umount2("/proc", MNT_DETACH);
+
+ ret = mount("proc", "/proc", "proc", 0, NULL);
+ if (ret) {
+ fprintf(stderr, "%m - Failed to mount proc\n");
+ return -1;
+ }
+
+ fd = open("/proc/sys/kernel/pid_max", O_RDWR | O_CLOEXEC | O_NOCTTY);
+ if (fd < 0) {
+ fprintf(stderr, "%m - Failed to open pid_max\n");
+ return -1;
+ }
+
+ ret = write(fd, "500", sizeof("500") - 1);
+ if (ret < 0) {
+ fprintf(stderr, "%m - Failed to write pid_max\n");
+ return -1;
+ }
+
+ for (int i = 0; i < 501; i++) {
+ pid = fork();
+ if (pid == 0)
+ exit(EXIT_SUCCESS);
+ wait_for_pid(pid);
+ if (pid > 500) {
+ fprintf(stderr, "Managed to create pid number beyond limit\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int pid_max_nested_inner(void *data)
+{
+ int fret = -1;
+ pid_t pids[2];
+ int fd, i, ret;
+
+ ret = mount("", "/", NULL, MS_PRIVATE | MS_REC, 0);
+ if (ret) {
+ fprintf(stderr, "%m - Failed to make rootfs private mount\n");
+ return fret;
+ }
+
+ umount2("/proc", MNT_DETACH);
+
+ ret = mount("proc", "/proc", "proc", 0, NULL);
+ if (ret) {
+ fprintf(stderr, "%m - Failed to mount proc\n");
+ return fret;
+ }
+
+ fd = open("/proc/sys/kernel/pid_max", O_RDWR | O_CLOEXEC | O_NOCTTY);
+ if (fd < 0) {
+ fprintf(stderr, "%m - Failed to open pid_max\n");
+ return fret;
+ }
+
+ ret = write(fd, "500", sizeof("500") - 1);
+ close(fd);
+ if (ret < 0) {
+ fprintf(stderr, "%m - Failed to write pid_max\n");
+ return fret;
+ }
+
+ pids[0] = fork();
+ if (pids[0] < 0) {
+ fprintf(stderr, "Failed to create first new process\n");
+ return fret;
+ }
+
+ if (pids[0] == 0)
+ exit(EXIT_SUCCESS);
+
+ pids[1] = fork();
+ wait_for_pid(pids[0]);
+ if (pids[1] >= 0) {
+ if (pids[1] == 0)
+ exit(EXIT_SUCCESS);
+ wait_for_pid(pids[1]);
+
+ fprintf(stderr, "Managed to create process even though ancestor pid namespace had a limit\n");
+ return fret;
+ }
+
+ /* Now make sure that we wrap pids at 500. */
+ for (i = 0; i < 510; i++) {
+ pid_t pid;
+
+ pid = fork();
+ if (pid < 0)
+ return fret;
+
+ if (pid == 0)
+ exit(EXIT_SUCCESS);
+
+ wait_for_pid(pid);
+ if (pid >= 500) {
+ fprintf(stderr, "Managed to create process with pid %d beyond configured limit\n", pid);
+ return fret;
+ }
+ }
+
+ return 0;
+}
+
+static int pid_max_nested_outer(void *data)
+{
+ int fret = -1, nr_procs = 400;
+ pid_t pids[1000];
+ int fd, i, ret;
+ pid_t pid;
+
+ ret = mount("", "/", NULL, MS_PRIVATE | MS_REC, 0);
+ if (ret) {
+ fprintf(stderr, "%m - Failed to make rootfs private mount\n");
+ return fret;
+ }
+
+ umount2("/proc", MNT_DETACH);
+
+ ret = mount("proc", "/proc", "proc", 0, NULL);
+ if (ret) {
+ fprintf(stderr, "%m - Failed to mount proc\n");
+ return fret;
+ }
+
+ fd = open("/proc/sys/kernel/pid_max", O_RDWR | O_CLOEXEC | O_NOCTTY);
+ if (fd < 0) {
+ fprintf(stderr, "%m - Failed to open pid_max\n");
+ return fret;
+ }
+
+ ret = write(fd, "400", sizeof("400") - 1);
+ close(fd);
+ if (ret < 0) {
+ fprintf(stderr, "%m - Failed to write pid_max\n");
+ return fret;
+ }
+
+ /*
+ * Create 397 processes. This leaves room for do_clone() (398) and
+ * one more (399), so creating another process needs to fail.
+ */
+ for (nr_procs = 0; nr_procs < 396; nr_procs++) {
+ pid = fork();
+ if (pid < 0)
+ goto reap;
+
+ if (pid == 0)
+ exit(EXIT_SUCCESS);
+
+ pids[nr_procs] = pid;
+ }
+
+ pid = do_clone(pid_max_nested_inner, NULL, CLONE_NEWPID | CLONE_NEWNS);
+ if (pid < 0) {
+ fprintf(stderr, "%m - Failed to clone nested pidns\n");
+ goto reap;
+ }
+
+ if (wait_for_pid(pid)) {
+ fprintf(stderr, "%m - Nested pid_max failed\n");
+ goto reap;
+ }
+
+ fret = 0;
+
+reap:
+ for (int i = 0; i < nr_procs; i++)
+ wait_for_pid(pids[i]);
+
+ return fret;
+}
+
+static int pid_max_nested_limit_inner(void *data)
+{
+ int fret = -1, nr_procs = 400;
+ int fd, ret;
+ pid_t pid;
+ pid_t pids[1000];
+
+ ret = mount("", "/", NULL, MS_PRIVATE | MS_REC, 0);
+ if (ret) {
+ fprintf(stderr, "%m - Failed to make rootfs private mount\n");
+ return fret;
+ }
+
+ umount2("/proc", MNT_DETACH);
+
+ ret = mount("proc", "/proc", "proc", 0, NULL);
+ if (ret) {
+ fprintf(stderr, "%m - Failed to mount proc\n");
+ return fret;
+ }
+
+ fd = open("/proc/sys/kernel/pid_max", O_RDWR | O_CLOEXEC | O_NOCTTY);
+ if (fd < 0) {
+ fprintf(stderr, "%m - Failed to open pid_max\n");
+ return fret;
+ }
+
+ ret = write(fd, "500", sizeof("500") - 1);
+ close(fd);
+ if (ret < 0) {
+ fprintf(stderr, "%m - Failed to write pid_max\n");
+ return fret;
+ }
+
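+ /*
+ * This namespace sets pid_max to 500, but the ancestor namespace
+ * caps it at 400, so fork() must start failing before 400 children
+ * exist.
+ */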
+ for (nr_procs = 0; nr_procs < 500; nr_procs++) {
+ pid = fork();
+ if (pid < 0)
+ break;
+
+ if (pid == 0)
+ exit(EXIT_SUCCESS);
+
+ pids[nr_procs] = pid;
+ }
+
+ if (nr_procs >= 400) {
+ fprintf(stderr, "Managed to create processes beyond the configured outer limit\n");
+ goto reap;
+ }
+
+ fret = 0;
+
+reap:
+ for (int i = 0; i < nr_procs; i++)
+ wait_for_pid(pids[i]);
+
+ return fret;
+}
+
+static int pid_max_nested_limit_outer(void *data)
+{
+ int fd, ret;
+ pid_t pid;
+
+ ret = mount("", "/", NULL, MS_PRIVATE | MS_REC, 0);
+ if (ret) {
+ fprintf(stderr, "%m - Failed to make rootfs private mount\n");
+ return -1;
+ }
+
+ umount2("/proc", MNT_DETACH);
+
+ ret = mount("proc", "/proc", "proc", 0, NULL);
+ if (ret) {
+ fprintf(stderr, "%m - Failed to mount proc\n");
+ return -1;
+ }
+
+ fd = open("/proc/sys/kernel/pid_max", O_RDWR | O_CLOEXEC | O_NOCTTY);
+ if (fd < 0) {
+ fprintf(stderr, "%m - Failed to open pid_max\n");
+ return -1;
+ }
+
+ ret = write(fd, "400", sizeof("400") - 1);
+ close(fd);
+ if (ret < 0) {
+ fprintf(stderr, "%m - Failed to write pid_max\n");
+ return -1;
+ }
+
+ pid = do_clone(pid_max_nested_limit_inner, NULL, CLONE_NEWPID | CLONE_NEWNS);
+ if (pid < 0) {
+ fprintf(stderr, "%m - Failed to clone nested pidns\n");
+ return -1;
+ }
+
+ if (wait_for_pid(pid)) {
+ fprintf(stderr, "%m - Nested pid_max failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+TEST(pid_max_simple)
+{
+ pid_t pid;
+
+ pid = do_clone(pid_max_cb, NULL, CLONE_NEWPID | CLONE_NEWNS);
+ ASSERT_GT(pid, 0);
+ ASSERT_EQ(0, wait_for_pid(pid));
+}
+
+TEST(pid_max_nested_limit)
+{
+ pid_t pid;
+
+ pid = do_clone(pid_max_nested_limit_outer, NULL, CLONE_NEWPID | CLONE_NEWNS);
+ ASSERT_GT(pid, 0);
+ ASSERT_EQ(0, wait_for_pid(pid));
+}
+
+TEST(pid_max_nested)
+{
+ pid_t pid;
+
+ pid = do_clone(pid_max_nested_outer, NULL, CLONE_NEWPID | CLONE_NEWNS);
+ ASSERT_GT(pid, 0);
+ ASSERT_EQ(0, wait_for_pid(pid));
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
index 973198a3ec3d..bf92481f925c 100644
--- a/tools/testing/selftests/pidfd/.gitignore
+++ b/tools/testing/selftests/pidfd/.gitignore
@@ -6,3 +6,5 @@ pidfd_wait
pidfd_fdinfo_test
pidfd_getfd_test
pidfd_setns_test
+pidfd_file_handle_test
+pidfd_bind_mount
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
index d731e3e76d5b..301343a11b62 100644
--- a/tools/testing/selftests/pidfd/Makefile
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -2,7 +2,8 @@
CFLAGS += -g $(KHDR_INCLUDES) -pthread -Wall
TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test \
- pidfd_poll_test pidfd_wait pidfd_getfd_test pidfd_setns_test
+ pidfd_poll_test pidfd_wait pidfd_getfd_test pidfd_setns_test \
+ pidfd_file_handle_test pidfd_bind_mount
include ../lib.mk
diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
index 88d6830ee004..0b96ac4b8ce5 100644
--- a/tools/testing/selftests/pidfd/pidfd.h
+++ b/tools/testing/selftests/pidfd/pidfd.h
@@ -12,11 +12,11 @@
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
-#include <sys/mount.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "../kselftest.h"
+#include "../clone3/clone3_selftests.h"
#ifndef P_PIDFD
#define P_PIDFD 3
@@ -68,6 +68,11 @@
#define PIDFD_SKIP 3
#define PIDFD_XFAIL 4
+static inline int sys_waitid(int which, pid_t pid, siginfo_t *info, int options)
+{
+ return syscall(__NR_waitid, which, pid, info, options, NULL);
+}
+
static inline int wait_for_pid(pid_t pid)
{
int status, ret;
@@ -114,4 +119,37 @@ static inline int sys_memfd_create(const char *name, unsigned int flags)
return syscall(__NR_memfd_create, name, flags);
}
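+
+/*
+ * Create a child via clone3() with CLONE_PIDFD, returning its pid and
+ * storing the new pidfd in *pidfd.
+ */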
+static inline pid_t create_child(int *pidfd, unsigned flags)
+{
+ struct __clone_args args = {
+ .flags = CLONE_PIDFD | flags,
+ .exit_signal = SIGCHLD,
+ .pidfd = ptr_to_u64(pidfd),
+ };
+
+ return sys_clone3(&args, sizeof(struct __clone_args));
+}
+
+static inline ssize_t read_nointr(int fd, void *buf, size_t count)
+{
+ ssize_t ret;
+
+ do {
+ ret = read(fd, buf, count);
+ } while (ret < 0 && errno == EINTR);
+
+ return ret;
+}
+
+static inline ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+
+ do {
+ ret = write(fd, buf, count);
+ } while (ret < 0 && errno == EINTR);
+
+ return ret;
+}
+
#endif /* __PIDFD_H */
diff --git a/tools/testing/selftests/pidfd/pidfd_bind_mount.c b/tools/testing/selftests/pidfd/pidfd_bind_mount.c
new file mode 100644
index 000000000000..7822dd080258
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_bind_mount.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2024 Christian Brauner <brauner@kernel.org>
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <string.h>
+#include <linux/fs.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <unistd.h>
+
+#include "pidfd.h"
+#include "../kselftest_harness.h"
+
+#ifndef __NR_open_tree
+ #if defined __alpha__
+ #define __NR_open_tree 538
+ #elif defined _MIPS_SIM
+ #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
+ #define __NR_open_tree 4428
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
+ #define __NR_open_tree 6428
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
+ #define __NR_open_tree 5428
+ #endif
+ #elif defined __ia64__
+ #define __NR_open_tree (428 + 1024)
+ #else
+ #define __NR_open_tree 428
+ #endif
+#endif
+
+#ifndef __NR_move_mount
+ #if defined __alpha__
+ #define __NR_move_mount 539
+ #elif defined _MIPS_SIM
+ #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
+ #define __NR_move_mount 4429
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
+ #define __NR_move_mount 6429
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
+ #define __NR_move_mount 5429
+ #endif
+ #elif defined __ia64__
+ #define __NR_move_mount (429 + 1024)
+ #else
+ #define __NR_move_mount 429
+ #endif
+#endif
+
+#ifndef MOVE_MOUNT_F_EMPTY_PATH
+#define MOVE_MOUNT_F_EMPTY_PATH 0x00000004 /* Empty from path permitted */
+#endif
+
+#ifndef MOVE_MOUNT_T_EMPTY_PATH
+#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */
+#endif
+
+static inline int sys_move_mount(int from_dfd, const char *from_pathname,
+ int to_dfd, const char *to_pathname,
+ unsigned int flags)
+{
+ return syscall(__NR_move_mount, from_dfd, from_pathname, to_dfd,
+ to_pathname, flags);
+}
+
+#ifndef OPEN_TREE_CLONE
+#define OPEN_TREE_CLONE 1
+#endif
+
+#ifndef OPEN_TREE_CLOEXEC
+#define OPEN_TREE_CLOEXEC O_CLOEXEC
+#endif
+
+#ifndef AT_RECURSIVE
+#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
+#endif
+
+static inline int sys_open_tree(int dfd, const char *filename, unsigned int flags)
+{
+ return syscall(__NR_open_tree, dfd, filename, flags);
+}
+
+FIXTURE(pidfd_bind_mount) {
+ char template[PATH_MAX];
+ int fd_tmp;
+ int pidfd;
+ struct stat st1;
+ struct stat st2;
+ __u32 gen1;
+ __u32 gen2;
+ bool must_unmount;
+};
+
+FIXTURE_SETUP(pidfd_bind_mount)
+{
+ self->fd_tmp = -EBADF;
+ self->must_unmount = false;
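+ /* Work in a fresh mount namespace for the temporary bind mount. */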
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_LE(snprintf(self->template, PATH_MAX, "%s", P_tmpdir "/pidfd_bind_mount_XXXXXX"), PATH_MAX);
+ self->fd_tmp = mkstemp(self->template);
+ ASSERT_GE(self->fd_tmp, 0);
+ self->pidfd = sys_pidfd_open(getpid(), 0);
+ ASSERT_GE(self->pidfd, 0);
+ ASSERT_GE(fstat(self->pidfd, &self->st1), 0);
+ ASSERT_EQ(ioctl(self->pidfd, FS_IOC_GETVERSION, &self->gen1), 0);
+}
+
+FIXTURE_TEARDOWN(pidfd_bind_mount)
+{
+ ASSERT_EQ(close(self->fd_tmp), 0);
+ if (self->must_unmount)
+ ASSERT_EQ(umount2(self->template, 0), 0);
+ ASSERT_EQ(unlink(self->template), 0);
+}
+
+/*
+ * Test that a detached mount can be created for a pidfd and then
+ * attached to the filesystem hierarchy.
+ */
+TEST_F(pidfd_bind_mount, bind_mount)
+{
+ int fd_tree;
+
+ fd_tree = sys_open_tree(self->pidfd, "", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_EMPTY_PATH);
+ ASSERT_GE(fd_tree, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tree, "", self->fd_tmp, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+ self->must_unmount = true;
+
+ ASSERT_EQ(close(fd_tree), 0);
+}
+
+/* Test that a pidfd can be reopened through procfs. */
+TEST_F(pidfd_bind_mount, reopen)
+{
+ int pidfd;
+ char proc_path[PATH_MAX];
+
+ sprintf(proc_path, "/proc/self/fd/%d", self->pidfd);
+ pidfd = open(proc_path, O_RDONLY | O_NOCTTY | O_CLOEXEC);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_GE(fstat(pidfd, &self->st2), 0);
+ ASSERT_EQ(ioctl(pidfd, FS_IOC_GETVERSION, &self->gen2), 0);
+
+ ASSERT_TRUE(self->st1.st_dev == self->st2.st_dev && self->st1.st_ino == self->st2.st_ino);
+ ASSERT_TRUE(self->gen1 == self->gen2);
+
+ ASSERT_EQ(close(pidfd), 0);
+}
+
+/*
+ * Test that a detached mount can be created for a pidfd and then
+ * attached to the filesystem hierarchy and reopened.
+ */
+TEST_F(pidfd_bind_mount, bind_mount_reopen)
+{
+ int fd_tree, fd_pidfd_mnt;
+
+ fd_tree = sys_open_tree(self->pidfd, "", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_EMPTY_PATH);
+ ASSERT_GE(fd_tree, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tree, "", self->fd_tmp, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+ self->must_unmount = true;
+
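+ /* The pidfd is now reachable by path via the bind mount on the tmp file. */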
+ fd_pidfd_mnt = openat(-EBADF, self->template, O_RDONLY | O_NOCTTY | O_CLOEXEC);
+ ASSERT_GE(fd_pidfd_mnt, 0);
+
+ ASSERT_GE(fstat(fd_pidfd_mnt, &self->st2), 0);
+ ASSERT_EQ(ioctl(fd_pidfd_mnt, FS_IOC_GETVERSION, &self->gen2), 0);
+
+ ASSERT_TRUE(self->st1.st_dev == self->st2.st_dev && self->st1.st_ino == self->st2.st_ino);
+ ASSERT_TRUE(self->gen1 == self->gen2);
+
+ ASSERT_EQ(close(fd_tree), 0);
+ ASSERT_EQ(close(fd_pidfd_mnt), 0);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/pidfd/pidfd_file_handle_test.c b/tools/testing/selftests/pidfd/pidfd_file_handle_test.c
new file mode 100644
index 000000000000..439b9c6c0457
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_file_handle_test.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <linux/types.h>
+#include <poll.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <linux/kcmp.h>
+#include <sys/stat.h>
+
+#include "pidfd.h"
+#include "../kselftest_harness.h"
+
+FIXTURE(file_handle)
+{
+ pid_t pid;
+ int pidfd;
+
+ pid_t child_pid1;
+ int child_pidfd1;
+
+ pid_t child_pid2;
+ int child_pidfd2;
+
+ pid_t child_pid3;
+ int child_pidfd3;
+};
+
+FIXTURE_SETUP(file_handle)
+{
+ int ret;
+ int ipc_sockets[2];
+ char c;
+
+ self->pid = getpid();
+ self->pidfd = sys_pidfd_open(self->pid, 0);
+ ASSERT_GE(self->pidfd, 0);
+
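+ /*
+ * Create three children: the first in a new user namespace only, the
+ * second and third in new user and pid namespaces. Each child signals
+ * readiness over a socketpair and then pauses until teardown kills it.
+ */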
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ self->child_pid1 = create_child(&self->child_pidfd1, CLONE_NEWUSER);
+ EXPECT_GE(self->child_pid1, 0);
+
+ if (self->child_pid1 == 0) {
+ close(ipc_sockets[0]);
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0)
+ _exit(EXIT_FAILURE);
+
+ close(ipc_sockets[1]);
+
+ pause();
+ _exit(EXIT_SUCCESS);
+ }
+
+ close(ipc_sockets[1]);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ close(ipc_sockets[0]);
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ self->child_pid2 = create_child(&self->child_pidfd2, CLONE_NEWUSER | CLONE_NEWPID);
+ EXPECT_GE(self->child_pid2, 0);
+
+ if (self->child_pid2 == 0) {
+ close(ipc_sockets[0]);
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0)
+ _exit(EXIT_FAILURE);
+
+ close(ipc_sockets[1]);
+
+ pause();
+ _exit(EXIT_SUCCESS);
+ }
+
+ close(ipc_sockets[1]);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ close(ipc_sockets[0]);
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ self->child_pid3 = create_child(&self->child_pidfd3, CLONE_NEWUSER | CLONE_NEWPID);
+ EXPECT_GE(self->child_pid3, 0);
+
+ if (self->child_pid3 == 0) {
+ close(ipc_sockets[0]);
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0)
+ _exit(EXIT_FAILURE);
+
+ close(ipc_sockets[1]);
+
+ pause();
+ _exit(EXIT_SUCCESS);
+ }
+
+ close(ipc_sockets[1]);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ close(ipc_sockets[0]);
+}
+
+FIXTURE_TEARDOWN(file_handle)
+{
+ EXPECT_EQ(close(self->pidfd), 0);
+
+ EXPECT_EQ(sys_pidfd_send_signal(self->child_pidfd1, SIGKILL, NULL, 0), 0);
+ if (self->child_pidfd1 >= 0)
+ EXPECT_EQ(0, close(self->child_pidfd1));
+
+ EXPECT_EQ(sys_waitid(P_PID, self->child_pid1, NULL, WEXITED), 0);
+
+ EXPECT_EQ(sys_pidfd_send_signal(self->child_pidfd2, SIGKILL, NULL, 0), 0);
+ if (self->child_pidfd2 >= 0)
+ EXPECT_EQ(0, close(self->child_pidfd2));
+
+ EXPECT_EQ(sys_waitid(P_PID, self->child_pid2, NULL, WEXITED), 0);
+
+ if (self->child_pidfd3 >= 0) {
+ EXPECT_EQ(sys_pidfd_send_signal(self->child_pidfd3, SIGKILL, NULL, 0), 0);
+ EXPECT_EQ(0, close(self->child_pidfd3));
+ EXPECT_EQ(sys_waitid(P_PID, self->child_pid3, NULL, WEXITED), 0);
+ }
+}
+
+/*
+ * Test that we can decode a pidfs file handle in the same pid
+ * namespace.
+ */
+TEST_F(file_handle, file_handle_same_pidns)
+{
+ int mnt_id;
+ struct file_handle *fh;
+ int pidfd = -EBADF;
+ struct stat st1, st2;
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_EQ(name_to_handle_at(self->child_pidfd1, "", fh, &mnt_id, AT_EMPTY_PATH), 0);
+
+ ASSERT_EQ(fstat(self->child_pidfd1, &st1), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, 0);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, O_CLOEXEC);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, O_NONBLOCK);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
+ free(fh);
+}
+
+/*
+ * Test that we can decode a pidfs file handle from a child pid
+ * namespace.
+ */
+TEST_F(file_handle, file_handle_child_pidns)
+{
+ int mnt_id;
+ struct file_handle *fh;
+ int pidfd = -EBADF;
+ struct stat st1, st2;
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_EQ(name_to_handle_at(self->child_pidfd2, "", fh, &mnt_id, AT_EMPTY_PATH), 0);
+
+ ASSERT_EQ(fstat(self->child_pidfd2, &st1), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, 0);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, O_CLOEXEC);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, O_NONBLOCK);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
+ free(fh);
+}
+
+/*
+ * Test that decoding a pidfs file handle for a process in an ancestor
+ * pid namespace fails when attempted from a descendant pid namespace.
+ */
+TEST_F(file_handle, file_handle_foreign_pidns)
+{
+ int mnt_id;
+ struct file_handle *fh;
+ pid_t pid;
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_EQ(name_to_handle_at(self->pidfd, "", fh, &mnt_id, AT_EMPTY_PATH), 0);
+
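+ /*
+ * Join the child's user and pid namespaces; decoding a handle for a
+ * process in an ancestor pid namespace must fail from there.
+ */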
+ ASSERT_EQ(setns(self->child_pidfd2, CLONE_NEWUSER | CLONE_NEWPID), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+
+ if (pid == 0) {
+ int pidfd = open_by_handle_at(self->pidfd, fh, 0);
+ if (pidfd >= 0) {
+ TH_LOG("Managed to open pidfd outside of the caller's pid namespace hierarchy");
+ _exit(1);
+ }
+ _exit(0);
+ }
+
+ ASSERT_EQ(wait_for_pid(pid), 0);
+
+ free(fh);
+}
+
+/*
+ * Test that we can decode a pidfs file handle of a process that has
+ * exited but not been reaped.
+ */
+TEST_F(file_handle, pid_has_exited)
+{
+ int mnt_id, pidfd, child_pidfd3;
+ struct file_handle *fh;
+ struct stat st1, st2;
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_EQ(name_to_handle_at(self->child_pidfd3, "", fh, &mnt_id, AT_EMPTY_PATH), 0);
+
+ ASSERT_EQ(fstat(self->child_pidfd3, &st1), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, 0);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
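+ /*
+ * Kill the child and wait with WNOWAIT so it exits but is not reaped:
+ * the file handle must still be decodable for the zombie.
+ */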
+ child_pidfd3 = self->child_pidfd3;
+ self->child_pidfd3 = -EBADF;
+ EXPECT_EQ(sys_pidfd_send_signal(child_pidfd3, SIGKILL, NULL, 0), 0);
+ EXPECT_EQ(close(child_pidfd3), 0);
+ EXPECT_EQ(sys_waitid(P_PID, self->child_pid3, NULL, WEXITED | WNOWAIT), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, 0);
+ ASSERT_GE(pidfd, 0);
+
+ EXPECT_EQ(sys_waitid(P_PID, self->child_pid3, NULL, WEXITED), 0);
+}
+
+/*
+ * Test that we fail to decode a pidfs file handle of a process that has
+ * already been reaped.
+ */
+TEST_F(file_handle, pid_has_been_reaped)
+{
+ int mnt_id, pidfd, child_pidfd3;
+ struct file_handle *fh;
+ struct stat st1, st2;
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_EQ(name_to_handle_at(self->child_pidfd3, "", fh, &mnt_id, AT_EMPTY_PATH), 0);
+
+ ASSERT_EQ(fstat(self->child_pidfd3, &st1), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, 0);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+
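+ /*
+ * Kill and fully reap the child: decoding the stale file handle must
+ * now fail.
+ */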
+ child_pidfd3 = self->child_pidfd3;
+ self->child_pidfd3 = -EBADF;
+ EXPECT_EQ(sys_pidfd_send_signal(child_pidfd3, SIGKILL, NULL, 0), 0);
+ EXPECT_EQ(close(child_pidfd3), 0);
+ EXPECT_EQ(sys_waitid(P_PID, self->child_pid3, NULL, WEXITED), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, 0);
+ ASSERT_LT(pidfd, 0);
+}
+
+/*
+ * Test valid flags for opening a pidfd file handle. Note that
+ * PIDFD_NONBLOCK is defined as O_NONBLOCK and O_NONBLOCK is an alias
+ * for O_NDELAY. Also note that PIDFD_THREAD is an alias for O_EXCL.
+ */
+TEST_F(file_handle, open_by_handle_at_valid_flags)
+{
+ int mnt_id;
+ struct file_handle *fh;
+ int pidfd = -EBADF;
+ struct stat st1, st2;
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_EQ(name_to_handle_at(self->child_pidfd2, "", fh, &mnt_id, AT_EMPTY_PATH), 0);
+
+ ASSERT_EQ(fstat(self->child_pidfd2, &st1), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh,
+ O_RDONLY |
+ O_WRONLY |
+ O_RDWR |
+ O_NONBLOCK |
+ O_NDELAY |
+ O_CLOEXEC |
+ O_EXCL);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+}
+
+/*
+ * Test that invalid flags passed to open a pidfd file handle are
+ * rejected.
+ */
+TEST_F(file_handle, open_by_handle_at_invalid_flags)
+{
+ int mnt_id;
+ struct file_handle *fh;
+ int pidfd = -EBADF;
+ static const struct invalid_pidfs_file_handle_flags {
+ int oflag;
+ const char *oflag_name;
+ } invalid_pidfs_file_handle_flags[] = {
+ { FASYNC, "FASYNC" },
+ { O_CREAT, "O_CREAT" },
+ { O_NOCTTY, "O_NOCTTY" },
+ { O_TRUNC, "O_TRUNC" },
+ { O_APPEND, "O_APPEND" },
+ { O_SYNC, "O_SYNC" },
+ { O_DSYNC, "O_DSYNC" },
+ { O_DIRECT, "O_DIRECT" },
+ { O_DIRECTORY, "O_DIRECTORY" },
+ { O_NOFOLLOW, "O_NOFOLLOW" },
+ { O_NOATIME, "O_NOATIME" },
+ { O_PATH, "O_PATH" },
+ { O_TMPFILE, "O_TMPFILE" },
+ /*
+ * O_LARGEFILE is added implicitly by
+ * open_by_handle_at() so pidfs simply masks it off.
+ */
+ };
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_EQ(name_to_handle_at(self->child_pidfd2, "", fh, &mnt_id, AT_EMPTY_PATH), 0);
+
+ for (int i = 0; i < ARRAY_SIZE(invalid_pidfs_file_handle_flags); i++) {
+ pidfd = open_by_handle_at(self->pidfd, fh, invalid_pidfs_file_handle_flags[i].oflag);
+ ASSERT_LT(pidfd, 0) {
+ TH_LOG("open_by_handle_at() succeeded with invalid flags: %s", invalid_pidfs_file_handle_flags[i].oflag_name);
+ }
+ }
+}
+
+/* Test that lookup fails. */
+TEST_F(file_handle, lookup_must_fail)
+{
+ int mnt_id;
+ struct file_handle *fh;
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_NE(name_to_handle_at(self->child_pidfd2, "lookup-is-not-possible-with-pidfs", fh, &mnt_id, AT_EMPTY_PATH), 0);
+ ASSERT_EQ(errno, ENOTDIR);
+ ASSERT_NE(name_to_handle_at(self->child_pidfd2, "lookup-is-not-possible-with-pidfs", fh, &mnt_id, 0), 0);
+ ASSERT_EQ(errno, ENOTDIR);
+}
+
+#ifndef AT_HANDLE_CONNECTABLE
+#define AT_HANDLE_CONNECTABLE 0x002
+#endif
+
+/*
+ * Test that AT_HANDLE_CONNECTABLE is rejected. Connectable file handles
+ * don't make sense for pidfs. Note that currently AT_HANDLE_CONNECTABLE
+ * is rejected because it is incompatible with AT_EMPTY_PATH, which is
+ * required with pidfds since lookup is not supported.
+ */
+TEST_F(file_handle, invalid_name_to_handle_at_flags)
+{
+ int mnt_id;
+ struct file_handle *fh;
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_NE(name_to_handle_at(self->child_pidfd2, "", fh, &mnt_id, AT_EMPTY_PATH | AT_HANDLE_CONNECTABLE), 0);
+}
+
+#ifndef AT_HANDLE_FID
+#define AT_HANDLE_FID 0x200
+#endif
+
+/*
+ * Test that a request with AT_HANDLE_FID always yields a decodable file
+ * handle, as pidfs always provides export operations.
+ */
+TEST_F(file_handle, valid_name_to_handle_at_flags)
+{
+ int mnt_id, pidfd;
+ struct file_handle *fh;
+ struct stat st1, st2;
+
+ fh = malloc(sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ ASSERT_NE(fh, NULL);
+ memset(fh, 0, sizeof(struct file_handle) + MAX_HANDLE_SZ);
+ fh->handle_bytes = MAX_HANDLE_SZ;
+
+ ASSERT_EQ(name_to_handle_at(self->child_pidfd2, "", fh, &mnt_id, AT_EMPTY_PATH | AT_HANDLE_FID), 0);
+
+ ASSERT_EQ(fstat(self->child_pidfd2, &st1), 0);
+
+ pidfd = open_by_handle_at(self->pidfd, fh, 0);
+ ASSERT_GE(pidfd, 0);
+
+ ASSERT_EQ(fstat(pidfd, &st2), 0);
+ ASSERT_TRUE(st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
+
+ ASSERT_EQ(close(pidfd), 0);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/pidfd/pidfd_setns_test.c b/tools/testing/selftests/pidfd/pidfd_setns_test.c
index 7c2a4349170a..222f8131283b 100644
--- a/tools/testing/selftests/pidfd/pidfd_setns_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_setns_test.c
@@ -19,7 +19,6 @@
#include <linux/ioctl.h>
#include "pidfd.h"
-#include "../clone3/clone3_selftests.h"
#include "../kselftest_harness.h"
#ifndef PIDFS_IOCTL_MAGIC
@@ -118,22 +117,6 @@ FIXTURE(current_nsset)
int child_pidfd_derived_nsfds2[PIDFD_NS_MAX];
};
-static int sys_waitid(int which, pid_t pid, int options)
-{
- return syscall(__NR_waitid, which, pid, NULL, options, NULL);
-}
-
-pid_t create_child(int *pidfd, unsigned flags)
-{
- struct __clone_args args = {
- .flags = CLONE_PIDFD | flags,
- .exit_signal = SIGCHLD,
- .pidfd = ptr_to_u64(pidfd),
- };
-
- return sys_clone3(&args, sizeof(struct clone_args));
-}
-
static bool switch_timens(void)
{
int fd, ret;
@@ -150,28 +133,6 @@ static bool switch_timens(void)
return ret == 0;
}
-static ssize_t read_nointr(int fd, void *buf, size_t count)
-{
- ssize_t ret;
-
- do {
- ret = read(fd, buf, count);
- } while (ret < 0 && errno == EINTR);
-
- return ret;
-}
-
-static ssize_t write_nointr(int fd, const void *buf, size_t count)
-{
- ssize_t ret;
-
- do {
- ret = write(fd, buf, count);
- } while (ret < 0 && errno == EINTR);
-
- return ret;
-}
-
FIXTURE_SETUP(current_nsset)
{
int i, proc_fd, ret;
@@ -229,7 +190,7 @@ FIXTURE_SETUP(current_nsset)
_exit(EXIT_SUCCESS);
}
- ASSERT_EQ(sys_waitid(P_PID, self->child_pid_exited, WEXITED | WNOWAIT), 0);
+ ASSERT_EQ(sys_waitid(P_PID, self->child_pid_exited, NULL, WEXITED | WNOWAIT), 0);
self->pidfd = sys_pidfd_open(self->pid, 0);
EXPECT_GE(self->pidfd, 0) {
@@ -432,9 +393,9 @@ FIXTURE_TEARDOWN(current_nsset)
EXPECT_EQ(0, close(self->child_pidfd1));
if (self->child_pidfd2 >= 0)
EXPECT_EQ(0, close(self->child_pidfd2));
- ASSERT_EQ(sys_waitid(P_PID, self->child_pid_exited, WEXITED), 0);
- ASSERT_EQ(sys_waitid(P_PID, self->child_pid1, WEXITED), 0);
- ASSERT_EQ(sys_waitid(P_PID, self->child_pid2, WEXITED), 0);
+ ASSERT_EQ(sys_waitid(P_PID, self->child_pid_exited, NULL, WEXITED), 0);
+ ASSERT_EQ(sys_waitid(P_PID, self->child_pid1, NULL, WEXITED), 0);
+ ASSERT_EQ(sys_waitid(P_PID, self->child_pid2, NULL, WEXITED), 0);
}
static int preserve_ns(const int pid, const char *ns)
diff --git a/tools/testing/selftests/pidfd/pidfd_wait.c b/tools/testing/selftests/pidfd/pidfd_wait.c
index 0dcb8365ddc3..1e2d49751cde 100644
--- a/tools/testing/selftests/pidfd/pidfd_wait.c
+++ b/tools/testing/selftests/pidfd/pidfd_wait.c
@@ -26,22 +26,11 @@
#define SKIP(s, ...) XFAIL(s, ##__VA_ARGS__)
#endif
-static pid_t sys_clone3(struct clone_args *args)
-{
- return syscall(__NR_clone3, args, sizeof(struct clone_args));
-}
-
-static int sys_waitid(int which, pid_t pid, siginfo_t *info, int options,
- struct rusage *ru)
-{
- return syscall(__NR_waitid, which, pid, info, options, ru);
-}
-
TEST(wait_simple)
{
int pidfd = -1;
pid_t parent_tid = -1;
- struct clone_args args = {
+ struct __clone_args args = {
.parent_tid = ptr_to_u64(&parent_tid),
.pidfd = ptr_to_u64(&pidfd),
.flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
@@ -55,7 +44,7 @@ TEST(wait_simple)
pidfd = open("/proc/self", O_DIRECTORY | O_RDONLY | O_CLOEXEC);
ASSERT_GE(pidfd, 0);
- pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+ pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED);
ASSERT_NE(pid, 0);
EXPECT_EQ(close(pidfd), 0);
pidfd = -1;
@@ -63,18 +52,18 @@ TEST(wait_simple)
pidfd = open("/dev/null", O_RDONLY | O_CLOEXEC);
ASSERT_GE(pidfd, 0);
- pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+ pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED);
ASSERT_NE(pid, 0);
EXPECT_EQ(close(pidfd), 0);
pidfd = -1;
- pid = sys_clone3(&args);
+ pid = sys_clone3(&args, sizeof(args));
ASSERT_GE(pid, 0);
if (pid == 0)
exit(EXIT_SUCCESS);
- pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+ pid = sys_waitid(P_PIDFD, pidfd, &info, WEXITED);
ASSERT_GE(pid, 0);
ASSERT_EQ(WIFEXITED(info.si_status), true);
ASSERT_EQ(WEXITSTATUS(info.si_status), 0);
@@ -89,7 +78,7 @@ TEST(wait_states)
{
int pidfd = -1;
pid_t parent_tid = -1;
- struct clone_args args = {
+ struct __clone_args args = {
.parent_tid = ptr_to_u64(&parent_tid),
.pidfd = ptr_to_u64(&pidfd),
.flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
@@ -102,7 +91,7 @@ TEST(wait_states)
};
ASSERT_EQ(pipe(pfd), 0);
- pid = sys_clone3(&args);
+ pid = sys_clone3(&args, sizeof(args));
ASSERT_GE(pid, 0);
if (pid == 0) {
@@ -117,28 +106,28 @@ TEST(wait_states)
}
close(pfd[0]);
- ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WSTOPPED, NULL), 0);
+ ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WSTOPPED), 0);
ASSERT_EQ(info.si_signo, SIGCHLD);
ASSERT_EQ(info.si_code, CLD_STOPPED);
ASSERT_EQ(info.si_pid, parent_tid);
ASSERT_EQ(sys_pidfd_send_signal(pidfd, SIGCONT, NULL, 0), 0);
- ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WCONTINUED, NULL), 0);
+ ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WCONTINUED), 0);
ASSERT_EQ(write(pfd[1], "C", 1), 1);
close(pfd[1]);
ASSERT_EQ(info.si_signo, SIGCHLD);
ASSERT_EQ(info.si_code, CLD_CONTINUED);
ASSERT_EQ(info.si_pid, parent_tid);
- ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WUNTRACED, NULL), 0);
+ ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WUNTRACED), 0);
ASSERT_EQ(info.si_signo, SIGCHLD);
ASSERT_EQ(info.si_code, CLD_STOPPED);
ASSERT_EQ(info.si_pid, parent_tid);
ASSERT_EQ(sys_pidfd_send_signal(pidfd, SIGKILL, NULL, 0), 0);
- ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL), 0);
+ ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WEXITED), 0);
ASSERT_EQ(info.si_signo, SIGCHLD);
ASSERT_EQ(info.si_code, CLD_KILLED);
ASSERT_EQ(info.si_pid, parent_tid);
@@ -151,7 +140,7 @@ TEST(wait_nonblock)
int pidfd;
unsigned int flags = 0;
pid_t parent_tid = -1;
- struct clone_args args = {
+ struct __clone_args args = {
.parent_tid = ptr_to_u64(&parent_tid),
.flags = CLONE_PARENT_SETTID,
.exit_signal = SIGCHLD,
@@ -173,12 +162,12 @@ TEST(wait_nonblock)
SKIP(return, "Skipping PIDFD_NONBLOCK test");
}
- ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+ ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED);
ASSERT_LT(ret, 0);
ASSERT_EQ(errno, ECHILD);
EXPECT_EQ(close(pidfd), 0);
- pid = sys_clone3(&args);
+ pid = sys_clone3(&args, sizeof(args));
ASSERT_GE(pid, 0);
if (pid == 0) {
@@ -201,7 +190,7 @@ TEST(wait_nonblock)
* Callers need to see EAGAIN/EWOULDBLOCK with non-blocking pidfd when
* child processes exist but none have exited.
*/
- ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL);
+ ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED);
ASSERT_LT(ret, 0);
ASSERT_EQ(errno, EAGAIN);
@@ -210,19 +199,19 @@ TEST(wait_nonblock)
* WNOHANG raised explicitly when child processes exist but none have
* exited.
*/
- ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED | WNOHANG, NULL);
+ ret = sys_waitid(P_PIDFD, pidfd, &info, WEXITED | WNOHANG);
ASSERT_EQ(ret, 0);
ASSERT_EQ(fcntl(pidfd, F_SETFL, (flags & ~O_NONBLOCK)), 0);
- ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WSTOPPED, NULL), 0);
+ ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WSTOPPED), 0);
ASSERT_EQ(info.si_signo, SIGCHLD);
ASSERT_EQ(info.si_code, CLD_STOPPED);
ASSERT_EQ(info.si_pid, parent_tid);
ASSERT_EQ(sys_pidfd_send_signal(pidfd, SIGCONT, NULL, 0), 0);
- ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WEXITED, NULL), 0);
+ ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WEXITED), 0);
ASSERT_EQ(info.si_signo, SIGCHLD);
ASSERT_EQ(info.si_code, CLD_EXITED);
ASSERT_EQ(info.si_pid, parent_tid);
diff --git a/tools/testing/shared/linux/maple_tree.h b/tools/testing/shared/linux/maple_tree.h
index 06c89bdcc515..f67d47d32857 100644
--- a/tools/testing/shared/linux/maple_tree.h
+++ b/tools/testing/shared/linux/maple_tree.h
@@ -2,6 +2,6 @@
#define atomic_t int32_t
#define atomic_inc(x) uatomic_inc(x)
#define atomic_read(x) uatomic_read(x)
-#define atomic_set(x, y) do {} while (0)
+#define atomic_set(x, y) uatomic_set(x, y)
#define U8_MAX UCHAR_MAX
#include "../../../../include/linux/maple_tree.h"
diff --git a/tools/testing/vma/linux/atomic.h b/tools/testing/vma/linux/atomic.h
index e01f66f98982..3e1b6adc027b 100644
--- a/tools/testing/vma/linux/atomic.h
+++ b/tools/testing/vma/linux/atomic.h
@@ -6,7 +6,7 @@
#define atomic_t int32_t
#define atomic_inc(x) uatomic_inc(x)
#define atomic_read(x) uatomic_read(x)
-#define atomic_set(x, y) do {} while (0)
+#define atomic_set(x, y) uatomic_set(x, y)
#define U8_MAX UCHAR_MAX
#endif /* _LINUX_ATOMIC_H */