Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 45
-rw-r--r--  include/linux/amba/bus.h | 2
-rw-r--r--  include/linux/anon_inodes.h | 5
-rw-r--r--  include/linux/arm-smccc.h | 31
-rw-r--r--  include/linux/atmdev.h | 2
-rw-r--r--  include/linux/binfmts.h | 4
-rw-r--r--  include/linux/bio.h | 64
-rw-r--r--  include/linux/bitops.h | 2
-rw-r--r--  include/linux/blk-mq.h | 20
-rw-r--r--  include/linux/blk_types.h | 33
-rw-r--r--  include/linux/blkdev.h | 56
-rw-r--r--  include/linux/blktrace_api.h | 4
-rw-r--r--  include/linux/bpf-cgroup.h | 101
-rw-r--r--  include/linux/bpf.h | 92
-rw-r--r--  include/linux/bpf_verifier.h | 8
-rw-r--r--  include/linux/brcmphy.h | 25
-rw-r--r--  include/linux/btf.h | 3
-rw-r--r--  include/linux/buildid.h | 12
-rw-r--r--  include/linux/can/bittiming.h | 44
-rw-r--r--  include/linux/can/can-ml.h | 12
-rw-r--r--  include/linux/can/dev.h | 136
-rw-r--r--  include/linux/can/length.h | 174
-rw-r--r--  include/linux/can/rx-offload.h | 3
-rw-r--r--  include/linux/can/skb.h | 80
-rw-r--r--  include/linux/capability.h | 14
-rw-r--r--  include/linux/ceph/libceph.h | 7
-rw-r--r--  include/linux/cfag12864b.h | 2
-rw-r--r--  include/linux/cgroup.h | 4
-rw-r--r--  include/linux/clk-provider.h | 4
-rw-r--r--  include/linux/clk.h | 28
-rw-r--r--  include/linux/clk/imx.h | 15
-rw-r--r--  include/linux/clk/spear.h | 23
-rw-r--r--  include/linux/clk/tegra.h | 8
-rw-r--r--  include/linux/compiler-clang.h | 10
-rw-r--r--  include/linux/compiler-gcc.h | 11
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/compiler_attributes.h | 6
-rw-r--r--  include/linux/connector.h | 2
-rw-r--r--  include/linux/coresight-pmu.h | 20
-rw-r--r--  include/linux/coresight.h | 218
-rw-r--r--  include/linux/cpu.h | 2
-rw-r--r--  include/linux/cpufreq.h | 30
-rw-r--r--  include/linux/cpuhotplug.h | 3
-rw-r--r--  include/linux/cred.h | 2
-rw-r--r--  include/linux/crypto.h | 172
-rw-r--r--  include/linux/dcache.h | 2
-rw-r--r--  include/linux/dcookies.h | 69
-rw-r--r--  include/linux/devfreq.h | 2
-rw-r--r--  include/linux/device-mapper.h | 32
-rw-r--r--  include/linux/device.h | 3
-rw-r--r--  include/linux/device/driver.h | 2
-rw-r--r--  include/linux/dfl.h | 86
-rw-r--r--  include/linux/dma-buf.h | 45
-rw-r--r--  include/linux/dma-fence.h | 3
-rw-r--r--  include/linux/dma-heap.h | 12
-rw-r--r--  include/linux/dma-map-ops.h | 10
-rw-r--r--  include/linux/dma-mapping.h | 33
-rw-r--r--  include/linux/dma/k3-psil.h | 13
-rw-r--r--  include/linux/dma/mmp-pdma.h | 16
-rw-r--r--  include/linux/dmaengine.h | 2
-rw-r--r--  include/linux/dmar.h | 2
-rw-r--r--  include/linux/dsa/8021q.h | 14
-rw-r--r--  include/linux/dsa/brcm.h | 16
-rw-r--r--  include/linux/dsa/ocelot.h | 223
-rw-r--r--  include/linux/dtpm.h | 77
-rw-r--r--  include/linux/eeprom_93xx46.h | 2
-rw-r--r--  include/linux/efi.h | 19
-rw-r--r--  include/linux/elevator.h | 2
-rw-r--r--  include/linux/elfcore-compat.h | 15
-rw-r--r--  include/linux/elfcore.h | 7
-rw-r--r--  include/linux/entry-common.h | 4
-rw-r--r--  include/linux/entry-kvm.h | 14
-rw-r--r--  include/linux/ethtool.h | 5
-rw-r--r--  include/linux/eventpoll.h | 2
-rw-r--r--  include/linux/export.h | 9
-rw-r--r--  include/linux/exportfs.h | 1
-rw-r--r--  include/linux/f2fs_fs.h | 3
-rw-r--r--  include/linux/fcntl.h | 2
-rw-r--r--  include/linux/filter.h | 48
-rw-r--r--  include/linux/firmware/intel/stratix10-svc-client.h | 10
-rw-r--r--  include/linux/firmware/xlnx-zynqmp.h | 339
-rw-r--r--  include/linux/fixp-arith.h | 19
-rw-r--r--  include/linux/fortify-string.h | 302
-rw-r--r--  include/linux/fs.h | 228
-rw-r--r--  include/linux/fsl/mc.h | 8
-rw-r--r--  include/linux/fsnotify_backend.h | 1
-rw-r--r--  include/linux/fsverity.h | 12
-rw-r--r--  include/linux/ftrace.h | 2
-rw-r--r--  include/linux/fwnode.h | 27
-rw-r--r--  include/linux/genhd.h | 27
-rw-r--r--  include/linux/gfp.h | 28
-rw-r--r--  include/linux/gpio/machine.h | 4
-rw-r--r--  include/linux/hid-sensor-hub.h | 9
-rw-r--r--  include/linux/hid-sensor-ids.h | 14
-rw-r--r--  include/linux/hid.h | 15
-rw-r--r--  include/linux/highmem-internal.h | 5
-rw-r--r--  include/linux/huge_mm.h | 15
-rw-r--r--  include/linux/hugetlb.h | 98
-rw-r--r--  include/linux/hyperv.h | 13
-rw-r--r--  include/linux/i3c/device.h | 2
-rw-r--r--  include/linux/icmpv6.h | 28
-rw-r--r--  include/linux/if_hsr.h | 27
-rw-r--r--  include/linux/iio/adc/qcom-vadc-common.h | 174
-rw-r--r--  include/linux/iio/consumer.h | 36
-rw-r--r--  include/linux/ima.h | 28
-rw-r--r--  include/linux/indirect_call_wrapper.h | 8
-rw-r--r--  include/linux/init.h | 83
-rw-r--r--  include/linux/initrd.h | 11
-rw-r--r--  include/linux/intel-iommu.h | 43
-rw-r--r--  include/linux/intel-pti.h | 35
-rw-r--r--  include/linux/interrupt.h | 9
-rw-r--r--  include/linux/io-pgtable.h | 19
-rw-r--r--  include/linux/io_uring.h | 32
-rw-r--r--  include/linux/iomap.h | 19
-rw-r--r--  include/linux/iommu.h | 21
-rw-r--r--  include/linux/ioport.h | 6
-rw-r--r--  include/linux/iova.h | 12
-rw-r--r--  include/linux/ipv6.h | 2
-rw-r--r--  include/linux/irqflags.h | 12
-rw-r--r--  include/linux/isa.h | 2
-rw-r--r--  include/linux/jump_label.h | 12
-rw-r--r--  include/linux/kallsyms.h | 17
-rw-r--r--  include/linux/kasan-checks.h | 6
-rw-r--r--  include/linux/kasan.h | 48
-rw-r--r--  include/linux/kbd_kern.h | 10
-rw-r--r--  include/linux/kconfig.h | 6
-rw-r--r--  include/linux/kd.h | 8
-rw-r--r--  include/linux/kernel.h | 23
-rw-r--r--  include/linux/kexec.h | 7
-rw-r--r--  include/linux/key.h | 5
-rw-r--r--  include/linux/keyslot-manager.h | 14
-rw-r--r--  include/linux/kfence.h | 222
-rw-r--r--  include/linux/kgdb.h | 3
-rw-r--r--  include/linux/khugepaged.h | 2
-rw-r--r--  include/linux/ks0108.h | 2
-rw-r--r--  include/linux/kvm_host.h | 35
-rw-r--r--  include/linux/led-class-flash.h | 42
-rw-r--r--  include/linux/led-class-multicolor.h | 42
-rw-r--r--  include/linux/leds.h | 12
-rw-r--r--  include/linux/list.h | 2
-rw-r--r--  include/linux/litex.h | 150
-rw-r--r--  include/linux/local_lock_internal.h | 5
-rw-r--r--  include/linux/lockdep.h | 15
-rw-r--r--  include/linux/lockdep_types.h | 18
-rw-r--r--  include/linux/lsm_hook_defs.h | 17
-rw-r--r--  include/linux/lsm_hooks.h | 10
-rw-r--r--  include/linux/mdev.h | 2
-rw-r--r--  include/linux/mdio.h | 23
-rw-r--r--  include/linux/mei_cl_bus.h | 2
-rw-r--r--  include/linux/memblock.h | 6
-rw-r--r--  include/linux/memcontrol.h | 43
-rw-r--r--  include/linux/memory.h | 3
-rw-r--r--  include/linux/memory_hotplug.h | 33
-rw-r--r--  include/linux/memremap.h | 6
-rw-r--r--  include/linux/mfd/abx500/ab8500.h | 3
-rw-r--r--  include/linux/mfd/axp20x.h | 2
-rw-r--r--  include/linux/mfd/bd9571mwv.h | 45
-rw-r--r--  include/linux/mfd/core.h | 6
-rw-r--r--  include/linux/mfd/hi6421-spmi-pmic.h | 29
-rw-r--r--  include/linux/mfd/intel-m10-bmc.h | 9
-rw-r--r--  include/linux/mfd/intel_msic.h | 453
-rw-r--r--  include/linux/mfd/iqs62x.h | 11
-rw-r--r--  include/linux/mfd/rohm-generic.h | 16
-rw-r--r--  include/linux/mhi.h | 22
-rw-r--r--  include/linux/migrate.h | 2
-rw-r--r--  include/linux/mlx5/device.h | 14
-rw-r--r--  include/linux/mlx5/driver.h | 56
-rw-r--r--  include/linux/mlx5/eswitch.h | 29
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 118
-rw-r--r--  include/linux/mm-arch-hooks.h | 22
-rw-r--r--  include/linux/mm.h | 102
-rw-r--r--  include/linux/mm_inline.h | 113
-rw-r--r--  include/linux/mm_types.h | 7
-rw-r--r--  include/linux/mmc/card.h | 1
-rw-r--r--  include/linux/mmc/core.h | 6
-rw-r--r--  include/linux/mmc/host.h | 25
-rw-r--r--  include/linux/mmzone.h | 71
-rw-r--r--  include/linux/mod_devicetable.h | 42
-rw-r--r--  include/linux/module.h | 48
-rw-r--r--  include/linux/mount.h | 8
-rw-r--r--  include/linux/mtd/spi-nor.h | 1
-rw-r--r--  include/linux/mutex.h | 25
-rw-r--r--  include/linux/namei.h | 1
-rw-r--r--  include/linux/nd.h | 2
-rw-r--r--  include/linux/net.h | 3
-rw-r--r--  include/linux/netdev_features.h | 13
-rw-r--r--  include/linux/netdevice.h | 171
-rw-r--r--  include/linux/netfilter.h | 2
-rw-r--r--  include/linux/netlink.h | 6
-rw-r--r--  include/linux/nfs_fs.h | 10
-rw-r--r--  include/linux/nfs_fs_sb.h | 4
-rw-r--r--  include/linux/nfsacl.h | 3
-rw-r--r--  include/linux/nvme.h | 30
-rw-r--r--  include/linux/objtool.h | 13
-rw-r--r--  include/linux/of_device.h | 14
-rw-r--r--  include/linux/of_irq.h | 9
-rw-r--r--  include/linux/of_mdio.h | 10
-rw-r--r--  include/linux/oprofile.h | 209
-rw-r--r--  include/linux/page-flags.h | 10
-rw-r--r--  include/linux/page_counter.h | 9
-rw-r--r--  include/linux/pagemap.h | 11
-rw-r--r--  include/linux/pagevec.h | 4
-rw-r--r--  include/linux/parport.h | 31
-rw-r--r--  include/linux/parser.h | 1
-rw-r--r--  include/linux/pci-epc.h | 39
-rw-r--r--  include/linux/pci-epf.h | 28
-rw-r--r--  include/linux/pci.h | 36
-rw-r--r--  include/linux/pci_ids.h | 4
-rw-r--r--  include/linux/perf_event.h | 4
-rw-r--r--  include/linux/pgtable.h | 19
-rw-r--r--  include/linux/phy.h | 38
-rw-r--r--  include/linux/platform_data/clk-u300.h | 1
-rw-r--r--  include/linux/platform_data/cros_ec_commands.h | 45
-rw-r--r--  include/linux/platform_data/dma-atmel.h | 61
-rw-r--r--  include/linux/platform_data/dma-coh901318.h | 72
-rw-r--r--  include/linux/platform_data/dma-imx-sdma.h | 11
-rw-r--r--  include/linux/platform_data/efm32-spi.h | 15
-rw-r--r--  include/linux/platform_data/efm32-uart.h | 19
-rw-r--r--  include/linux/platform_data/i2c-hid.h | 41
-rw-r--r--  include/linux/platform_data/mlxcpld.h | 31
-rw-r--r--  include/linux/platform_data/mmc-omap.h | 3
-rw-r--r--  include/linux/platform_data/x86/mlxcpld.h | 52
-rw-r--r--  include/linux/platform_profile.h | 41
-rw-r--r--  include/linux/pm.h | 2
-rw-r--r--  include/linux/pm_domain.h | 12
-rw-r--r--  include/linux/pm_opp.h | 112
-rw-r--r--  include/linux/pmbus.h | 9
-rw-r--r--  include/linux/posix_acl.h | 21
-rw-r--r--  include/linux/posix_acl_xattr.h | 12
-rw-r--r--  include/linux/power/max8903_charger.h | 43
-rw-r--r--  include/linux/property.h | 7
-rw-r--r--  include/linux/psp-sev.h | 17
-rw-r--r--  include/linux/ptrace.h | 2
-rw-r--r--  include/linux/qed/qed_chain.h | 2
-rw-r--r--  include/linux/rbtree.h | 206
-rw-r--r--  include/linux/rcu_segcblist.h | 120
-rw-r--r--  include/linux/rcupdate.h | 44
-rw-r--r--  include/linux/regulator/ab8500.h | 166
-rw-r--r--  include/linux/regulator/mt6315-regulator.h | 44
-rw-r--r--  include/linux/regulator/pca9450.h | 7
-rw-r--r--  include/linux/remoteproc/qcom_rproc.h | 4
-rw-r--r--  include/linux/reset.h | 19
-rw-r--r--  include/linux/rmap.h | 3
-rw-r--r--  include/linux/rpmsg/qcom_glink.h | 8
-rw-r--r--  include/linux/rtc.h | 2
-rw-r--r--  include/linux/rtc/sirfsoc_rtciobrg.h | 21
-rw-r--r--  include/linux/rwlock.h | 7
-rw-r--r--  include/linux/sched.h | 66
-rw-r--r--  include/linux/sched/prio.h | 18
-rw-r--r--  include/linux/security.h | 64
-rw-r--r--  include/linux/sfi.h | 210
-rw-r--r--  include/linux/sfi_acpi.h | 93
-rw-r--r--  include/linux/sirfsoc_dma.h | 7
-rw-r--r--  include/linux/skbuff.h | 177
-rw-r--r--  include/linux/skmsg.h | 1
-rw-r--r--  include/linux/slab.h | 2
-rw-r--r--  include/linux/slab_def.h | 3
-rw-r--r--  include/linux/slub_def.h | 3
-rw-r--r--  include/linux/soc/brcmstb/brcmstb.h | 16
-rw-r--r--  include/linux/soc/marvell/octeontx2/asm.h | 8
-rw-r--r--  include/linux/soc/mediatek/infracfg.h | 8
-rw-r--r--  include/linux/soc/mediatek/mtk-cmdq.h | 12
-rw-r--r--  include/linux/soc/mediatek/mtk-mutex.h | 26
-rw-r--r--  include/linux/soc/qcom/llcc-qcom.h | 3
-rw-r--r--  include/linux/soc/qcom/mdt_loader.h | 35
-rw-r--r--  include/linux/sony-laptop.h | 2
-rw-r--r--  include/linux/soundwire/sdw.h | 2
-rw-r--r--  include/linux/spi/ifx_modem.h | 15
-rw-r--r--  include/linux/spi/lms283gf05.h | 16
-rw-r--r--  include/linux/spi/spi-mem.h | 9
-rw-r--r--  include/linux/spi/spi.h | 44
-rw-r--r--  include/linux/srcu.h | 3
-rw-r--r--  include/linux/srcutiny.h | 7
-rw-r--r--  include/linux/ssb/ssb_driver_gige.h | 14
-rw-r--r--  include/linux/stackdepot.h | 9
-rw-r--r--  include/linux/static_call.h | 77
-rw-r--r--  include/linux/static_call_types.h | 50
-rw-r--r--  include/linux/stmmac.h | 1
-rw-r--r--  include/linux/string.h | 282
-rw-r--r--  include/linux/sunrpc/msg_prot.h | 3
-rw-r--r--  include/linux/sunrpc/svc.h | 1
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 15
-rw-r--r--  include/linux/sunrpc/svcsock.h | 2
-rw-r--r--  include/linux/sunrpc/xdr.h | 13
-rw-r--r--  include/linux/sunxi-rsb.h | 2
-rw-r--r--  include/linux/surface_acpi_notify.h | 39
-rw-r--r--  include/linux/surface_aggregator/controller.h | 824
-rw-r--r--  include/linux/surface_aggregator/device.h | 423
-rw-r--r--  include/linux/surface_aggregator/serial_hub.h | 672
-rw-r--r--  include/linux/swap.h | 9
-rw-r--r--  include/linux/swiotlb.h | 1
-rw-r--r--  include/linux/syscalls.h | 12
-rw-r--r--  include/linux/sysfs.h | 2
-rw-r--r--  include/linux/tcp.h | 3
-rw-r--r--  include/linux/tee_drv.h | 2
-rw-r--r--  include/linux/thermal.h | 18
-rw-r--r--  include/linux/thunderbolt.h | 3
-rw-r--r--  include/linux/timer.h | 2
-rw-r--r--  include/linux/topology.h | 1
-rw-r--r--  include/linux/torture.h | 27
-rw-r--r--  include/linux/tpm.h | 14
-rw-r--r--  include/linux/trace.h | 3
-rw-r--r--  include/linux/trace_events.h | 75
-rw-r--r--  include/linux/tracepoint.h | 54
-rw-r--r--  include/linux/tty.h | 11
-rw-r--r--  include/linux/tty_ldisc.h | 3
-rw-r--r--  include/linux/types.h | 8
-rw-r--r--  include/linux/uio.h | 8
-rw-r--r--  include/linux/units.h | 4
-rw-r--r--  include/linux/usb/cdc_ncm.h | 2
-rw-r--r--  include/linux/usb/ch9.h | 20
-rw-r--r--  include/linux/usb/chipidea.h | 6
-rw-r--r--  include/linux/usb/composite.h | 6
-rw-r--r--  include/linux/usb/gadget.h | 11
-rw-r--r--  include/linux/usb/pd.h | 3
-rw-r--r--  include/linux/usb/pd_vdo.h | 304
-rw-r--r--  include/linux/usb/serial.h | 2
-rw-r--r--  include/linux/usb/tcpm.h | 9
-rw-r--r--  include/linux/usb/tegra_usb_phy.h | 2
-rw-r--r--  include/linux/usb/typec.h | 23
-rw-r--r--  include/linux/usb/typec_altmode.h | 10
-rw-r--r--  include/linux/vdpa.h | 44
-rw-r--r--  include/linux/verification.h | 2
-rw-r--r--  include/linux/vfio.h | 7
-rw-r--r--  include/linux/vgaarb.h | 6
-rw-r--r--  include/linux/virtio_pci_modern.h | 111
-rw-r--r--  include/linux/vmalloc.h | 6
-rw-r--r--  include/linux/vme.h | 2
-rw-r--r--  include/linux/vmstat.h | 6
-rw-r--r--  include/linux/vmw_vmci_defs.h | 4
-rw-r--r--  include/linux/vt_kern.h | 12
-rw-r--r--  include/linux/w1.h | 2
-rw-r--r--  include/linux/wm97xx.h | 1
-rw-r--r--  include/linux/workqueue.h | 2
-rw-r--r--  include/linux/xattr.h | 30
-rw-r--r--  include/linux/z2_battery.h | 1
-rw-r--r--  include/linux/zpool.h | 3
-rw-r--r--  include/linux/zsmalloc.h | 2
-rw-r--r--  include/linux/zstd.h | 8
339 files changed, 8327 insertions, 4131 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 053bf05fb1f7..3c5757d539ab 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -546,9 +546,19 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_OSLPI_SUPPORT 0x00000100
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
+#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
+extern bool osc_sb_native_usb4_support_confirmed;
+
+/* USB4 Capabilities */
+#define OSC_USB_USB3_TUNNELING 0x00000001
+#define OSC_USB_DP_TUNNELING 0x00000002
+#define OSC_USB_PCIE_TUNNELING 0x00000004
+#define OSC_USB_XDOMAIN 0x00000008
+
+extern u32 osc_sb_native_usb4_control;
/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001
@@ -581,9 +591,6 @@ extern bool osc_pc_lpi_support_confirmed;
#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E
#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F
-extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
- u32 *mask, u32 req);
-
/* Enable _OST when all relevant hotplug operations are enabled */
#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \
defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \
@@ -1114,14 +1121,6 @@ acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
-int acpi_dev_prop_read_single(struct acpi_device *adev,
- const char *propname, enum dev_prop_type proptype,
- void *val);
-int acpi_node_prop_read(const struct fwnode_handle *fwnode,
- const char *propname, enum dev_prop_type proptype,
- void *val, size_t nval);
-int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval);
struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
@@ -1223,30 +1222,6 @@ static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode,
return -ENXIO;
}
-static inline int acpi_dev_prop_read_single(const struct acpi_device *adev,
- const char *propname,
- enum dev_prop_type proptype,
- void *val)
-{
- return -ENXIO;
-}
-
-static inline int acpi_node_prop_read(const struct fwnode_handle *fwnode,
- const char *propname,
- enum dev_prop_type proptype,
- void *val, size_t nval)
-{
- return -ENXIO;
-}
-
-static inline int acpi_dev_prop_read(const struct acpi_device *adev,
- const char *propname,
- enum dev_prop_type proptype,
- void *val, size_t nval)
-{
- return -ENXIO;
-}
-
static inline struct fwnode_handle *
acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 0bbfd647f5c6..6cc93ab5b809 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -76,7 +76,7 @@ struct amba_device {
struct amba_driver {
struct device_driver drv;
int (*probe)(struct amba_device *, const struct amba_id *);
- int (*remove)(struct amba_device *);
+ void (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
const struct amba_id *id_table;
};
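
For illustration, a minimal driver stub (all names hypothetical) showing how the changed prototype is used: with .remove() returning void, an AMBA driver can no longer report a removal failure through its return value.

#include <linux/amba/bus.h>

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
        return 0;
}

/* remove() now returns void; any cleanup failure has to be handled here */
static void foo_remove(struct amba_device *adev)
{
}

static struct amba_driver foo_driver = {
        .drv    = { .name = "foo" },
        .probe  = foo_probe,
        .remove = foo_remove,
};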
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index d0d7d96261ad..71881a2b6f78 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -10,12 +10,17 @@
#define _LINUX_ANON_INODES_H
struct file_operations;
+struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
+int anon_inode_getfd_secure(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
#endif /* _LINUX_ANON_INODES_H */
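
As a usage sketch (everything except anon_inode_getfd_secure() is hypothetical), the new variant takes a context inode so security modules can relate the anonymous inode to the object that created it:

#include <linux/anon_inodes.h>
#include <linux/fcntl.h>
#include <linux/fs.h>

/* Hypothetical wrapper: install a file descriptor for a private object and
 * associate its anonymous inode with @context_inode. */
static int example_install_fd(const struct file_operations *fops, void *priv,
                              const struct inode *context_inode)
{
        return anon_inode_getfd_secure("[example]", fops, priv,
                                       O_RDWR | O_CLOEXEC, context_inode);
}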
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index f860645f6512..62c54234576c 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -102,6 +102,37 @@
ARM_SMCCC_OWNER_STANDARD_HYP, \
0x21)
+/* TRNG entropy source calls (defined by ARM DEN0098) */
+#define ARM_SMCCC_TRNG_VERSION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x50)
+
+#define ARM_SMCCC_TRNG_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x51)
+
+#define ARM_SMCCC_TRNG_GET_UUID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x52)
+
+#define ARM_SMCCC_TRNG_RND32 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+#define ARM_SMCCC_TRNG_RND64 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
/*
* Return codes defined in ARM DEN 0070A
* ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index d7493016cd46..60cd25c0461b 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -207,7 +207,7 @@ struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
unsigned int acct_truesize; /* truesize accounted to vcc */
-};
+} __packed;
#define VCC_HTABLE_SIZE 32
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 0571701ab1c5..0abd93efc181 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -73,6 +73,10 @@ struct linux_binprm {
#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
+/* preserve argv0 for the interpreter */
+#define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3
+#define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT)
+
/* Function parameter for binfmt->coredump */
struct coredump_params {
const kernel_siginfo_t *siginfo;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 1edda614f7ce..983ed2fe7c85 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -10,6 +10,7 @@
#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
+#include <linux/uio.h>
#define BIO_DEBUG
@@ -19,7 +20,12 @@
#define BIO_BUG_ON
#endif
-#define BIO_MAX_PAGES 256
+#define BIO_MAX_PAGES 256U
+
+static inline unsigned int bio_max_segs(unsigned int nr_segs)
+{
+ return min(nr_segs, BIO_MAX_PAGES);
+}
#define bio_prio(bio) (bio)->bi_ioprio
#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
@@ -328,7 +334,6 @@ struct bio_integrity_payload {
struct bvec_iter bip_iter;
- unsigned short bip_slab; /* slab the bip came from */
unsigned short bip_vcnt; /* # of integrity bio_vecs */
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned short bip_flags; /* control flags */
@@ -406,7 +411,9 @@ extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
-extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
+struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
+ struct bio_set *bs);
+struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);
extern void __bio_clone_fast(struct bio *, struct bio *);
@@ -414,16 +421,11 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio_set fs_bio_set;
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}
-static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
-}
-
extern blk_qc_t submit_bio(struct bio *);
extern void bio_endio(struct bio *);
@@ -441,6 +443,18 @@ static inline void bio_wouldblock_error(struct bio *bio)
bio_endio(bio);
}
+/*
+ * Calculate number of bvec segments that should be allocated to fit data
+ * pointed by @iter. If @iter is backed by bvec it's going to be reused
+ * instead of allocating a new one.
+ */
+static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
+{
+ if (iov_iter_is_bvec(iter))
+ return 0;
+ return iov_iter_npages(iter, max_segs);
+}
+
struct request_queue;
extern int submit_bio_wait(struct bio *bio);
@@ -455,6 +469,8 @@ void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
+int bio_add_zone_append_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off, bool *same_page);
void __bio_add_page(struct bio *bio, struct page *page,
@@ -478,29 +494,26 @@ static inline void zero_fill_bio(struct bio *bio)
zero_fill_bio_iter(bio, bio->bi_iter);
}
-extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
-extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
-extern unsigned int bvec_nr_vecs(unsigned short idx);
extern const char *bio_devname(struct bio *bio, char *buffer);
-#define bio_set_dev(bio, bdev) \
-do { \
- if ((bio)->bi_disk != (bdev)->bd_disk) \
- bio_clear_flag(bio, BIO_THROTTLED);\
- (bio)->bi_disk = (bdev)->bd_disk; \
- (bio)->bi_partno = (bdev)->bd_partno; \
- bio_associate_blkg(bio); \
+#define bio_set_dev(bio, bdev) \
+do { \
+ bio_clear_flag(bio, BIO_REMAPPED); \
+ if ((bio)->bi_bdev != (bdev)) \
+ bio_clear_flag(bio, BIO_THROTTLED); \
+ (bio)->bi_bdev = (bdev); \
+ bio_associate_blkg(bio); \
} while (0)
#define bio_copy_dev(dst, src) \
do { \
- (dst)->bi_disk = (src)->bi_disk; \
- (dst)->bi_partno = (src)->bi_partno; \
+ bio_clear_flag(dst, BIO_REMAPPED); \
+ (dst)->bi_bdev = (src)->bi_bdev; \
bio_clone_blkg_association(dst, src); \
} while (0)
#define bio_dev(bio) \
- disk_devt((bio)->bi_disk)
+ disk_devt((bio)->bi_bdev->bd_disk)
#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
@@ -703,6 +716,7 @@ struct bio_set {
mempool_t bvec_integrity_pool;
#endif
+ unsigned int back_pad;
/*
* Deadlock avoidance for stacking block drivers: see comments in
* bio_alloc_bioset() for details
@@ -713,12 +727,6 @@ struct bio_set {
struct workqueue_struct *rescue_workqueue;
};
-struct biovec_slab {
- int nr_vecs;
- char *name;
- struct kmem_cache *slab;
-};
-
static inline bool bioset_initialized(struct bio_set *bs)
{
return bs->bio_slab != NULL;
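
To tie the pieces above together, a hedged sketch (helper name hypothetical) that sizes an allocation with bio_max_segs() and attaches the bio to a block_device via the bi_bdev-based bio_set_dev():

#include <linux/bio.h>

/* Hypothetical helper: cap the segment count and point the bio at @bdev. */
static struct bio *example_alloc_bio(struct block_device *bdev,
                                     unsigned int nr_segs)
{
        struct bio *bio = bio_alloc(GFP_NOIO, bio_max_segs(nr_segs));

        if (!bio)
                return NULL;
        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_READ;
        return bio;
}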
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a61f192c096b..a5a48303b0f1 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -214,7 +214,7 @@ static inline int get_count_order_long(unsigned long l)
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
- * On 64 bit arches this is a synomyn for __ffs
+ * On 64 bit arches this is a synonym for __ffs
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d705b174d346..2c473c9b8990 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -140,10 +140,6 @@ struct blk_mq_hw_ctx {
* shared across request queues.
*/
atomic_t nr_active;
- /**
- * @elevator_queued: Number of queued requests on hctx.
- */
- atomic_t elevator_queued;
/** @cpuhp_online: List to store request if CPU is going to die */
struct hlist_node cpuhp_online;
@@ -494,6 +490,18 @@ static inline int blk_mq_request_completed(struct request *rq)
return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}
+/*
+ *
+ * Set the state to complete when completing a request from inside ->queue_rq.
+ * This is used by drivers that want to ensure special complete actions that
+ * need access to the request are called on failure, e.g. by nvme for
+ * multipathing.
+ */
+static inline void blk_mq_set_request_complete(struct request *rq)
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+}
+
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
@@ -602,8 +610,8 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
rq->bio = rq->biotail = bio;
rq->ioprio = bio_prio(bio);
- if (bio->bi_disk)
- rq->rq_disk = bio->bi_disk;
+ if (bio->bi_bdev)
+ rq->rq_disk = bio->bi_bdev->bd_disk;
}
blk_qc_t blk_mq_submit_bio(struct bio *bio);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 866f74261b3b..db026b6ec15a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -222,16 +222,15 @@ static inline void bio_issue_init(struct bio_issue *issue,
*/
struct bio {
struct bio *bi_next; /* request queue link */
- struct gendisk *bi_disk;
+ struct block_device *bi_bdev;
unsigned int bi_opf; /* bottom bits req flags,
* top bits REQ_OP. Use
* accessors.
*/
- unsigned short bi_flags; /* status, etc and bvec pool number */
+ unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
unsigned short bi_write_hint;
blk_status_t bi_status;
- u8 bi_partno;
atomic_t __bi_remaining;
struct bvec_iter bi_iter;
@@ -304,36 +303,10 @@ enum {
* of this bio. */
BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
BIO_TRACKED, /* set if bio goes through the rq_qos path */
+ BIO_REMAPPED,
BIO_FLAG_LAST
};
-/* See BVEC_POOL_OFFSET below before adding new flags */
-
-/*
- * We support 6 different bvec pools, the last one is magic in that it
- * is backed by a mempool.
- */
-#define BVEC_POOL_NR 6
-#define BVEC_POOL_MAX (BVEC_POOL_NR - 1)
-
-/*
- * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
- * 1 to the actual index so that 0 indicates that there are no bvecs to be
- * freed.
- */
-#define BVEC_POOL_BITS (3)
-#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
-#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
-#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
-# error "BVEC_POOL_BITS is too small"
-#endif
-
-/*
- * Flags starting here get preserved by bio_reset() - this includes
- * only BVEC_POOL_IDX()
- */
-#define BIO_RESET_BITS BVEC_POOL_OFFSET
-
typedef __u32 __bitwise blk_mq_req_flags_t;
/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f94ee3089e01..c032cfe133c7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -153,7 +153,7 @@ struct request {
*/
union {
struct hlist_node hash; /* merge hash */
- struct list_head ipi_list;
+ struct llist_node ipi_list;
};
/*
@@ -337,6 +337,7 @@ struct queue_limits {
unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int zone_write_granularity;
unsigned short max_segments;
unsigned short max_integrity_segments;
@@ -461,7 +462,6 @@ struct request_queue {
#ifdef CONFIG_PM
struct device *dev;
enum rpm_status rpm_status;
- unsigned int nr_pending;
#endif
/*
@@ -948,9 +948,8 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *,
gfp_t);
-extern void blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
-extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
+extern void blk_execute_rq(struct gendisk *, struct request *, int);
+extern void blk_execute_rq_nowait(struct gendisk *,
struct request *, int, rq_end_io_fn *);
/* Helper to convert REQ_OP_XXX to its string format XXX */
@@ -1161,6 +1160,8 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
+void blk_queue_zone_write_granularity(struct request_queue *q,
+ unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
void blk_queue_update_readahead(struct request_queue *q);
@@ -1289,7 +1290,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
!list_empty(&plug->cb_list));
}
-int blkdev_issue_flush(struct block_device *, gfp_t);
+int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
@@ -1317,7 +1318,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
return false;
}
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
+static inline int blkdev_issue_flush(struct block_device *bdev)
{
return 0;
}
@@ -1474,6 +1475,18 @@ static inline int bdev_io_opt(struct block_device *bdev)
return queue_io_opt(bdev_get_queue(bdev));
}
+static inline unsigned int
+queue_zone_write_granularity(const struct request_queue *q)
+{
+ return q->limits.zone_write_granularity;
+}
+
+static inline unsigned int
+bdev_zone_write_granularity(struct block_device *bdev)
+{
+ return queue_zone_write_granularity(bdev_get_queue(bdev));
+}
+
static inline int queue_alignment_offset(const struct request_queue *q)
{
if (q->limits.misaligned)
@@ -1954,21 +1967,9 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
void disk_end_io_acct(struct gendisk *disk, unsigned int op,
unsigned long start_time);
-unsigned long part_start_io_acct(struct gendisk *disk,
- struct block_device **part, struct bio *bio);
-void part_end_io_acct(struct block_device *part, struct bio *bio,
- unsigned long start_time);
-
-/**
- * bio_start_io_acct - start I/O accounting for bio based drivers
- * @bio: bio to start account for
- *
- * Returns the start time that should be passed back to bio_end_io_acct().
- */
-static inline unsigned long bio_start_io_acct(struct bio *bio)
-{
- return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio));
-}
+unsigned long bio_start_io_acct(struct bio *bio);
+void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
+ struct block_device *orig_bdev);
/**
* bio_end_io_acct - end I/O accounting for bio based drivers
@@ -1977,7 +1978,7 @@ static inline unsigned long bio_start_io_acct(struct bio *bio)
*/
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
- return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time);
+ return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
int bdev_read_only(struct block_device *bdev);
@@ -2012,21 +2013,16 @@ void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
struct block_device *bdgrab(struct block_device *bdev);
void bdput(struct block_device *);
+int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
+ loff_t lend);
#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
-int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
- loff_t lend);
int sync_blockdev(struct block_device *bdev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
-static inline int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
- loff_t lstart, loff_t lend)
-{
- return 0;
-}
static inline int sync_blockdev(struct block_device *bdev)
{
return 0;
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 05556573b896..a083e15df608 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -23,8 +23,6 @@ struct blk_trace {
u32 pid;
u32 dev;
struct dentry *dir;
- struct dentry *dropped_file;
- struct dentry *msg_file;
struct list_head running_list;
atomic_t dropped;
};
@@ -119,7 +117,7 @@ struct compat_blk_user_trace_setup {
#endif
-extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
+void blk_fill_rwbs(char *rwbs, unsigned int op);
static inline sector_t blk_rq_trace_sector(struct request *rq)
{
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 72e69a0e1e8c..c42e02b4d84b 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -23,8 +23,8 @@ struct ctl_table_header;
#ifdef CONFIG_CGROUP_BPF
-extern struct static_key_false cgroup_bpf_enabled_key;
-#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
DECLARE_PER_CPU(struct bpf_cgroup_storage*,
bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
@@ -125,7 +125,8 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
enum bpf_attach_type type,
- void *t_ctx);
+ void *t_ctx,
+ u32 *flags);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
@@ -147,6 +148,10 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
int __user *optlen, int max_optlen,
int retval);
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+ int optname, void *optval,
+ int *optlen, int retval);
+
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
struct bpf_map *map)
{
@@ -185,7 +190,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
BPF_CGROUP_INET_INGRESS); \
\
@@ -195,7 +200,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
+ if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
if (sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
@@ -207,7 +212,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) { \
+ if (cgroup_bpf_enabled(type)) { \
__ret = __cgroup_bpf_run_filter_sk(sk, type); \
} \
__ret; \
@@ -227,33 +232,53 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
+ u32 __unused_flags; \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(type)) \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- NULL); \
+ NULL, \
+ &__unused_flags); \
__ret; \
})
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
+ u32 __unused_flags; \
int __ret = 0; \
- if (cgroup_bpf_enabled) { \
+ if (cgroup_bpf_enabled(type)) { \
lock_sock(sk); \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- t_ctx); \
+ t_ctx, \
+ &__unused_flags); \
release_sock(sk); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_BIND, NULL)
-
-#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_BIND, NULL)
+/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
+ * via upper bits of return code. The only flag that is supported
+ * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
+ * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
+ */
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags) \
+({ \
+ u32 __flags = 0; \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled(type)) { \
+ lock_sock(sk); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
+ NULL, &__flags); \
+ release_sock(sk); \
+ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
+ *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
+ } \
+ __ret; \
+})
-#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
- sk->sk_prot->pre_connect)
+#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
+ ((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \
+ cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \
+ (sk)->sk_prot->pre_connect)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
@@ -297,7 +322,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \
__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
sock_ops, \
BPF_CGROUP_SOCK_OPS); \
@@ -307,7 +332,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled && (sock_ops)->sk) { \
+ if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
@@ -320,7 +345,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \
__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
access, \
BPF_CGROUP_DEVICE); \
@@ -332,7 +357,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
buf, count, pos, \
BPF_CGROUP_SYSCTL); \
@@ -343,7 +368,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
kernel_optval) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
optname, optval, \
optlen, \
@@ -354,7 +379,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled) \
+ if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
get_user(__ret, optlen); \
__ret; \
})
@@ -363,11 +388,24 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
max_optlen, retval) \
({ \
int __ret = retval; \
- if (cgroup_bpf_enabled) \
- __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
- optname, optval, \
- optlen, max_optlen, \
- retval); \
+ if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
+ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
+ tcp_bpf_bypass_getsockopt, \
+ level, optname)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt( \
+ sock, level, optname, optval, optlen, \
+ max_optlen, retval); \
+ __ret; \
+})
+
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) \
+({ \
+ int __ret = retval; \
+ if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+ __ret = __cgroup_bpf_run_filter_getsockopt_kern( \
+ sock, level, optname, optval, optlen, retval); \
__ret; \
})
@@ -427,15 +465,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
return 0;
}
-#define cgroup_bpf_enabled (0)
+#define cgroup_bpf_enabled(type) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_BIND_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
@@ -452,6 +489,8 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
optlen, max_optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+ optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
kernel_optval) ({ 0; })
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 07cb5d15e743..cccaef1088ea 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -14,7 +14,6 @@
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
-#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -507,12 +506,6 @@ enum bpf_cgroup_storage_type {
*/
#define MAX_BPF_FUNC_ARGS 12
-struct bpf_prog_stats {
- u64 cnt;
- u64 nsecs;
- struct u64_stats_sync syncp;
-} __aligned(2 * sizeof(u64));
-
struct btf_func_model {
u8 ret_size;
u8 nr_args;
@@ -536,7 +529,7 @@ struct btf_func_model {
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
*/
-#define BPF_MAX_TRAMP_PROGS 40
+#define BPF_MAX_TRAMP_PROGS 38
struct bpf_tramp_progs {
struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
@@ -568,10 +561,10 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
struct bpf_tramp_progs *tprogs,
void *orig_call);
/* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(void);
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
-void notrace __bpf_prog_enter_sleepable(void);
-void notrace __bpf_prog_exit_sleepable(void);
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
+void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
struct bpf_ksym {
unsigned long start;
@@ -761,9 +754,15 @@ struct bpf_ctx_arg_aux {
u32 btf_id;
};
+struct btf_mod_pair {
+ struct btf *btf;
+ struct module *module;
+};
+
struct bpf_prog_aux {
atomic64_t refcnt;
u32 used_map_cnt;
+ u32 used_btf_cnt;
u32 max_ctx_offset;
u32 max_pkt_offset;
u32 max_tp_access;
@@ -802,6 +801,7 @@ struct bpf_prog_aux {
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
+ struct btf_mod_pair *used_btfs;
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
@@ -838,7 +838,6 @@ struct bpf_prog_aux {
u32 linfo_idx;
u32 num_exentries;
struct exception_table_entry *extable;
- struct bpf_prog_stats __percpu *stats;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -1066,6 +1065,34 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *include_prog,
struct bpf_prog_array **new_array);
+/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
+#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
+/* BPF program asks to set CN on the packet. */
+#define BPF_RET_SET_CN (1 << 0)
+
+#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags) \
+ ({ \
+ struct bpf_prog_array_item *_item; \
+ struct bpf_prog *_prog; \
+ struct bpf_prog_array *_array; \
+ u32 _ret = 1; \
+ u32 func_ret; \
+ migrate_disable(); \
+ rcu_read_lock(); \
+ _array = rcu_dereference(array); \
+ _item = &_array->items[0]; \
+ while ((_prog = READ_ONCE(_item->prog))) { \
+ bpf_cgroup_storage_set(_item->cgroup_storage); \
+ func_ret = func(_prog, ctx); \
+ _ret &= (func_ret & 1); \
+ *(ret_flags) |= (func_ret >> 1); \
+ _item++; \
+ } \
+ rcu_read_unlock(); \
+ migrate_enable(); \
+ _ret; \
+ })
+
#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
({ \
struct bpf_prog_array_item *_item; \
@@ -1113,25 +1140,11 @@ _out: \
*/
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
({ \
- struct bpf_prog_array_item *_item; \
- struct bpf_prog *_prog; \
- struct bpf_prog_array *_array; \
- u32 ret; \
- u32 _ret = 1; \
- u32 _cn = 0; \
- migrate_disable(); \
- rcu_read_lock(); \
- _array = rcu_dereference(array); \
- _item = &_array->items[0]; \
- while ((_prog = READ_ONCE(_item->prog))) { \
- bpf_cgroup_storage_set(_item->cgroup_storage); \
- ret = func(_prog, ctx); \
- _ret &= (ret & 1); \
- _cn |= (ret & 2); \
- _item++; \
- } \
- rcu_read_unlock(); \
- migrate_enable(); \
+ u32 _flags = 0; \
+ bool _cn; \
+ u32 _ret; \
+ _ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \
+ _cn = _flags & BPF_RET_SET_CN; \
if (_ret) \
_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
else \
@@ -1206,8 +1219,6 @@ void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
-void __bpf_free_used_maps(struct bpf_prog_aux *aux,
- struct bpf_map **used_maps, u32 len);
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
@@ -1271,6 +1282,11 @@ static inline bool bpf_allow_ptr_leaks(void)
return perfmon_capable();
}
+static inline bool bpf_allow_uninit_stack(void)
+{
+ return perfmon_capable();
+}
+
static inline bool bpf_allow_ptr_to_map_access(void)
{
return perfmon_capable();
@@ -1403,7 +1419,10 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
union bpf_attr __user *uattr);
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
+#endif
struct btf *bpf_get_btf_vmlinux(void);
@@ -1667,12 +1686,18 @@ bpf_base_func_proto(enum bpf_func_id func_id)
}
#endif /* CONFIG_BPF_SYSCALL */
+void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
+ struct btf_mod_pair *used_btfs, u32 len);
+
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
{
return bpf_prog_get_type_dev(ufd, type, false);
}
+void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+ struct bpf_map **used_maps, u32 len);
+
bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
int bpf_prog_offload_compile(struct bpf_prog *prog);
@@ -1860,6 +1885,7 @@ extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
+extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index e941fe1484e5..971b33aca13d 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -195,7 +195,7 @@ struct bpf_func_state {
* 0 = main function, 1 = first callee.
*/
u32 frameno;
- /* subprog number == index within subprog_stack_depth
+ /* subprog number == index within subprog_info
* zero == main subprog
*/
u32 subprogno;
@@ -340,6 +340,7 @@ struct bpf_insn_aux_data {
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */
#define BPF_VERIFIER_TMP_LOG_SIZE 1024
@@ -398,9 +399,12 @@ struct bpf_verifier_env {
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
struct bpf_verifier_state_list *free_list;
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+ struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
u32 used_map_cnt; /* number of used maps */
+ u32 used_btf_cnt; /* number of used BTF objects */
u32 id_gen; /* used to generate unique reg IDs */
bool allow_ptr_leaks;
+ bool allow_uninit_stack;
bool allow_ptr_to_map_access;
bool bpf_capable;
bool bypass_spec_v1;
@@ -467,6 +471,8 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
int check_ctx_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno);
+int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ u32 regno, u32 mem_size);
/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index d0bd226d6bd9..c2c2147dfeb8 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -31,6 +31,7 @@
#define PHY_ID_BCM89610 0x03625cd0
#define PHY_ID_BCM72113 0x35905310
+#define PHY_ID_BCM72116 0x35905350
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
@@ -60,19 +61,11 @@
#define PHY_BCM_OUI_5 0x03625e00
#define PHY_BCM_OUI_6 0xae025000
-#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
-#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
-#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
-#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
-#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100
-#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200
-#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400
-#define PHY_BRCM_STD_IBND_DISABLE 0x00000800
-#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000
-#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
-#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
-#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
-#define PHY_BRCM_EN_MASTER_MODE 0x00010000
+#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000001
+#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000002
+#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004
+#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008
+#define PHY_BRCM_EN_MASTER_MODE 0x00000010
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
@@ -136,6 +129,7 @@
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
@@ -197,6 +191,7 @@
#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
+#define BCM54XX_SHD_SCR3_RXCTXC_DIS 0x0100
/* 01010: Auto Power-Down */
#define BCM54XX_SHD_APD 0x0a
@@ -222,6 +217,9 @@
/* 11111: Mode Control Register */
#define BCM54XX_SHD_MODE 0x1f
#define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */
+#define BCM54XX_SHD_INTF_SEL_RGMII 0x02
+#define BCM54XX_SHD_INTF_SEL_SGMII 0x04
+#define BCM54XX_SHD_INTF_SEL_GBIC 0x06
#define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */
/*
@@ -257,7 +255,6 @@
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0)
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
-#define BCM54810_SHD_SCR3_TRDDAPD 0x0100
/* BCM54612E Registers */
#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 4c200f5d242b..7fabf1428093 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -91,6 +91,9 @@ int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
int btf_get_fd_by_id(u32 id);
u32 btf_obj_id(const struct btf *btf);
bool btf_is_kernel(const struct btf *btf);
+bool btf_is_module(const struct btf *btf);
+struct module *btf_try_get_module(const struct btf *btf);
+u32 btf_nr_types(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
new file mode 100644
index 000000000000..40232f90db6e
--- /dev/null
+++ b/include/linux/buildid.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILDID_H
+#define _LINUX_BUILDID_H
+
+#include <linux/mm_types.h>
+
+#define BUILD_ID_SIZE_MAX 20
+
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+ __u32 *size);
+
+#endif
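
A brief usage sketch (function name hypothetical; assumes build_id_parse() returns 0 on success):

#include <linux/buildid.h>
#include <linux/printk.h>

/* Hypothetical: report how long the ELF build ID backing @vma is. */
static void example_log_build_id(struct vm_area_struct *vma)
{
        unsigned char id[BUILD_ID_SIZE_MAX];
        __u32 id_len;

        if (!build_id_parse(vma, id, &id_len))
                pr_info("build id: %u bytes\n", id_len);
}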
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
new file mode 100644
index 000000000000..707575c668f4
--- /dev/null
+++ b/include/linux/can/bittiming.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _CAN_BITTIMING_H
+#define _CAN_BITTIMING_H
+
+#include <linux/netdevice.h>
+#include <linux/can/netlink.h>
+
+#define CAN_SYNC_SEG 1
+
+#ifdef CONFIG_CAN_CALC_BITTIMING
+int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc);
+#else /* !CONFIG_CAN_CALC_BITTIMING */
+static inline int
+can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc)
+{
+ netdev_err(dev, "bit-timing calculation not available\n");
+ return -EINVAL;
+}
+#endif /* CONFIG_CAN_CALC_BITTIMING */
+
+int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt);
+
+/*
+ * can_bit_time() - Duration of one bit
+ *
+ * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for
+ * additional information.
+ *
+ * Return: the number of time quanta in one bit.
+ */
+static inline unsigned int can_bit_time(const struct can_bittiming *bt)
+{
+ return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
+}
+
+#endif /* !_CAN_BITTIMING_H */
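
As a small worked example (helper name hypothetical; assumes bt->tq holds the time quantum in nanoseconds, as struct can_bittiming defines it), can_bit_time() converts directly into a bit duration:

#include <linux/can/bittiming.h>

/* Hypothetical helper: nominal bit time in nanoseconds. */
static inline u32 example_can_bit_time_ns(const struct can_bittiming *bt)
{
        return can_bit_time(bt) * bt->tq;
}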
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
index 2f5d731ae251..8afa92d15a66 100644
--- a/include/linux/can/can-ml.h
+++ b/include/linux/can/can-ml.h
@@ -44,6 +44,7 @@
#include <linux/can.h>
#include <linux/list.h>
+#include <linux/netdevice.h>
#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
#define CAN_EFF_RCV_HASH_BITS 10
@@ -65,4 +66,15 @@ struct can_ml_priv {
#endif
};
+static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
+{
+ return netdev_get_ml_priv(dev, ML_PRIV_CAN);
+}
+
+static inline void can_set_ml_priv(struct net_device *dev,
+ struct can_ml_priv *ml_priv)
+{
+ netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
+}
+
#endif /* CAN_ML_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 197a79535cc2..ac4d83a1ab81 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -15,8 +15,10 @@
#define _CAN_DEV_H
#include <linux/can.h>
+#include <linux/can/bittiming.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
+#include <linux/can/length.h>
#include <linux/can/netlink.h>
#include <linux/can/skb.h>
#include <linux/netdevice.h>
@@ -82,118 +84,6 @@ struct can_priv {
#endif
};
-#define CAN_SYNC_SEG 1
-
-/*
- * can_bit_time() - Duration of one bit
- *
- * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for
- * additional information.
- *
- * Return: the number of time quanta in one bit.
- */
-static inline unsigned int can_bit_time(const struct can_bittiming *bt)
-{
- return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
-}
-
-/*
- * can_cc_dlc2len(value) - convert a given data length code (dlc) of a
- * Classical CAN frame into a valid data length of max. 8 bytes.
- *
- * To be used in the CAN netdriver receive path to ensure conformance with
- * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
- */
-#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN))
-
-/* Check for outgoing skbs that have not been created by the CAN subsystem */
-static inline bool can_skb_headroom_valid(struct net_device *dev,
- struct sk_buff *skb)
-{
- /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
- if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
- return false;
-
- /* af_packet does not apply CAN skb specific settings */
- if (skb->ip_summed == CHECKSUM_NONE) {
- /* init headroom */
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- /* perform proper loopback on capable devices */
- if (dev->flags & IFF_ECHO)
- skb->pkt_type = PACKET_LOOPBACK;
- else
- skb->pkt_type = PACKET_HOST;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
- }
-
- return true;
-}
-
-/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
-static inline bool can_dropped_invalid_skb(struct net_device *dev,
- struct sk_buff *skb)
-{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
-
- if (skb->protocol == htons(ETH_P_CAN)) {
- if (unlikely(skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN))
- goto inval_skb;
- } else if (skb->protocol == htons(ETH_P_CANFD)) {
- if (unlikely(skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN))
- goto inval_skb;
- } else
- goto inval_skb;
-
- if (!can_skb_headroom_valid(dev, skb))
- goto inval_skb;
-
- return false;
-
-inval_skb:
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return true;
-}
-
-static inline bool can_is_canfd_skb(const struct sk_buff *skb)
-{
- /* the CAN specific type of skb is identified by its data length */
- return skb->len == CANFD_MTU;
-}
-
-/* helper to get the data length code (DLC) for Classical CAN raw DLC access */
-static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode)
-{
- /* return len8_dlc as dlc value only if all conditions apply */
- if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
- (cf->len == CAN_MAX_DLEN) &&
- (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC))
- return cf->len8_dlc;
-
- /* return the payload length as dlc value */
- return cf->len;
-}
-
-/* helper to set len and len8_dlc value for Classical CAN raw DLC access */
-static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc,
- const u32 ctrlmode)
-{
- /* the caller already ensured that dlc is a value from 0 .. 15 */
- if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN)
- cf->len8_dlc = dlc;
-
- /* limit the payload length 'len' to CAN_MAX_DLEN */
- cf->len = can_cc_dlc2len(dlc);
-}
/* helper to define static CAN controller features at device creation time */
static inline void can_set_static_ctrlmode(struct net_device *dev,
@@ -210,11 +100,7 @@ static inline void can_set_static_ctrlmode(struct net_device *dev,
dev->mtu = CANFD_MTU;
}
-/* get data length from raw data length code (DLC) */
-u8 can_fd_dlc2len(u8 dlc);
-
-/* map the sanitized data length to an appropriate data length code */
-u8 can_fd_len2dlc(u8 len);
+void can_setup(struct net_device *dev);
struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
unsigned int txqs, unsigned int rxqs);
@@ -237,26 +123,18 @@ void unregister_candev(struct net_device *dev);
int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
+const char *can_get_state_str(const enum can_state state);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
-int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx);
-struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
- u8 *len_ptr);
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
-void can_free_echo_skb(struct net_device *dev, unsigned int idx);
-
#ifdef CONFIG_OF
void of_can_transceiver(struct net_device *dev);
#else
static inline void of_can_transceiver(struct net_device *dev) { }
#endif
-struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
-struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- struct canfd_frame **cfd);
-struct sk_buff *alloc_can_err_skb(struct net_device *dev,
- struct can_frame **cf);
+extern struct rtnl_link_ops can_link_ops;
+int can_netlink_register(void);
+void can_netlink_unregister(void);
#endif /* !_CAN_DEV_H */
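A hedged fragment (not from this patch) showing how a driver's error interrupt might combine the remaining helpers; priv, the chosen states and the skb handling are illustrative.

    /* Sketch: report a bus state change from an error interrupt. */
    struct can_frame *cf;
    struct sk_buff *skb = alloc_can_err_skb(dev, &cf);

    if (skb) {
        can_change_state(dev, cf, CAN_STATE_ERROR_WARNING,
                         CAN_STATE_ERROR_ACTIVE);
        netif_rx(skb);
    }
    netdev_dbg(dev, "state: %s\n", can_get_state_str(priv->can.state));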
diff --git a/include/linux/can/length.h b/include/linux/can/length.h
new file mode 100644
index 000000000000..6995092b774e
--- /dev/null
+++ b/include/linux/can/length.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _CAN_LENGTH_H
+#define _CAN_LENGTH_H
+
+/*
+ * Size of a Classical CAN Standard Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier 11
+ * Remote transmission request (RTR) 1
+ * Identifier extension bit (IDE) 1
+ * Reserved bit (r0) 1
+ * Data length code (DLC) 4
+ * Data field 0...64
+ * CRC 15
+ * CRC delimiter 1
+ * ACK slot 1
+ * ACK delimiter 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * rounded up and ignoring bitstuffing
+ */
+#define CAN_FRAME_OVERHEAD_SFF DIV_ROUND_UP(47, 8)
+
+/*
+ * Size of a Classical CAN Extended Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier A 11
+ * Substitute remote request (SRR) 1
+ * Identifier extension bit (IDE) 1
+ * Identifier B 18
+ * Remote transmission request (RTR) 1
+ * Reserved bits (r1, r0) 2
+ * Data length code (DLC) 4
+ * Data field 0...64
+ * CRC 15
+ * CRC delimiter 1
+ * ACK slot 1
+ * ACK delimiter 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * rounded up and ignoring bitstuffing
+ */
+#define CAN_FRAME_OVERHEAD_EFF DIV_ROUND_UP(67, 8)
+
+/*
+ * Size of a CAN-FD Standard Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier 11
+ * Reserved bit (r1) 1
+ * Identifier extension bit (IDE) 1
+ * Flexible data rate format (FDF) 1
+ * Reserved bit (r0) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ * Data field 0...512
+ * Stuff Bit Count (SBC) 0...16: 4 20...64:5
+ * CRC 0...16: 17 20...64:21
+ * CRC delimiter (CD) 1
+ * ACK slot (AS) 1
+ * ACK delimiter (AD) 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * assuming CRC21, rounded up and ignoring bitstuffing
+ */
+#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(61, 8)
+
+/*
+ * Size of a CAN-FD Extended Frame
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start-of-frame 1
+ * Identifier A 11
+ * Substitute remote request (SRR) 1
+ * Identifier extension bit (IDE) 1
+ * Identifier B 18
+ * Reserved bit (r1) 1
+ * Flexible data rate format (FDF) 1
+ * Reserved bit (r0) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ * Data field 0...512
+ * Stuff Bit Count (SBC) 0...16: 4 20...64:5
+ * CRC 0...16: 17 20...64:21
+ * CRC delimiter (CD) 1
+ * ACK slot (AS) 1
+ * ACK delimiter (AD) 1
+ * End-of-frame (EOF) 7
+ * Inter frame spacing 3
+ *
+ * assuming CRC21, rounded up and ignoring bitstuffing
+ */
+#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(80, 8)
+
+/*
+ * Maximum size of a Classical CAN frame
+ * (rounded up and ignoring bitstuffing)
+ */
+#define CAN_FRAME_LEN_MAX (CAN_FRAME_OVERHEAD_EFF + CAN_MAX_DLEN)
+
+/*
+ * Maximum size of a CAN-FD frame
+ * (rounded up and ignoring bitstuffing)
+ */
+#define CANFD_FRAME_LEN_MAX (CANFD_FRAME_OVERHEAD_EFF + CANFD_MAX_DLEN)
+
+/*
+ * can_cc_dlc2len(value) - convert a given data length code (dlc) of a
+ * Classical CAN frame into a valid data length of max. 8 bytes.
+ *
+ * To be used in the CAN netdriver receive path to ensure conformance with
+ * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ */
+#define can_cc_dlc2len(dlc) (min_t(u8, (dlc), CAN_MAX_DLEN))
+
+/* helper to get the data length code (DLC) for Classical CAN raw DLC access */
+static inline u8 can_get_cc_dlc(const struct can_frame *cf, const u32 ctrlmode)
+{
+ /* return len8_dlc as dlc value only if all conditions apply */
+ if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
+ (cf->len == CAN_MAX_DLEN) &&
+ (cf->len8_dlc > CAN_MAX_DLEN && cf->len8_dlc <= CAN_MAX_RAW_DLC))
+ return cf->len8_dlc;
+
+ /* return the payload length as dlc value */
+ return cf->len;
+}
+
+/* helper to set len and len8_dlc value for Classical CAN raw DLC access */
+static inline void can_frame_set_cc_len(struct can_frame *cf, const u8 dlc,
+ const u32 ctrlmode)
+{
+ /* the caller already ensured that dlc is a value from 0 .. 15 */
+ if (ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC && dlc > CAN_MAX_DLEN)
+ cf->len8_dlc = dlc;
+
+ /* limit the payload length 'len' to CAN_MAX_DLEN */
+ cf->len = can_cc_dlc2len(dlc);
+}
+
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc);
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_fd_len2dlc(u8 len);
+
+/* calculate the CAN Frame length in bytes of a given skb */
+unsigned int can_skb_get_frame_len(const struct sk_buff *skb);
+
+/* map the data length to an appropriate data link layer length */
+static inline u8 canfd_sanitize_len(u8 len)
+{
+ return can_fd_dlc2len(can_fd_len2dlc(len));
+}
+
+#endif /* !_CAN_LENGTH_H */
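For context (not from this patch), the frame-length helpers let drivers do byte-queue-limit accounting on the data link layer length; a hedged sketch of the transmit side:

    /* Sketch: account the on-wire length of an outgoing skb for BQL. */
    unsigned int frame_len = can_skb_get_frame_len(skb);

    netdev_sent_queue(dev, frame_len);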
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index f1b38088b765..40882df7105e 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -44,7 +44,8 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp);
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
void can_rx_offload_del(struct can_rx_offload *offload);
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index fc61cf4eff1c..685f34cfba20 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -16,6 +16,20 @@
#include <linux/can.h>
#include <net/sock.h>
+void can_flush_echo_skb(struct net_device *dev);
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx, unsigned int frame_len);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ u8 *len_ptr, unsigned int *frame_len_ptr);
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *frame_len_ptr);
+void can_free_echo_skb(struct net_device *dev, unsigned int idx);
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd);
+struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+ struct can_frame **cf);
+
/*
* The struct can_skb_priv is used to transport additional information along
* with the stored struct can(fd)_frame that can not be contained in existing
@@ -29,11 +43,13 @@
* struct can_skb_priv - private additional data inside CAN sk_buffs
* @ifindex: ifindex of the first interface the CAN frame appeared on
* @skbcnt: atomic counter to have an unique id together with skb pointer
+ * @frame_len: length of CAN frame in data link layer
* @cf: align to the following CAN frame at skb->data
*/
struct can_skb_priv {
int ifindex;
int skbcnt;
+ unsigned int frame_len;
struct can_frame cf[];
};
@@ -74,4 +90,68 @@ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
return nskb;
}
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static inline bool can_skb_headroom_valid(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ }
+
+ return true;
+}
+
+/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
+static inline bool can_dropped_invalid_skb(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (skb->protocol == htons(ETH_P_CAN)) {
+ if (unlikely(skb->len != CAN_MTU ||
+ cfd->len > CAN_MAX_DLEN))
+ goto inval_skb;
+ } else if (skb->protocol == htons(ETH_P_CANFD)) {
+ if (unlikely(skb->len != CANFD_MTU ||
+ cfd->len > CANFD_MAX_DLEN))
+ goto inval_skb;
+ } else
+ goto inval_skb;
+
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
+ return false;
+
+inval_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+ /* the CAN specific type of skb is identified by its data length */
+ return skb->len == CANFD_MTU;
+}
+
#endif /* !_CAN_SKB_H */
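A hedged transmit-path sketch (not from this patch) using the helpers that now live here; the echo index 0 is arbitrary and can_skb_get_frame_len() comes from <linux/can/length.h>.

    /* Sketch: validate the skb, then queue it for TX echo with its frame length. */
    if (can_dropped_invalid_skb(dev, skb))
        return NETDEV_TX_OK;

    can_put_echo_skb(skb, dev, 0, can_skb_get_frame_len(skb));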
diff --git a/include/linux/capability.h b/include/linux/capability.h
index b2f698915c0f..65efb74c3585 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -247,8 +247,11 @@ static inline bool ns_capable_setid(struct user_namespace *ns, int cap)
return true;
}
#endif /* CONFIG_MULTIUSER */
-extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
-extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
+bool privileged_wrt_inode_uidgid(struct user_namespace *ns,
+ struct user_namespace *mnt_userns,
+ const struct inode *inode);
+bool capable_wrt_inode_uidgid(struct user_namespace *mnt_userns,
+ const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
static inline bool perfmon_capable(void)
@@ -268,8 +271,11 @@ static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns)
}
/* audit system wants to get cap info from files as well */
-extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+int get_vfs_caps_from_disk(struct user_namespace *mnt_userns,
+ const struct dentry *dentry,
+ struct cpu_vfs_cap_data *cpu_caps);
-extern int cap_convert_nscap(struct dentry *dentry, const void **ivalue, size_t size);
+int cap_convert_nscap(struct user_namespace *mnt_userns, struct dentry *dentry,
+ const void **ivalue, size_t size);
#endif /* !_LINUX_CAPABILITY_H */
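An illustrative (non-authoritative) call site for the idmapped-mount aware variant; mnt_userns is assumed to be supplied by the VFS caller.

    /* Sketch: ownership check that honours the mount's user namespace. */
    if (!capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FOWNER))
        return -EPERM;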
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index eb9008bb3992..409d8c29bc4f 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -32,10 +32,9 @@
#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes (msgr1) */
-#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
-#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
-#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs (msgr1) */
-#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */
+#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */
+#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index 4060004968c8..6617d9c68d86 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -4,7 +4,7 @@
* Version: 0.1.0
* Description: cfag12864b LCD driver header
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-12
*/
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 451c2d26a5db..4f2f79de083e 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -307,7 +307,7 @@ void css_task_iter_end(struct css_task_iter *it);
* Inline functions.
*/
-static inline u64 cgroup_id(struct cgroup *cgrp)
+static inline u64 cgroup_id(const struct cgroup *cgrp)
{
return cgrp->kn->id;
}
@@ -701,7 +701,7 @@ void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
struct cgroup_subsys_state;
struct cgroup;
-static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
+static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index e4316890661a..58f6fe866ae9 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -941,7 +941,9 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
-
+struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div);
/**
* struct clk_fractional_divider - adjustable fractional divider clock
*
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 31ff1bf1b79f..266e8de3cb51 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -92,7 +92,7 @@ struct clk_bulk_data {
#ifdef CONFIG_COMMON_CLK
/**
- * clk_notifier_register: register a clock rate-change notifier callback
+ * clk_notifier_register - register a clock rate-change notifier callback
* @clk: clock whose rate we are interested in
* @nb: notifier block with callback function pointer
*
@@ -103,7 +103,7 @@ struct clk_bulk_data {
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
/**
- * clk_notifier_unregister: unregister a clock rate-change notifier callback
+ * clk_notifier_unregister - unregister a clock rate-change notifier callback
* @clk: clock whose rate we are no longer interested in
* @nb: notifier block which will be unregistered
*/
@@ -238,6 +238,7 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
#endif
+#ifdef CONFIG_HAVE_CLK_PREPARE
/**
* clk_prepare - prepare a clock source
* @clk: clock source
@@ -246,10 +247,26 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
*
* Must not be called from within atomic context.
*/
-#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
const struct clk_bulk_data *clks);
+
+/**
+ * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
+ * @clk: clock source
+ *
+ * Returns true if clk_prepare() implicitly enables the clock, effectively
+ * making clk_enable()/clk_disable() no-ops, false otherwise.
+ *
+ * This is of interest mainly to the power management code where actually
+ * disabling the clock also requires unpreparing it to have any material
+ * effect.
+ *
+ * Regardless of the value returned here, the caller must always invoke
+ * clk_enable() or clk_prepare_enable() and counterparts for usage counts
+ * to be right.
+ */
+bool clk_is_enabled_when_prepared(struct clk *clk);
#else
static inline int clk_prepare(struct clk *clk)
{
@@ -263,6 +280,11 @@ clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
might_sleep();
return 0;
}
+
+static inline bool clk_is_enabled_when_prepared(struct clk *clk)
+{
+ return false;
+}
#endif
/**
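A minimal sketch (not from this patch) of the intended use in power management code, assuming clk has already been prepared and enabled.

    /* Sketch: gating the clock only helps if clk_disable() isn't a no-op. */
    if (clk_is_enabled_when_prepared(clk))
        clk_disable_unprepare(clk);    /* disable alone would change nothing */
    else
        clk_disable(clk);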
diff --git a/include/linux/clk/imx.h b/include/linux/clk/imx.h
new file mode 100644
index 000000000000..75a0d9696552
--- /dev/null
+++ b/include/linux/clk/imx.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Freescale Semiconductor, Inc.
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef __LINUX_CLK_IMX_H
+#define __LINUX_CLK_IMX_H
+
+#include <linux/types.h>
+
+void imx6sl_set_wait_clk(bool enter);
+
+#endif
diff --git a/include/linux/clk/spear.h b/include/linux/clk/spear.h
new file mode 100644
index 000000000000..a64d034ceddd
--- /dev/null
+++ b/include/linux/clk/spear.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 STMicroelectronics - All Rights Reserved
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ */
+
+#ifndef __LINUX_CLK_SPEAR_H
+#define __LINUX_CLK_SPEAR_H
+
+#ifdef CONFIG_MACH_SPEAR1310
+void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base);
+#else
+static inline void spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+void __init spear1340_clk_init(void __iomem *misc_base);
+#else
+static inline void spear1340_clk_init(void __iomem *misc_base) {}
+#endif
+
+#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 3f01d43f0598..eb016fc9cc0b 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -136,6 +136,7 @@ extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
extern void tegra210_clk_emc_update_setting(u32 emc_src_value);
struct clk;
+struct tegra_emc;
typedef long (tegra20_clk_emc_round_cb)(unsigned long rate,
unsigned long min_rate,
@@ -146,6 +147,13 @@ void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
void *cb_arg);
int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+typedef int (tegra124_emc_prepare_timing_change_cb)(struct tegra_emc *emc,
+ unsigned long rate);
+typedef void (tegra124_emc_complete_timing_change_cb)(struct tegra_emc *emc,
+ unsigned long rate);
+void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb);
+
struct tegra210_clk_emc_config {
unsigned long rate;
bool same_freq;
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 98cff1b4b088..04c0a5a717f7 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -3,16 +3,6 @@
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
-#define CLANG_VERSION (__clang_major__ * 10000 \
- + __clang_minor__ * 100 \
- + __clang_patchlevel__)
-
-#if CLANG_VERSION < 100001
-#ifndef __BPF_TRACING__
-# error Sorry, your version of Clang is too old - please use 10.0.1 or newer.
-#endif
-#endif
-
/* Compiler specific definitions for Clang compiler */
/* same as gcc, this was present in clang-2.6 so we can assume it works
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 555ab0fddbef..48750243db4c 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -10,17 +10,6 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
-#if GCC_VERSION < 40900
-# error Sorry, your version of GCC is too old - please use 4.9 or newer.
-#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100
-/*
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
- * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk
- */
-# error Sorry, your version of GCC is too old - please use 5.1 or newer.
-#endif
-
/*
* This macro obfuscates arithmetic on a variable address so that gcc
* shouldn't recognize the original var, and make assumptions about it.
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index b8fe0c23cfff..df5b405e6305 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -76,6 +76,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
+# define likely_notrace(x) likely(x)
+# define unlikely_notrace(x) unlikely(x)
#endif
/* Optimization barrier */
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index ea5e04e75845..c043b8d2b17b 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -211,6 +211,12 @@
#endif
/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#flatten
+ */
+# define __flatten __attribute__((flatten))
+
+/*
* Note the missing underscores.
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 8ea860efea37..487350bb19c3 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -99,7 +99,7 @@ void cn_del_callback(const struct cb_id *id);
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
/**
- * cn_netlink_send_mult - Sends message to the specified groups.
+ * cn_netlink_send - Sends message to the specified groups.
*
* @msg: message header(with attached data).
* @portid: destination port.
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index b0e35eec6499..4ac5c081af93 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -10,17 +10,27 @@
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
#define CORESIGHT_ETM_PMU_SEED 0x10
-/* ETMv3.5/PTM's ETMCR config bit */
-#define ETM_OPT_CYCACC 12
-#define ETM_OPT_CTXTID 14
-#define ETM_OPT_TS 28
-#define ETM_OPT_RETSTK 29
+/*
+ * Below are the definitions of the bit offsets for the perf options; they
+ * work as arbitrary values for all ETM versions.
+ *
+ * Most of them originally come from ETMv3.5/PTM's ETMCR config; therefore,
+ * ETMv3.5/PTM doesn't define ETMCR config bits with an "ETM3_" prefix and
+ * directly uses the macros below as config bits.
+ */
+#define ETM_OPT_CYCACC 12
+#define ETM_OPT_CTXTID 14
+#define ETM_OPT_CTXTID2 15
+#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
#define ETM4_CFG_BIT_CYCACC 4
#define ETM4_CFG_BIT_CTXTID 6
+#define ETM4_CFG_BIT_VMID 7
#define ETM4_CFG_BIT_TS 11
#define ETM4_CFG_BIT_RETSTK 12
+#define ETM4_CFG_BIT_VMID_OPT 15
static inline int coresight_get_trace_id(int cpu)
{
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 7d3c87e5b97c..976ec2697610 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -7,6 +7,7 @@
#define _LINUX_CORESIGHT_H
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
@@ -115,6 +116,32 @@ struct coresight_platform_data {
};
/**
+ * struct csdev_access - Abstraction of a CoreSight device access.
+ *
+ * @io_mem : True if the device has memory mapped I/O
+ * @base : When io_mem == true, base address of the component
+ * @read : Read from the given "offset" of the given instance.
+ * @write : Write "val" to the given "offset".
+ */
+struct csdev_access {
+ bool io_mem;
+ union {
+ void __iomem *base;
+ struct {
+ u64 (*read)(u32 offset, bool relaxed, bool _64bit);
+ void (*write)(u64 val, u32 offset, bool relaxed,
+ bool _64bit);
+ };
+ };
+};
+
+#define CSDEV_ACCESS_IOMEM(_addr) \
+ ((struct csdev_access) { \
+ .io_mem = true, \
+ .base = (_addr), \
+ })
+
+/**
* struct coresight_desc - description of a component required from drivers
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
@@ -125,6 +152,7 @@ struct coresight_platform_data {
* @groups: operations specific to this component. These will end up
* in the component's sysfs sub-directory.
* @name: name for the coresight device, also shown under sysfs.
+ * @access: Describes access to the device
*/
struct coresight_desc {
enum coresight_dev_type type;
@@ -134,6 +162,7 @@ struct coresight_desc {
struct device *dev;
const struct attribute_group **groups;
const char *name;
+ struct csdev_access access;
};
/**
@@ -173,7 +202,8 @@ struct coresight_sysfs_link {
* @type: as defined by @coresight_dev_type.
* @subtype: as defined by @coresight_dev_subtype.
* @ops: generic operations for this component, as defined
- by @coresight_ops.
+ * by @coresight_ops.
+ * @access: Device i/o access abstraction for this device.
* @dev: The device entity associated to this component.
* @refcnt: keep track of what is in use.
* @orphan: true if the component has connections that haven't been linked.
@@ -195,6 +225,7 @@ struct coresight_device {
enum coresight_dev_type type;
union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
+ struct csdev_access access;
struct device dev;
atomic_t *refcnt;
bool orphan;
@@ -326,23 +357,133 @@ struct coresight_ops {
};
#if IS_ENABLED(CONFIG_CORESIGHT)
+
+static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, false);
+}
+
+static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readl(csa->base + offset);
+
+ return csa->read(offset, false, false);
+}
+
+static inline void csdev_access_relaxed_write32(struct csdev_access *csa,
+ u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, false);
+}
+
+static inline void csdev_access_write32(struct csdev_access *csa, u32 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writel(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, false);
+}
+
+#ifdef CONFIG_64BIT
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq_relaxed(csa->base + offset);
+
+ return csa->read(offset, true, true);
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ if (likely(csa->io_mem))
+ return readq(csa->base + offset);
+
+ return csa->read(offset, false, true);
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq_relaxed(val, csa->base + offset);
+ else
+ csa->write(val, offset, true, true);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ if (likely(csa->io_mem))
+ writeq(val, csa->base + offset);
+ else
+ csa->write(val, offset, false, true);
+}
+
+#else /* !CONFIG_64BIT */
+
+static inline u64 csdev_access_relaxed_read64(struct csdev_access *csa,
+ u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline u64 csdev_access_read64(struct csdev_access *csa, u32 offset)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline void csdev_access_relaxed_write64(struct csdev_access *csa,
+ u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+
+static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 offset)
+{
+ WARN_ON(1);
+}
+#endif /* CONFIG_64BIT */
+
extern struct coresight_device *
coresight_register(struct coresight_desc *desc);
extern void coresight_unregister(struct coresight_device *csdev);
extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
-extern int coresight_timeout(void __iomem *addr, u32 offset,
+extern int coresight_timeout(struct csdev_access *csa, u32 offset,
int position, int value);
-extern int coresight_claim_device(void __iomem *base);
-extern int coresight_claim_device_unlocked(void __iomem *base);
+extern int coresight_claim_device(struct coresight_device *csdev);
+extern int coresight_claim_device_unlocked(struct coresight_device *csdev);
-extern void coresight_disclaim_device(void __iomem *base);
-extern void coresight_disclaim_device_unlocked(void __iomem *base);
+extern void coresight_disclaim_device(struct coresight_device *csdev);
+extern void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
struct device *dev);
extern bool coresight_loses_context_with_cpu(struct device *dev);
+
+u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset);
+u32 coresight_read32(struct coresight_device *csdev, u32 offset);
+void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset);
+void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset);
+u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset);
+u64 coresight_read64(struct coresight_device *csdev, u32 offset);
+void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset);
+void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset);
+
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -350,29 +491,78 @@ static inline void coresight_unregister(struct coresight_device *csdev) {}
static inline int
coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
-static inline int coresight_timeout(void __iomem *addr, u32 offset,
- int position, int value) { return 1; }
-static inline int coresight_claim_device_unlocked(void __iomem *base)
+
+static inline int coresight_timeout(struct csdev_access *csa, u32 offset,
+ int position, int value)
+{
+ return 1;
+}
+
+static inline int coresight_claim_device_unlocked(struct coresight_device *csdev)
{
return -EINVAL;
}
-static inline int coresight_claim_device(void __iomem *base)
+static inline int coresight_claim_device(struct coresight_device *csdev)
{
return -EINVAL;
}
-static inline void coresight_disclaim_device(void __iomem *base) {}
-static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
+static inline void coresight_disclaim_device(struct coresight_device *csdev) {}
+static inline void coresight_disclaim_device_unlocked(struct coresight_device *csdev) {}
static inline bool coresight_loses_context_with_cpu(struct device *dev)
{
return false;
}
-#endif
+
+static inline u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline u32 coresight_read32(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset)
+{
+}
+
+static inline void coresight_relaxed_write32(struct coresight_device *csdev,
+ u32 val, u32 offset)
+{
+}
+
+static inline u64 coresight_relaxed_read64(struct coresight_device *csdev,
+ u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline u64 coresight_read64(struct coresight_device *csdev, u32 offset)
+{
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static inline void coresight_relaxed_write64(struct coresight_device *csdev,
+ u64 val, u32 offset)
+{
+}
+
+static inline void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_CORESIGHT) */
extern int coresight_get_cpu(struct device *dev);
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
-#endif
+#endif /* _LINUX_CORESIGHT_H */
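A hedged sketch of the new access abstraction; drvdata->base and the register offset are hypothetical.

    /* Sketch: describe an MMIO component and read a register through it. */
    struct csdev_access csa = CSDEV_ACCESS_IOMEM(drvdata->base);
    u32 val = csdev_access_relaxed_read32(&csa, 0x000 /* hypothetical offset */);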
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d6428aaf67e7..3aaa0687e8df 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -111,6 +111,8 @@ static inline void cpu_maps_update_done(void)
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
+extern int lockdep_is_cpus_held(void);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 9c8b7437b6cd..353969c7acd3 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -387,14 +387,22 @@ struct cpufreq_driver {
/* flags */
-/* driver isn't removed even if all ->init() calls failed */
-#define CPUFREQ_STICKY BIT(0)
+/*
+ * Set by drivers that need to update internal upper and lower boundaries along
+ * with the target frequency, so the core and governors should also invoke the
+ * driver if the target frequency does not change but the policy min or max may
+ * have changed.
+ */
+#define CPUFREQ_NEED_UPDATE_LIMITS BIT(0)
/* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */
#define CPUFREQ_CONST_LOOPS BIT(1)
-/* don't warn on suspend/resume speed mismatches */
-#define CPUFREQ_PM_NO_WARN BIT(2)
+/*
+ * Set by drivers that want the core to automatically register the cpufreq
+ * driver as a thermal cooling device.
+ */
+#define CPUFREQ_IS_COOLING_DEV BIT(2)
/*
* This should be set by platforms having multiple clock-domains, i.e.
@@ -426,20 +434,6 @@ struct cpufreq_driver {
*/
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
-/*
- * Set by drivers that want the core to automatically register the cpufreq
- * driver as a thermal cooling device.
- */
-#define CPUFREQ_IS_COOLING_DEV BIT(7)
-
-/*
- * Set by drivers that need to update internale upper and lower boundaries along
- * with the target frequency and so the core and governors should also invoke
- * the diver if the target frequency does not change, but the policy min or max
- * may have changed.
- */
-#define CPUFREQ_NEED_UPDATE_LIMITS BIT(8)
-
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
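Since only the bit positions change, existing drivers keep using the same symbolic names; a hedged example declaration (the driver name is hypothetical).

    /* Sketch: a driver opting into both renumbered flags by name. */
    static struct cpufreq_driver example_driver = {
        .name  = "example-cpufreq",
        .flags = CPUFREQ_NEED_UPDATE_LIMITS | CPUFREQ_IS_COOLING_DEV,
    };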
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 0042ef362511..f14adb882338 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -168,6 +168,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_CQM_ONLINE,
CPUHP_AP_PERF_X86_CSTATE_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
+ CPUHP_AP_PERF_S390_CFD_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
@@ -185,6 +186,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
+ CPUHP_AP_PERF_CSKY_ONLINE,
CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
@@ -193,6 +195,7 @@ enum cpuhp_state {
CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
+ CPUHP_AP_DTPM_CPU_ONLINE,
CPUHP_AP_ACTIVE,
CPUHP_ONLINE,
};
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 18639c069263..4c6350503697 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -25,7 +25,7 @@ struct inode;
struct group_info {
atomic_t usage;
int ngroups;
- kgid_t gid[0];
+ kgid_t gid[];
} __randomize_layout;
/**
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index ef90e07c9635..da5e0d74bb2f 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -151,9 +151,12 @@
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
* declaration) is used to ensure that the crypto_tfm context structure is
* aligned correctly for the given architecture so that there are no alignment
- * faults for C data types. In particular, this is required on platforms such
- * as arm where pointers are 32-bit aligned but there are data types such as
- * u64 which require 64-bit alignment.
+ * faults for C data types. On architectures that support non-cache coherent
+ * DMA, such as ARM or arm64, it also takes into account the minimal alignment
+ * that is required to ensure that the context struct member does not share any
+ * cachelines with the rest of the struct. This is needed to ensure that cache
+ * maintenance for non-coherent DMA (cache invalidation in particular) does not
+ * affect data that may be accessed by the CPU concurrently.
*/
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
@@ -636,10 +639,6 @@ struct crypto_tfm {
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
-struct crypto_cipher {
- struct crypto_tfm base;
-};
-
struct crypto_comp {
struct crypto_tfm base;
};
@@ -743,165 +742,6 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
return __alignof__(tfm->__crt_ctx);
}
-/**
- * DOC: Single Block Cipher API
- *
- * The single block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
- *
- * Using the single block cipher API calls, operations with the basic cipher
- * primitive can be implemented. These cipher primitives exclude any block
- * chaining operations including IV handling.
- *
- * The purpose of this single block cipher API is to support the implementation
- * of templates or other concepts that only need to perform the cipher operation
- * on one block at a time. Templates invoke the underlying cipher primitive
- * block-wise and process either the input or the output data of these cipher
- * operations.
- */
-
-static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_cipher *)tfm;
-}
-
-/**
- * crypto_alloc_cipher() - allocate single block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a single block cipher. The returned struct
- * crypto_cipher is the cipher handle that is required for any subsequent API
- * invocation for that single block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_cipher() - zeroize and free the single block cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_cipher(struct crypto_cipher *tfm)
-{
- crypto_free_tfm(crypto_cipher_tfm(tfm));
-}
-
-/**
- * crypto_has_cipher() - Search for the availability of a single block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the single block cipher is known to the kernel crypto API;
- * false otherwise
- */
-static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-/**
- * crypto_cipher_blocksize() - obtain block size for cipher
- * @tfm: cipher handle
- *
- * The block size for the single block cipher referenced with the cipher handle
- * tfm is returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
-}
-
-static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
-}
-
-static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_cipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the single block cipher referenced by the
- * cipher handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-int crypto_cipher_setkey(struct crypto_cipher *tfm,
- const u8 *key, unsigned int keylen);
-
-/**
- * crypto_cipher_encrypt_one() - encrypt one block of plaintext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the ciphertext
- * @src: buffer holding the plaintext to be encrypted
- *
- * Invoke the encryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src);
-
-/**
- * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the plaintext
- * @src: buffer holding the ciphertext to be decrypted
- *
- * Invoke the decryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src);
-
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d7b369fc15d3..c1e48014106f 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -262,6 +262,8 @@ extern void d_tmpfile(struct dentry *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
+extern struct dentry *d_find_alias_rcu(struct inode *);
+
/* test whether we have any submounts in a subdir tree */
extern int path_has_submounts(const struct path *);
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
deleted file mode 100644
index ddfdac20cad0..000000000000
--- a/include/linux/dcookies.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * dcookies.h
- *
- * Persistent cookie-path mappings
- *
- * Copyright 2002 John Levon <levon@movementarian.org>
- */
-
-#ifndef DCOOKIES_H
-#define DCOOKIES_H
-
-
-#ifdef CONFIG_PROFILING
-
-#include <linux/dcache.h>
-#include <linux/types.h>
-
-struct dcookie_user;
-struct path;
-
-/**
- * dcookie_register - register a user of dcookies
- *
- * Register as a dcookie user. Returns %NULL on failure.
- */
-struct dcookie_user * dcookie_register(void);
-
-/**
- * dcookie_unregister - unregister a user of dcookies
- *
- * Unregister as a dcookie user. This may invalidate
- * any dcookie values returned from get_dcookie().
- */
-void dcookie_unregister(struct dcookie_user * user);
-
-/**
- * get_dcookie - acquire a dcookie
- *
- * Convert the given dentry/vfsmount pair into
- * a cookie value.
- *
- * Returns -EINVAL if no living task has registered as a
- * dcookie user.
- *
- * Returns 0 on success, with *cookie filled in
- */
-int get_dcookie(const struct path *path, unsigned long *cookie);
-
-#else
-
-static inline struct dcookie_user * dcookie_register(void)
-{
- return NULL;
-}
-
-static inline void dcookie_unregister(struct dcookie_user * user)
-{
- return;
-}
-
-static inline int get_dcookie(const struct path *path, unsigned long *cookie)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_PROFILING */
-
-#endif /* DCOOKIES_H */
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index b6d3bae1c74d..26ea0850be9b 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -137,6 +137,7 @@ struct devfreq_stats {
* using devfreq.
* @profile: device-specific devfreq profile
* @governor: method how to choose frequency based on the usage.
+ * @opp_table: Reference to OPP table of dev.parent, if one exists.
* @nb: notifier block used to notify devfreq object that it should
* reevaluate operable frequencies. Devfreq users may use
* devfreq.nb to the corresponding register notifier call chain.
@@ -173,6 +174,7 @@ struct devfreq {
struct device dev;
struct devfreq_dev_profile *profile;
const struct devfreq_governor *governor;
+ struct opp_table *opp_table;
struct notifier_block nb;
struct delayed_work work;
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 61a66fb8ebb3..7f4ac87c0b32 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -93,9 +93,18 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
+#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
struct dm_report_zones_args *args,
unsigned int nr_zones);
+#else
+/*
+ * Define dm_report_zones_fn so that targets can assign it to NULL if
+ * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target would need
+ * awkward #ifdefs in its target_type, etc.
+ */
+typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
+#endif
/*
* These iteration functions are typically used to check (and combine)
@@ -187,9 +196,7 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_prepare_ioctl_fn prepare_ioctl;
-#ifdef CONFIG_BLK_DEV_ZONED
dm_report_zones_fn report_zones;
-#endif
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
@@ -248,8 +255,13 @@ struct target_type {
/*
* Indicates that a target supports host-managed zoned block devices.
*/
+#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM 0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
+#else
+#define DM_TARGET_ZONED_HM 0x00000000
+#define dm_target_supports_zoned_hm(type) (false)
+#endif
/*
* A target handles REQ_NOWAIT
@@ -257,6 +269,12 @@ struct target_type {
#define DM_TARGET_NOWAIT 0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)
+/*
+ * A target supports passing through inline crypto support.
+ */
+#define DM_TARGET_PASSES_CRYPTO 0x00000100
+#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
@@ -325,6 +343,11 @@ struct dm_target {
* whether or not its underlying devices have support.
*/
bool discards_supported:1;
+
+ /*
+ * Set if we need to limit the number of in-flight bios when swapping.
+ */
+ bool limit_swap_bios:1;
};
void *dm_per_bio_data(struct bio *bio, size_t data_size);
@@ -534,6 +557,11 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
struct dm_table *t);
/*
+ * Table keyslot manager functions
+ */
+void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);
+
+/*
* A wrapper around vmalloc.
*/
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
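With the stub typedef, a target can assign .report_zones unconditionally; a hedged sketch in which the example_* names are hypothetical.

    /* Sketch: zoned-aware target without #ifdef CONFIG_BLK_DEV_ZONED. */
    static struct target_type example_target = {
        .name         = "example",
        .features     = DM_TARGET_ZONED_HM | DM_TARGET_NOWAIT,
        .report_zones = example_report_zones,
    };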
diff --git a/include/linux/device.h b/include/linux/device.h
index 1779f90eeb4c..ba660731bd25 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -291,6 +291,7 @@ struct device_dma_parameters {
* sg limitations.
*/
unsigned int max_segment_size;
+ unsigned int min_align_mask;
unsigned long segment_boundary_mask;
};
@@ -323,6 +324,7 @@ enum device_link_state {
* AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
* MANAGED: The core tracks presence of supplier/consumer drivers (internal).
* SYNC_STATE_ONLY: Link only affects sync_state() behavior.
+ * INFERRED: Inferred from data (e.g. firmware) and not from driver actions.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
@@ -332,6 +334,7 @@ enum device_link_state {
#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
#define DL_FLAG_MANAGED BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
+#define DL_FLAG_INFERRED BIT(8)
/**
* enum dl_dev_state - Device driver presence tracking information.
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index ee7ba5b5417e..a498ebcf4993 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -75,7 +75,7 @@ enum probe_type {
* @resume: Called to bring a device from sleep mode.
* @groups: Default attributes that get created by the driver core
* automatically.
- * @dev_groups: Additional attributes attached to device instance once the
+ * @dev_groups: Additional attributes attached to device instance once
* it is bound to the driver.
* @pm: Power management operations of the device which matched
* this driver.
diff --git a/include/linux/dfl.h b/include/linux/dfl.h
new file mode 100644
index 000000000000..6cc10982351a
--- /dev/null
+++ b/include/linux/dfl.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for DFL driver and device API
+ *
+ * Copyright (C) 2020 Intel Corporation, Inc.
+ */
+
+#ifndef __LINUX_DFL_H
+#define __LINUX_DFL_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * enum dfl_id_type - define the DFL FIU types
+ */
+enum dfl_id_type {
+ FME_ID = 0,
+ PORT_ID = 1,
+ DFL_ID_MAX,
+};
+
+/**
+ * struct dfl_device - represent a dfl device on the dfl bus
+ *
+ * @dev: generic device interface.
+ * @id: id of the dfl device.
+ * @type: type of DFL FIU of the device. See enum dfl_id_type.
+ * @feature_id: feature identifier local to its DFL FIU type.
+ * @mmio_res: mmio resource of this dfl device.
+ * @irqs: list of Linux IRQ numbers of this dfl device.
+ * @num_irqs: number of IRQs supported by this dfl device.
+ * @cdev: pointer to DFL FPGA container device this dfl device belongs to.
+ * @id_entry: matched id entry in dfl driver's id table.
+ */
+struct dfl_device {
+ struct device dev;
+ int id;
+ u16 type;
+ u16 feature_id;
+ struct resource mmio_res;
+ int *irqs;
+ unsigned int num_irqs;
+ struct dfl_fpga_cdev *cdev;
+ const struct dfl_device_id *id_entry;
+};
+
+/**
+ * struct dfl_driver - represent a dfl device driver
+ *
+ * @drv: driver model structure.
+ * @id_table: pointer to table of device IDs the driver is interested in.
+ * { } member terminated.
+ * @probe: mandatory callback for device binding.
+ * @remove: callback for device unbinding.
+ */
+struct dfl_driver {
+ struct device_driver drv;
+ const struct dfl_device_id *id_table;
+
+ int (*probe)(struct dfl_device *dfl_dev);
+ void (*remove)(struct dfl_device *dfl_dev);
+};
+
+#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
+#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE.
+ */
+#define dfl_driver_register(drv) \
+ __dfl_driver_register(drv, THIS_MODULE)
+int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner);
+void dfl_driver_unregister(struct dfl_driver *dfl_drv);
+
+/*
+ * module_dfl_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dfl_driver(__dfl_driver) \
+ module_driver(__dfl_driver, dfl_driver_register, \
+ dfl_driver_unregister)
+
+#endif /* __LINUX_DFL_H */
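A minimal, hedged sketch of a driver for the new DFL bus; the example_* names and the feature ID value are hypothetical.

    /* Sketch: match an FME feature and bind with the boilerplate macro. */
    static const struct dfl_device_id example_ids[] = {
        { FME_ID, 0x23 },
        { }
    };

    static struct dfl_driver example_dfl_driver = {
        .drv      = { .name = "dfl-example" },
        .id_table = example_ids,
        .probe    = example_probe,
    };
    module_dfl_driver(example_dfl_driver);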
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index cf72699cb2bc..efdc56b9d95f 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -85,14 +85,16 @@ struct dma_buf_ops {
/**
* @pin:
*
- * This is called by dma_buf_pin and lets the exporter know that the
- * DMA-buf can't be moved any more.
+ * This is called by dma_buf_pin() and lets the exporter know that the
+ * DMA-buf can't be moved any more. The exporter should pin the buffer
+ * into system memory to make sure it is generally accessible by other
+ * devices.
*
- * This is called with the dmabuf->resv object locked and is mutual
+ * This is called with the &dmabuf.resv object locked and is mutually
* exclusive with @cache_sgt_mapping.
*
- * This callback is optional and should only be used in limited use
- * cases like scanout and not for temporary pin operations.
+ * This is called automatically for non-dynamic importers from
+ * dma_buf_attach().
*
* Returns:
*
@@ -103,7 +105,7 @@ struct dma_buf_ops {
/**
* @unpin:
*
- * This is called by dma_buf_unpin and lets the exporter know that the
+ * This is called by dma_buf_unpin() and lets the exporter know that the
* DMA-buf can be moved again.
*
* This is called with the dmabuf->resv object locked and is mutual
@@ -152,6 +154,12 @@ struct dma_buf_ops {
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being
* blocked.
+ *
+ * Note that exporters should not try to cache the scatter list, or
+ * return the same one for multiple calls. Caching is done either by the
+ * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
+ * of the scatter list is transferred to the caller, and returned by
+ * @unmap_dma_buf.
*/
struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
enum dma_data_direction);
@@ -183,24 +191,19 @@ struct dma_buf_ops {
* @begin_cpu_access:
*
* This is called from dma_buf_begin_cpu_access() and allows the
- * exporter to ensure that the memory is actually available for cpu
- * access - the exporter might need to allocate or swap-in and pin the
- * backing storage. The exporter also needs to ensure that cpu access is
- * coherent for the access direction. The direction can be used by the
- * exporter to optimize the cache flushing, i.e. access with a different
+ * exporter to ensure that the memory is actually coherent for cpu
+ * access. The exporter also needs to ensure that cpu access is coherent
+ * for the access direction. The direction can be used by the exporter
+ * to optimize the cache flushing, i.e. access with a different
* direction (read instead of write) might return stale or even bogus
* data (e.g. when the exporter needs to copy the data to temporary
* storage).
*
- * This callback is optional.
+ * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
+ * command for userspace mappings established through @mmap, and also
+ * for kernel mappings established with @vmap.
*
- * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
- * from userspace (where storage shouldn't be pinned to avoid handing
- * de-factor mlock rights to userspace) and for the kernel-internal
- * users of the various kmap interfaces, where the backing storage must
- * be pinned to guarantee that the atomic kmap calls can succeed. Since
- * there's no in-kernel users of the kmap interfaces yet this isn't a
- * real problem.
+ * This callback is optional.
*
* Returns:
*
@@ -216,9 +219,7 @@ struct dma_buf_ops {
*
* This is called from dma_buf_end_cpu_access() when the importer is
* done accessing the CPU. The exporter can use this to flush caches and
- * unpin any resources pinned in @begin_cpu_access.
- * The result of any dma_buf kmap calls after end_cpu_access is
- * undefined.
+ * undo anything else done in @begin_cpu_access.
*
* This callback is optional.
*
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 09e23adb351d..9f12efaaa93a 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -372,6 +372,9 @@ static inline void __dma_fence_might_wait(void) {}
int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+ ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 454e354d1ffb..5bc5c946af58 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -16,15 +16,15 @@ struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
- * @allocate: allocate dmabuf and return fd
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
*
- * allocate returns dmabuf fd on success, -errno on error.
+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
struct dma_heap_ops {
- int (*allocate)(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags);
+ struct dma_buf *(*allocate)(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags);
};
/**
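With the reworked @allocate signature the heap driver hands back the dma_buf itself and the dma-heap core creates the file descriptor for userspace. A hedged sketch of the new contract; example_heap_do_alloc() is a placeholder for the heap's real allocation and dma_buf_export() path, not an existing helper:

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>

/* Placeholder for the heap's backing allocation + dma_buf_export(). */
static struct dma_buf *example_heap_do_alloc(struct dma_heap *heap,
					     unsigned long len,
					     unsigned long fd_flags,
					     unsigned long heap_flags);

static struct dma_buf *example_heap_allocate(struct dma_heap *heap,
					     unsigned long len,
					     unsigned long fd_flags,
					     unsigned long heap_flags)
{
	struct dma_buf *dmabuf;

	dmabuf = example_heap_do_alloc(heap, len, fd_flags, heap_flags);
	if (!dmabuf)
		return ERR_PTR(-ENOMEM);	/* ERR_PTR(-errno), not an fd */

	return dmabuf;	/* the core turns this into an fd */
}

static const struct dma_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,
};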
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 70fcd0f610ea..51872e736e7b 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -22,11 +22,6 @@ struct dma_map_ops {
gfp_t gfp);
void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
dma_addr_t dma_handle, enum dma_data_direction dir);
- void *(*alloc_noncoherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, enum dma_data_direction dir,
- gfp_t gfp);
- void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, enum dma_data_direction dir);
int (*mmap)(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t, unsigned long attrs);
@@ -229,11 +224,10 @@ bool dma_free_from_pool(struct device *dev, void *start, size_t size);
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
dma_addr_t dma_start, u64 size);
-#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
-#include <asm/dma-coherence.h>
-#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
return dev->dma_coherent;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 2e49996a8f39..2a984cb4d1e0 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -263,10 +263,19 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
dma_addr_t dma_handle, enum dma_data_direction dir);
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, enum dma_data_direction dir);
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+ struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
+ return page ? page_address(page) : NULL;
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
+{
+ dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
+}
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
@@ -500,6 +509,22 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
+static inline unsigned int dma_get_min_align_mask(struct device *dev)
+{
+ if (dev->dma_parms)
+ return dev->dma_parms->min_align_mask;
+ return 0;
+}
+
+static inline int dma_set_min_align_mask(struct device *dev,
+ unsigned int min_align_mask)
+{
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return -EIO;
+ dev->dma_parms->min_align_mask = min_align_mask;
+ return 0;
+}
+
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
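dma_alloc_noncoherent()/dma_free_noncoherent() are now thin wrappers around dma_alloc_pages()/dma_free_pages(). A sketch of typical driver usage with illustrative names; the direction is arbitrary, and the usual dma_sync_single_for_cpu()/for_device() bracketing still applies because the memory is not coherent:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_alloc_rx_ring(struct device *dev, size_t size,
				   dma_addr_t *dma)
{
	/* Returns a kernel virtual address; *dma holds the device address. */
	return dma_alloc_noncoherent(dev, size, dma, DMA_FROM_DEVICE,
				     GFP_KERNEL);
}

static void example_free_rx_ring(struct device *dev, size_t size,
				 void *ring, dma_addr_t dma)
{
	dma_free_noncoherent(dev, size, ring, dma, DMA_FROM_DEVICE);
}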
diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h
index 36e22c5a0f29..5f106d852f1c 100644
--- a/include/linux/dma/k3-psil.h
+++ b/include/linux/dma/k3-psil.h
@@ -42,14 +42,14 @@ enum psil_endpoint_type {
/**
* struct psil_endpoint_config - PSI-L Endpoint configuration
* @ep_type: PSI-L endpoint type
+ * @channel_tpl: Desired throughput level for the channel
* @pkt_mode: If set, the channel must be in Packet mode, otherwise in
* TR mode
* @notdpkt: TDCM must be suppressed on the TX channel
* @needs_epib: Endpoint needs EPIB
- * @psd_size: If set, PSdata is used by the endpoint
- * @channel_tpl: Desired throughput level for the channel
* @pdma_acc32: ACC32 must be enabled on the PDMA side
* @pdma_burst: BURST must be enabled on the PDMA side
+ * @psd_size: If set, PSdata is used by the endpoint
* @mapped_channel_id: PKTDMA thread to channel mapping for mapped channels.
* The thread must be serviced by the specified channel if
* mapped_channel_id is >= 0 in case of PKTDMA
@@ -62,23 +62,22 @@ enum psil_endpoint_type {
*/
struct psil_endpoint_config {
enum psil_endpoint_type ep_type;
+ enum udma_tp_level channel_tpl;
unsigned pkt_mode:1;
unsigned notdpkt:1;
unsigned needs_epib:1;
- u32 psd_size;
- enum udma_tp_level channel_tpl;
-
/* PDMA properties, valid for PSIL_EP_PDMA_* */
unsigned pdma_acc32:1;
unsigned pdma_burst:1;
+ u32 psd_size;
/* PKDMA mapped channel */
- int mapped_channel_id;
+ s16 mapped_channel_id;
/* PKTDMA tflow and rflow ranges for mapped channel */
u16 flow_start;
u16 flow_num;
- u16 default_flow_id;
+ s16 default_flow_id;
};
int psil_set_new_ep_config(struct device *dev, const char *name,
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
deleted file mode 100644
index 25cab62a28c4..000000000000
--- a/include/linux/dma/mmp-pdma.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MMP_PDMA_H_
-#define _MMP_PDMA_H_
-
-struct dma_chan;
-
-#ifdef CONFIG_MMP_PDMA
-bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
-#else
-static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
-{
- return false;
-}
-#endif
-
-#endif /* _MMP_PDMA_H_ */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 68130f5f599e..004736b6a9c8 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -745,6 +745,8 @@ enum dmaengine_alignment {
DMAENGINE_ALIGN_16_BYTES = 4,
DMAENGINE_ALIGN_32_BYTES = 5,
DMAENGINE_ALIGN_64_BYTES = 6,
+ DMAENGINE_ALIGN_128_BYTES = 7,
+ DMAENGINE_ALIGN_256_BYTES = 8,
};
/**
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 65565820328a..e04436a7ff27 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -138,6 +138,7 @@ extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
@@ -149,6 +150,7 @@ static inline void intel_iommu_shutdown(void) { }
#define dmar_parse_one_atsr dmar_res_noop
#define dmar_check_one_atsr dmar_res_noop
#define dmar_release_one_atsr dmar_res_noop
+#define dmar_parse_one_satc dmar_res_noop
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index 88cd72dfa4e0..b12b05f1c8b4 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -64,6 +64,10 @@ int dsa_8021q_rx_source_port(u16 vid);
u16 dsa_8021q_rx_subvlan(u16 vid);
+bool vid_is_dsa_8021q_rxvlan(u16 vid);
+
+bool vid_is_dsa_8021q_txvlan(u16 vid);
+
bool vid_is_dsa_8021q(u16 vid);
#else
@@ -123,6 +127,16 @@ u16 dsa_8021q_rx_subvlan(u16 vid)
return 0;
}
+bool vid_is_dsa_8021q_rxvlan(u16 vid)
+{
+ return false;
+}
+
+bool vid_is_dsa_8021q_txvlan(u16 vid)
+{
+ return false;
+}
+
bool vid_is_dsa_8021q(u16 vid)
{
return false;
diff --git a/include/linux/dsa/brcm.h b/include/linux/dsa/brcm.h
new file mode 100644
index 000000000000..47545a948784
--- /dev/null
+++ b/include/linux/dsa/brcm.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2014 Broadcom Corporation
+ */
+
+/* Included by drivers/net/ethernet/broadcom/bcmsysport.c and
+ * net/dsa/tag_brcm.c
+ */
+#ifndef _NET_DSA_BRCM_H
+#define _NET_DSA_BRCM_H
+
+/* Broadcom tag specific helpers to insert and extract queue/port number */
+#define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q)
+#define BRCM_TAG_GET_PORT(v) ((v) >> 8)
+#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
+
+#endif
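A short worked example of the tag helpers (the values are arbitrary): packing port 2 and queue 5 into a tag word and unpacking them again.

#include <linux/bug.h>
#include <linux/dsa/brcm.h>
#include <linux/types.h>

static void brcm_tag_demo(void)
{
	u16 tag = BRCM_TAG_SET_PORT_QUEUE(2, 5);	/* 0x0205 */

	WARN_ON(BRCM_TAG_GET_PORT(tag) != 2);
	WARN_ON(BRCM_TAG_GET_QUEUE(tag) != 5);
}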
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
new file mode 100644
index 000000000000..4265f328681a
--- /dev/null
+++ b/include/linux/dsa/ocelot.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2019-2021 NXP Semiconductors
+ */
+
+#ifndef _NET_DSA_TAG_OCELOT_H
+#define _NET_DSA_TAG_OCELOT_H
+
+#include <linux/packing.h>
+
+#define OCELOT_TAG_LEN 16
+#define OCELOT_SHORT_PREFIX_LEN 4
+#define OCELOT_LONG_PREFIX_LEN 16
+#define OCELOT_TOTAL_TAG_LEN (OCELOT_SHORT_PREFIX_LEN + OCELOT_TAG_LEN)
+
+/* The CPU injection header and the CPU extraction header can have 3 types of
+ * prefixes: long, short and no prefix. The format of the header itself is the
+ * same in all 3 cases.
+ *
+ * Extraction with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | ff:ff:ff:ff:ff:ff | fe:ff:ff:ff:ff:ff | 8880 | 000a | extraction | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Extraction with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | extraction | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Extraction with no prefix:
+ *
+ * +------------+-------+
+ * | extraction | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ *
+ * Injection with long prefix:
+ *
+ * +-------------------+-------------------+------+------+------------+-------+
+ * | any dmac | any smac | 8880 | 000a | injection | frame |
+ * | | | | | header | |
+ * +-------------------+-------------------+------+------+------------+-------+
+ * 48 bits 48 bits 16 bits 16 bits 128 bits
+ *
+ * Injection with short prefix:
+ *
+ * +------+------+------------+-------+
+ * | 8880 | 000a | injection | frame |
+ * | | | header | |
+ * +------+------+------------+-------+
+ * 16 bits 16 bits 128 bits
+ *
+ * Injection with no prefix:
+ *
+ * +------------+-------+
+ * | injection | frame |
+ * | header | |
+ * +------------+-------+
+ * 128 bits
+ *
+ * The injection header looks like this (network byte order, bit 127
+ * is part of lowest address byte in memory, bit 0 is part of highest
+ * address byte):
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 |BYPASS| MASQ | MASQ_PORT |REW_OP|REW_OP|
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | RSV | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | DEST |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | RSV |TFRM_TIMER|
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | TFRM_TIMER | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 | RSV | DP | POP_CNT | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ *
+ * And the extraction header looks like this:
+ *
+ * +------+------+------+------+------+------+------+------+
+ * 127:120 | RSV | REW_OP |
+ * +------+------+------+------+------+------+------+------+
+ * 119:112 | REW_OP | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 111:104 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 103: 96 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 95: 88 | REW_VAL |
+ * +------+------+------+------+------+------+------+------+
+ * 87: 80 | REW_VAL | LLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 79: 72 | LLEN | WLEN |
+ * +------+------+------+------+------+------+------+------+
+ * 71: 64 | WLEN | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 63: 56 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 55: 48 | RSV |
+ * +------+------+------+------+------+------+------+------+
+ * 47: 40 | RSV | SRC_PORT | ACL_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 39: 32 | ACL_ID | RSV | SFLOW_ID |
+ * +------+------+------+------+------+------+------+------+
+ * 31: 24 |ACL_HIT| DP | LRN_FLAGS | CPUQ |
+ * +------+------+------+------+------+------+------+------+
+ * 23: 16 | CPUQ | QOS_CLASS |TAG_TYPE|
+ * +------+------+------+------+------+------+------+------+
+ * 15: 8 | PCP | DEI | VID |
+ * +------+------+------+------+------+------+------+------+
+ * 7: 0 | VID |
+ * +------+------+------+------+------+------+------+------+
+ */
+
+static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
+{
+ packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_len(void *extraction, u64 *len)
+{
+ u64 llen, wlen;
+
+ packing(extraction, &llen, 84, 79, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &wlen, 78, 71, OCELOT_TAG_LEN, UNPACK, 0);
+
+ *len = 60 * wlen + llen - 80;
+}
+
+static inline void ocelot_xfh_get_src_port(void *extraction, u64 *src_port)
+{
+ packing(extraction, src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_cpuq(void *extraction, u64 *cpuq)
+{
+ packing(extraction, cpuq, 28, 20, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_qos_class(void *extraction, u64 *qos_class)
+{
+ packing(extraction, qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_tag_type(void *extraction, u64 *tag_type)
+{
+ packing(extraction, tag_type, 16, 16, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_xfh_get_vlan_tci(void *extraction, u64 *vlan_tci)
+{
+ packing(extraction, vlan_tci, 15, 0, OCELOT_TAG_LEN, UNPACK, 0);
+}
+
+static inline void ocelot_ifh_set_bypass(void *injection, u64 bypass)
+{
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_rew_op(void *injection, u64 rew_op)
+{
+ packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 56, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_qos_class(void *injection, u64 qos_class)
+{
+ packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void seville_ifh_set_dest(void *injection, u64 dest)
+{
+ packing(injection, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_src(void *injection, u64 src)
+{
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type)
+{
+ packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static inline void ocelot_ifh_set_vid(void *injection, u64 vid)
+{
+ packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0);
+}
+
+#endif
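A hedged sketch of how a tagger could consume the extraction helpers above, assuming the 128-bit extraction header sits at the start of skb->data (the "no prefix" layout); the function name is made up and the skb handling is deliberately simplified:

#include <linux/dsa/ocelot.h>
#include <linux/skbuff.h>

static void example_parse_xfh(struct sk_buff *skb)
{
	void *extraction = skb->data;	/* "no prefix" layout assumed */
	u64 src_port, qos_class, vlan_tci;

	ocelot_xfh_get_src_port(extraction, &src_port);
	ocelot_xfh_get_qos_class(extraction, &qos_class);
	ocelot_xfh_get_vlan_tci(extraction, &vlan_tci);

	skb->priority = qos_class;
	/* src_port would be used to look up the DSA user port. */
}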
diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h
new file mode 100644
index 000000000000..e80a332e3d8a
--- /dev/null
+++ b/include/linux/dtpm.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ */
+#ifndef ___DTPM_H__
+#define ___DTPM_H__
+
+#include <linux/powercap.h>
+
+#define MAX_DTPM_DESCR 8
+#define MAX_DTPM_CONSTRAINTS 1
+
+struct dtpm {
+ struct powercap_zone zone;
+ struct dtpm *parent;
+ struct list_head sibling;
+ struct list_head children;
+ struct dtpm_ops *ops;
+ unsigned long flags;
+ u64 power_limit;
+ u64 power_max;
+ u64 power_min;
+ int weight;
+ void *private;
+};
+
+struct dtpm_ops {
+ u64 (*set_power_uw)(struct dtpm *, u64);
+ u64 (*get_power_uw)(struct dtpm *);
+ void (*release)(struct dtpm *);
+};
+
+struct dtpm_descr;
+
+typedef int (*dtpm_init_t)(struct dtpm_descr *);
+
+struct dtpm_descr {
+ struct dtpm *parent;
+ const char *name;
+ dtpm_init_t init;
+};
+
+/* Init section thermal table */
+extern struct dtpm_descr *__dtpm_table[];
+extern struct dtpm_descr *__dtpm_table_end[];
+
+#define DTPM_TABLE_ENTRY(name) \
+ static typeof(name) *__dtpm_table_entry_##name \
+ __used __section("__dtpm_table") = &name
+
+#define DTPM_DECLARE(name) DTPM_TABLE_ENTRY(name)
+
+#define for_each_dtpm_table(__dtpm) \
+ for (__dtpm = __dtpm_table; \
+ __dtpm < __dtpm_table_end; \
+ __dtpm++)
+
+static inline struct dtpm *to_dtpm(struct powercap_zone *zone)
+{
+ return container_of(zone, struct dtpm, zone);
+}
+
+int dtpm_update_power(struct dtpm *dtpm, u64 power_min, u64 power_max);
+
+int dtpm_release_zone(struct powercap_zone *pcz);
+
+struct dtpm *dtpm_alloc(struct dtpm_ops *ops);
+
+void dtpm_unregister(struct dtpm *dtpm);
+
+int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent);
+
+int dtpm_register_cpu(struct dtpm *parent);
+
+#endif
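A rough sketch of how a backend could hook itself into the DTPM init table; the descriptor and its init body are hypothetical. A backend other than the CPU one would build its own struct dtpm with dtpm_alloc(), fill in dtpm_ops and call dtpm_register() from its init callback instead:

#include <linux/dtpm.h>

static int example_dtpm_init(struct dtpm_descr *descr)
{
	/* The CPU backend already has a ready-made helper. */
	return dtpm_register_cpu(descr->parent);
}

static struct dtpm_descr example_dtpm_descr = {
	.name = "example",
	.init = example_dtpm_init,
};

DTPM_DECLARE(example_dtpm_descr);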
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index eec7928ff8fe..99580c22f91a 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -16,6 +16,8 @@ struct eeprom_93xx46_platform_data {
#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
/* Instructions such as EWEN are (addrlen + 2) in length. */
#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
+/* Add extra cycle after address during a read */
+#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
/*
* optional hooks to control additional logic
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 763b816ba19c..8710f5710c1d 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -29,10 +29,10 @@
#include <asm/page.h>
#define EFI_SUCCESS 0
-#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
-#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1)))
#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1)))
@@ -167,8 +167,6 @@ struct capsule_info {
int __efi_capsule_setup_info(struct capsule_info *cap_info);
-typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
-
/*
* Types and defines for Time Services
*/
@@ -605,10 +603,6 @@ efi_guid_to_str(efi_guid_t *guid, char *out)
}
extern void efi_init (void);
-extern void *efi_get_pal_addr (void);
-extern void efi_map_pal_code (void);
-extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-extern void efi_gettimeofday (struct timespec64 *ts);
#ifdef CONFIG_EFI
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#else
@@ -1110,13 +1104,6 @@ enum efi_secureboot_mode efi_get_secureboot_mode(efi_get_variable_t *get_var)
return efi_secureboot_mode_enabled;
}
-#ifdef CONFIG_RESET_ATTACK_MITIGATION
-void efi_enable_reset_attack_mitigation(void);
-#else
-static inline void
-efi_enable_reset_attack_mitigation(void) { }
-#endif
-
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
void efi_check_for_embedded_firmwares(void);
#else
@@ -1125,8 +1112,6 @@ static inline void efi_check_for_embedded_firmwares(void) { }
efi_status_t efi_random_get_seed(void);
-void efi_retrieve_tpm2_eventlog(void);
-
/*
* Arch code can implement the following three template macros, avoiding
* repetition for the void/non-void return cases of {__,}efi_call_virt():
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index bacc40a0bdf3..1fe8e105b83b 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -172,6 +172,8 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t);
/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
+/* Supports scheduling on multiple hardware queues */
+#define ELEVATOR_F_MQ_AWARE (1U << 1)
#endif /* CONFIG_BLOCK */
#endif
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index 10485f0c9740..e272c3d452ce 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -17,7 +17,7 @@ struct compat_elf_siginfo
compat_int_t si_errno;
};
-struct compat_elf_prstatus
+struct compat_elf_prstatus_common
{
struct compat_elf_siginfo pr_info;
short pr_cursig;
@@ -31,8 +31,6 @@ struct compat_elf_prstatus
struct old_timeval32 pr_stime;
struct old_timeval32 pr_cutime;
struct old_timeval32 pr_cstime;
- compat_elf_gregset_t pr_reg;
- compat_int_t pr_fpvalid;
};
struct compat_elf_prpsinfo
@@ -49,4 +47,15 @@ struct compat_elf_prpsinfo
char pr_psargs[ELF_PRARGSZ];
};
+#ifdef CONFIG_ARCH_HAS_ELFCORE_COMPAT
+#include <asm/elfcore-compat.h>
+#endif
+
+struct compat_elf_prstatus
+{
+ struct compat_elf_prstatus_common common;
+ compat_elf_gregset_t pr_reg;
+ compat_int_t pr_fpvalid;
+};
+
#endif /* _LINUX_ELFCORE_COMPAT_H */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index de51c1bef27d..2aaa15779d50 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -29,7 +29,7 @@ struct elf_siginfo
* the SVR4 structure, but more Linuxy, with things that Linux does
* not support and which gdb doesn't really use excluded.
*/
-struct elf_prstatus
+struct elf_prstatus_common
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
@@ -43,6 +43,11 @@ struct elf_prstatus
struct __kernel_old_timeval pr_stime; /* System time */
struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
+};
+
+struct elf_prstatus
+{
+ struct elf_prstatus_common common;
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index a104b298019a..883acef895bc 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H
+#include <linux/static_call_types.h>
#include <linux/tracehook.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
@@ -454,6 +455,9 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
* Conditional reschedule with additional sanity checks.
*/
void irqentry_exit_cond_resched(void);
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+#endif
/**
* irqentry_exit - Handle return from exception that used irqentry_enter()
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 9b93f8584ff7..8b2b1d68b954 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -47,6 +47,20 @@ static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
/**
+ * xfer_to_guest_mode_prepare - Perform last minute preparation work that
+ * needs to be handled while IRQs are disabled
+ * upon entering the guest.
+ *
+ * Has to be invoked with interrupts disabled before the last call
+ * to xfer_to_guest_mode_work_pending().
+ */
+static inline void xfer_to_guest_mode_prepare(void)
+{
+ lockdep_assert_irqs_disabled();
+ rcu_nocb_flush_deferred_wakeup();
+}
+
+/**
* __xfer_to_guest_mode_work_pending - Check if work is pending
*
* Returns: True if work pending, False otherwise.
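A sketch of the ordering xfer_to_guest_mode_prepare() asks for above, not actual arch code: interrupts go off, deferred wakeups are flushed, and only then is the final work check made.

#include <linux/entry-kvm.h>
#include <linux/irqflags.h>

static int example_enter_guest(struct kvm_vcpu *vcpu)
{
	local_irq_disable();
	xfer_to_guest_mode_prepare();

	if (xfer_to_guest_mode_work_pending()) {
		local_irq_enable();
		/* handle the pending work, then retry the entry */
		return xfer_to_guest_mode_handle_work(vcpu);
	}

	/* ... enter the guest with interrupts still disabled ... */
	return 0;
}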
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e3da25b51ae4..ec4cd3921c67 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -128,6 +128,8 @@ struct ethtool_link_ksettings {
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
} link_modes;
+ u32 lanes;
+ enum ethtool_link_mode_bit_indices link_mode;
};
/**
@@ -265,6 +267,8 @@ struct ethtool_pause_stats {
/**
* struct ethtool_ops - optional netdev operations
+ * @cap_link_lanes_supported: indicates if the driver supports the lanes
+ * parameter.
* @supported_coalesce_params: supported types of interrupt coalescing.
* @get_drvinfo: Report driver/device information. Should only set the
* @driver, @version, @fw_version and @bus_info fields. If not
@@ -420,6 +424,7 @@ struct ethtool_pause_stats {
* of the generic netdev features interface.
*/
struct ethtool_ops {
+ u32 cap_link_lanes_supported:1;
u32 supported_coalesce_params;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 0350393465d4..593322c946e6 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -18,7 +18,7 @@ struct file;
#ifdef CONFIG_EPOLL
-#ifdef CONFIG_CHECKPOINT_RESTORE
+#ifdef CONFIG_KCMP
struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff);
#endif
diff --git a/include/linux/export.h b/include/linux/export.h
index fceb5e855717..6271a5d9c988 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -157,18 +157,9 @@ struct kernel_symbol {
#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl")
-#define EXPORT_SYMBOL_GPL_FUTURE(sym) _EXPORT_SYMBOL(sym, "_gpl_future")
#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", #ns)
#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", #ns)
-#ifdef CONFIG_UNUSED_SYMBOLS
-#define EXPORT_UNUSED_SYMBOL(sym) _EXPORT_SYMBOL(sym, "_unused")
-#define EXPORT_UNUSED_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_unused_gpl")
-#else
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
-#endif
-
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_EXPORT_H */
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 9f4d4bcbf251..fe848901fcc3 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -213,6 +213,7 @@ struct export_operations {
bool write, u32 *device_generation);
int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
int nr_iomaps, struct iattr *iattr);
+ u64 (*fetch_iversion)(struct inode *);
#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */
#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */
#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 7dc2a06cf19a..c6cc0a566ef5 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -274,6 +274,9 @@ struct f2fs_inode {
__u8 i_compress_algorithm; /* compress algorithm */
__u8 i_log_cluster_size; /* log of cluster size */
__le16 i_compress_flag; /* compress flag */
+ /* 0 bit: chksum flag
+ * [10,15] bits: compress level
+ */
__le32 i_extra_end[0]; /* for attribute size calculation */
} __packed;
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 921e750843e6..766fcd973beb 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -19,7 +19,7 @@
/* List of all valid flags for the how->resolve argument: */
#define VALID_RESOLVE_FLAGS \
(RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
- RESOLVE_BENEATH | RESOLVE_IN_ROOT)
+ RESOLVE_BENEATH | RESOLVE_IN_ROOT | RESOLVE_CACHED)
/* List of all open_how "versions". */
#define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */
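RESOLVE_CACHED asks the kernel to resolve the path from cached state only and to fail with -EAGAIN otherwise, which lets callers such as io_uring attempt a non-blocking open first. A hedged userspace sketch using the raw openat2() syscall; SYS_openat2 and linux/openat2.h assume reasonably recent kernel headers:

#include <fcntl.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/openat2.h>

/* Cache-only open attempt; the caller retries without RESOLVE_CACHED,
 * from a context that may block, when -EAGAIN comes back.
 */
static int open_cached(const char *path)
{
	struct open_how how;

	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY;
	how.resolve = RESOLVE_CACHED;

	return syscall(SYS_openat2, AT_FDCWD, path, &how, sizeof(how));
}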
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 29c27656165b..3b00fc906ccd 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/sockptr.h>
#include <crypto/sha1.h>
+#include <linux/u64_stats_sync.h>
#include <net/sch_generic.h>
@@ -259,15 +260,32 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+/*
+ * Atomic operations:
+ *
+ * BPF_ADD *(uint *) (dst_reg + off16) += src_reg
+ * BPF_AND *(uint *) (dst_reg + off16) &= src_reg
+ * BPF_OR *(uint *) (dst_reg + off16) |= src_reg
+ * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg
+ * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
+ * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
+ * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
+ * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
+ * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
+ * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
((struct bpf_insn) { \
- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
- .imm = 0 })
+ .imm = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
@@ -522,6 +540,13 @@ struct bpf_binary_header {
u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};
+struct bpf_prog_stats {
+ u64 cnt;
+ u64 nsecs;
+ u64 misses;
+ struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
+
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
@@ -540,10 +565,12 @@ struct bpf_prog {
u32 len; /* Number of filter blocks */
u32 jited_len; /* Size of jited insns in bytes */
u8 tag[BPF_TAG_SIZE];
- struct bpf_prog_aux *aux; /* Auxiliary fields */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ struct bpf_prog_stats __percpu *stats;
+ int __percpu *active;
unsigned int (*bpf_func)(const void *ctx,
const struct bpf_insn *insn);
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
/* Instructions for interpreter */
struct sock_filter insns[0];
struct bpf_insn insnsi[];
@@ -564,7 +591,7 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
struct bpf_prog_stats *__stats; \
u64 __start = sched_clock(); \
__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- __stats = this_cpu_ptr(prog->aux->stats); \
+ __stats = this_cpu_ptr(prog->stats); \
u64_stats_update_begin(&__stats->syncp); \
__stats->cnt++; \
__stats->nsecs += sched_clock() - __start; \
@@ -886,7 +913,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
- __bpf_call_base)
+ (void *)__bpf_call_base)
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
@@ -1281,6 +1308,11 @@ struct bpf_sysctl_kern {
u64 tmp_reg;
};
+#define BPF_SOCKOPT_KERN_BUF_SIZE 32
+struct bpf_sockopt_buf {
+ u8 data[BPF_SOCKOPT_KERN_BUF_SIZE];
+};
+
struct bpf_sockopt_kern {
struct sock *sk;
u8 *optval;
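An illustrative instruction sequence using the new BPF_ATOMIC_OP() macro, hand-built the way test programs are; the registers and the stack offset are arbitrary. It zeroes a stack slot and then does a fetch-and-add into it:

#include <linux/filter.h>

static struct bpf_insn example_atomic_prog[] = {
	/* *(u64 *)(r10 - 8) = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	/* r1 = 5 */
	BPF_MOV64_IMM(BPF_REG_1, 5),
	/* r1 = atomic_fetch_add((u64 *)(r10 - 8), r1) */
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
	/* r0 = r1, i.e. the old value (0) */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
	BPF_EXIT_INSN(),
};

The legacy BPF_STX_XADD() spelling now expands to BPF_ATOMIC_OP() with BPF_ADD, so existing users keep their non-fetching add semantics.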
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index a93d85932eb9..ebc295647581 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -6,7 +6,7 @@
#ifndef __STRATIX10_SVC_CLIENT_H
#define __STRATIX10_SVC_CLIENT_H
-/**
+/*
* Service layer driver supports client names
*
* fpga: for FPGA configuration
@@ -15,7 +15,7 @@
#define SVC_CLIENT_FPGA "fpga"
#define SVC_CLIENT_RSU "rsu"
-/**
+/*
* Status of the sent command, in bit number
*
* SVC_STATUS_OK:
@@ -50,7 +50,7 @@
#define SVC_STATUS_ERROR 5
#define SVC_STATUS_NO_SUPPORT 6
-/**
+/*
* Flag bit for COMMAND_RECONFIG
*
* COMMAND_RECONFIG_FLAG_PARTIAL:
@@ -58,7 +58,7 @@
*/
#define COMMAND_RECONFIG_FLAG_PARTIAL 1
-/**
+/*
* Timeout settings for service clients:
* timeout value used in Stratix10 FPGA manager driver.
* timeout value used in RSU driver
@@ -218,7 +218,7 @@ void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr);
int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg);
/**
- * intel_svc_done() - complete service request
+ * stratix10_svc_done() - complete service request
* @chan: service channel assigned to the client
*
* This function is used by service client to inform service layer that
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 2a0da841c942..71177b17eee5 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -64,28 +64,27 @@ enum pm_api_id {
PM_GET_API_VERSION = 1,
PM_SYSTEM_SHUTDOWN = 12,
PM_REQUEST_NODE = 13,
- PM_RELEASE_NODE,
- PM_SET_REQUIREMENT,
+ PM_RELEASE_NODE = 14,
+ PM_SET_REQUIREMENT = 15,
PM_RESET_ASSERT = 17,
- PM_RESET_GET_STATUS,
+ PM_RESET_GET_STATUS = 18,
PM_PM_INIT_FINALIZE = 21,
- PM_FPGA_LOAD,
- PM_FPGA_GET_STATUS,
+ PM_FPGA_LOAD = 22,
+ PM_FPGA_GET_STATUS = 23,
PM_GET_CHIPID = 24,
PM_IOCTL = 34,
- PM_QUERY_DATA,
- PM_CLOCK_ENABLE,
- PM_CLOCK_DISABLE,
- PM_CLOCK_GETSTATE,
- PM_CLOCK_SETDIVIDER,
- PM_CLOCK_GETDIVIDER,
- PM_CLOCK_SETRATE,
- PM_CLOCK_GETRATE,
- PM_CLOCK_SETPARENT,
- PM_CLOCK_GETPARENT,
+ PM_QUERY_DATA = 35,
+ PM_CLOCK_ENABLE = 36,
+ PM_CLOCK_DISABLE = 37,
+ PM_CLOCK_GETSTATE = 38,
+ PM_CLOCK_SETDIVIDER = 39,
+ PM_CLOCK_GETDIVIDER = 40,
+ PM_CLOCK_SETRATE = 41,
+ PM_CLOCK_GETRATE = 42,
+ PM_CLOCK_SETPARENT = 43,
+ PM_CLOCK_GETPARENT = 44,
PM_SECURE_AES = 47,
PM_FEATURE_CHECK = 63,
- PM_API_MAX,
};
/* PMU-FW return status codes */
@@ -93,21 +92,21 @@ enum pm_ret_status {
XST_PM_SUCCESS = 0,
XST_PM_NO_FEATURE = 19,
XST_PM_INTERNAL = 2000,
- XST_PM_CONFLICT,
- XST_PM_NO_ACCESS,
- XST_PM_INVALID_NODE,
- XST_PM_DOUBLE_REQ,
- XST_PM_ABORT_SUSPEND,
+ XST_PM_CONFLICT = 2001,
+ XST_PM_NO_ACCESS = 2002,
+ XST_PM_INVALID_NODE = 2003,
+ XST_PM_DOUBLE_REQ = 2004,
+ XST_PM_ABORT_SUSPEND = 2005,
XST_PM_MULT_USER = 2008,
};
enum pm_ioctl_id {
IOCTL_SD_DLL_RESET = 6,
- IOCTL_SET_SD_TAPDELAY,
- IOCTL_SET_PLL_FRAC_MODE,
- IOCTL_GET_PLL_FRAC_MODE,
- IOCTL_SET_PLL_FRAC_DATA,
- IOCTL_GET_PLL_FRAC_DATA,
+ IOCTL_SET_SD_TAPDELAY = 7,
+ IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_GET_PLL_FRAC_MODE = 9,
+ IOCTL_SET_PLL_FRAC_DATA = 10,
+ IOCTL_GET_PLL_FRAC_DATA = 11,
IOCTL_WRITE_GGS = 12,
IOCTL_READ_GGS = 13,
IOCTL_WRITE_PGGS = 14,
@@ -117,185 +116,185 @@ enum pm_ioctl_id {
};
enum pm_query_id {
- PM_QID_INVALID,
- PM_QID_CLOCK_GET_NAME,
- PM_QID_CLOCK_GET_TOPOLOGY,
- PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
- PM_QID_CLOCK_GET_PARENTS,
- PM_QID_CLOCK_GET_ATTRIBUTES,
+ PM_QID_INVALID = 0,
+ PM_QID_CLOCK_GET_NAME = 1,
+ PM_QID_CLOCK_GET_TOPOLOGY = 2,
+ PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS = 3,
+ PM_QID_CLOCK_GET_PARENTS = 4,
+ PM_QID_CLOCK_GET_ATTRIBUTES = 5,
PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
- PM_QID_CLOCK_GET_MAX_DIVISOR,
+ PM_QID_CLOCK_GET_MAX_DIVISOR = 13,
};
enum zynqmp_pm_reset_action {
- PM_RESET_ACTION_RELEASE,
- PM_RESET_ACTION_ASSERT,
- PM_RESET_ACTION_PULSE,
+ PM_RESET_ACTION_RELEASE = 0,
+ PM_RESET_ACTION_ASSERT = 1,
+ PM_RESET_ACTION_PULSE = 2,
};
enum zynqmp_pm_reset {
ZYNQMP_PM_RESET_START = 1000,
ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START,
- ZYNQMP_PM_RESET_PCIE_BRIDGE,
- ZYNQMP_PM_RESET_PCIE_CTRL,
- ZYNQMP_PM_RESET_DP,
- ZYNQMP_PM_RESET_SWDT_CRF,
- ZYNQMP_PM_RESET_AFI_FM5,
- ZYNQMP_PM_RESET_AFI_FM4,
- ZYNQMP_PM_RESET_AFI_FM3,
- ZYNQMP_PM_RESET_AFI_FM2,
- ZYNQMP_PM_RESET_AFI_FM1,
- ZYNQMP_PM_RESET_AFI_FM0,
- ZYNQMP_PM_RESET_GDMA,
- ZYNQMP_PM_RESET_GPU_PP1,
- ZYNQMP_PM_RESET_GPU_PP0,
- ZYNQMP_PM_RESET_GPU,
- ZYNQMP_PM_RESET_GT,
- ZYNQMP_PM_RESET_SATA,
- ZYNQMP_PM_RESET_ACPU3_PWRON,
- ZYNQMP_PM_RESET_ACPU2_PWRON,
- ZYNQMP_PM_RESET_ACPU1_PWRON,
- ZYNQMP_PM_RESET_ACPU0_PWRON,
- ZYNQMP_PM_RESET_APU_L2,
- ZYNQMP_PM_RESET_ACPU3,
- ZYNQMP_PM_RESET_ACPU2,
- ZYNQMP_PM_RESET_ACPU1,
- ZYNQMP_PM_RESET_ACPU0,
- ZYNQMP_PM_RESET_DDR,
- ZYNQMP_PM_RESET_APM_FPD,
- ZYNQMP_PM_RESET_SOFT,
- ZYNQMP_PM_RESET_GEM0,
- ZYNQMP_PM_RESET_GEM1,
- ZYNQMP_PM_RESET_GEM2,
- ZYNQMP_PM_RESET_GEM3,
- ZYNQMP_PM_RESET_QSPI,
- ZYNQMP_PM_RESET_UART0,
- ZYNQMP_PM_RESET_UART1,
- ZYNQMP_PM_RESET_SPI0,
- ZYNQMP_PM_RESET_SPI1,
- ZYNQMP_PM_RESET_SDIO0,
- ZYNQMP_PM_RESET_SDIO1,
- ZYNQMP_PM_RESET_CAN0,
- ZYNQMP_PM_RESET_CAN1,
- ZYNQMP_PM_RESET_I2C0,
- ZYNQMP_PM_RESET_I2C1,
- ZYNQMP_PM_RESET_TTC0,
- ZYNQMP_PM_RESET_TTC1,
- ZYNQMP_PM_RESET_TTC2,
- ZYNQMP_PM_RESET_TTC3,
- ZYNQMP_PM_RESET_SWDT_CRL,
- ZYNQMP_PM_RESET_NAND,
- ZYNQMP_PM_RESET_ADMA,
- ZYNQMP_PM_RESET_GPIO,
- ZYNQMP_PM_RESET_IOU_CC,
- ZYNQMP_PM_RESET_TIMESTAMP,
- ZYNQMP_PM_RESET_RPU_R50,
- ZYNQMP_PM_RESET_RPU_R51,
- ZYNQMP_PM_RESET_RPU_AMBA,
- ZYNQMP_PM_RESET_OCM,
- ZYNQMP_PM_RESET_RPU_PGE,
- ZYNQMP_PM_RESET_USB0_CORERESET,
- ZYNQMP_PM_RESET_USB1_CORERESET,
- ZYNQMP_PM_RESET_USB0_HIBERRESET,
- ZYNQMP_PM_RESET_USB1_HIBERRESET,
- ZYNQMP_PM_RESET_USB0_APB,
- ZYNQMP_PM_RESET_USB1_APB,
- ZYNQMP_PM_RESET_IPI,
- ZYNQMP_PM_RESET_APM_LPD,
- ZYNQMP_PM_RESET_RTC,
- ZYNQMP_PM_RESET_SYSMON,
- ZYNQMP_PM_RESET_AFI_FM6,
- ZYNQMP_PM_RESET_LPD_SWDT,
- ZYNQMP_PM_RESET_FPD,
- ZYNQMP_PM_RESET_RPU_DBG1,
- ZYNQMP_PM_RESET_RPU_DBG0,
- ZYNQMP_PM_RESET_DBG_LPD,
- ZYNQMP_PM_RESET_DBG_FPD,
- ZYNQMP_PM_RESET_APLL,
- ZYNQMP_PM_RESET_DPLL,
- ZYNQMP_PM_RESET_VPLL,
- ZYNQMP_PM_RESET_IOPLL,
- ZYNQMP_PM_RESET_RPLL,
- ZYNQMP_PM_RESET_GPO3_PL_0,
- ZYNQMP_PM_RESET_GPO3_PL_1,
- ZYNQMP_PM_RESET_GPO3_PL_2,
- ZYNQMP_PM_RESET_GPO3_PL_3,
- ZYNQMP_PM_RESET_GPO3_PL_4,
- ZYNQMP_PM_RESET_GPO3_PL_5,
- ZYNQMP_PM_RESET_GPO3_PL_6,
- ZYNQMP_PM_RESET_GPO3_PL_7,
- ZYNQMP_PM_RESET_GPO3_PL_8,
- ZYNQMP_PM_RESET_GPO3_PL_9,
- ZYNQMP_PM_RESET_GPO3_PL_10,
- ZYNQMP_PM_RESET_GPO3_PL_11,
- ZYNQMP_PM_RESET_GPO3_PL_12,
- ZYNQMP_PM_RESET_GPO3_PL_13,
- ZYNQMP_PM_RESET_GPO3_PL_14,
- ZYNQMP_PM_RESET_GPO3_PL_15,
- ZYNQMP_PM_RESET_GPO3_PL_16,
- ZYNQMP_PM_RESET_GPO3_PL_17,
- ZYNQMP_PM_RESET_GPO3_PL_18,
- ZYNQMP_PM_RESET_GPO3_PL_19,
- ZYNQMP_PM_RESET_GPO3_PL_20,
- ZYNQMP_PM_RESET_GPO3_PL_21,
- ZYNQMP_PM_RESET_GPO3_PL_22,
- ZYNQMP_PM_RESET_GPO3_PL_23,
- ZYNQMP_PM_RESET_GPO3_PL_24,
- ZYNQMP_PM_RESET_GPO3_PL_25,
- ZYNQMP_PM_RESET_GPO3_PL_26,
- ZYNQMP_PM_RESET_GPO3_PL_27,
- ZYNQMP_PM_RESET_GPO3_PL_28,
- ZYNQMP_PM_RESET_GPO3_PL_29,
- ZYNQMP_PM_RESET_GPO3_PL_30,
- ZYNQMP_PM_RESET_GPO3_PL_31,
- ZYNQMP_PM_RESET_RPU_LS,
- ZYNQMP_PM_RESET_PS_ONLY,
- ZYNQMP_PM_RESET_PL,
- ZYNQMP_PM_RESET_PS_PL0,
- ZYNQMP_PM_RESET_PS_PL1,
- ZYNQMP_PM_RESET_PS_PL2,
- ZYNQMP_PM_RESET_PS_PL3,
+ ZYNQMP_PM_RESET_PCIE_BRIDGE = 1001,
+ ZYNQMP_PM_RESET_PCIE_CTRL = 1002,
+ ZYNQMP_PM_RESET_DP = 1003,
+ ZYNQMP_PM_RESET_SWDT_CRF = 1004,
+ ZYNQMP_PM_RESET_AFI_FM5 = 1005,
+ ZYNQMP_PM_RESET_AFI_FM4 = 1006,
+ ZYNQMP_PM_RESET_AFI_FM3 = 1007,
+ ZYNQMP_PM_RESET_AFI_FM2 = 1008,
+ ZYNQMP_PM_RESET_AFI_FM1 = 1009,
+ ZYNQMP_PM_RESET_AFI_FM0 = 1010,
+ ZYNQMP_PM_RESET_GDMA = 1011,
+ ZYNQMP_PM_RESET_GPU_PP1 = 1012,
+ ZYNQMP_PM_RESET_GPU_PP0 = 1013,
+ ZYNQMP_PM_RESET_GPU = 1014,
+ ZYNQMP_PM_RESET_GT = 1015,
+ ZYNQMP_PM_RESET_SATA = 1016,
+ ZYNQMP_PM_RESET_ACPU3_PWRON = 1017,
+ ZYNQMP_PM_RESET_ACPU2_PWRON = 1018,
+ ZYNQMP_PM_RESET_ACPU1_PWRON = 1019,
+ ZYNQMP_PM_RESET_ACPU0_PWRON = 1020,
+ ZYNQMP_PM_RESET_APU_L2 = 1021,
+ ZYNQMP_PM_RESET_ACPU3 = 1022,
+ ZYNQMP_PM_RESET_ACPU2 = 1023,
+ ZYNQMP_PM_RESET_ACPU1 = 1024,
+ ZYNQMP_PM_RESET_ACPU0 = 1025,
+ ZYNQMP_PM_RESET_DDR = 1026,
+ ZYNQMP_PM_RESET_APM_FPD = 1027,
+ ZYNQMP_PM_RESET_SOFT = 1028,
+ ZYNQMP_PM_RESET_GEM0 = 1029,
+ ZYNQMP_PM_RESET_GEM1 = 1030,
+ ZYNQMP_PM_RESET_GEM2 = 1031,
+ ZYNQMP_PM_RESET_GEM3 = 1032,
+ ZYNQMP_PM_RESET_QSPI = 1033,
+ ZYNQMP_PM_RESET_UART0 = 1034,
+ ZYNQMP_PM_RESET_UART1 = 1035,
+ ZYNQMP_PM_RESET_SPI0 = 1036,
+ ZYNQMP_PM_RESET_SPI1 = 1037,
+ ZYNQMP_PM_RESET_SDIO0 = 1038,
+ ZYNQMP_PM_RESET_SDIO1 = 1039,
+ ZYNQMP_PM_RESET_CAN0 = 1040,
+ ZYNQMP_PM_RESET_CAN1 = 1041,
+ ZYNQMP_PM_RESET_I2C0 = 1042,
+ ZYNQMP_PM_RESET_I2C1 = 1043,
+ ZYNQMP_PM_RESET_TTC0 = 1044,
+ ZYNQMP_PM_RESET_TTC1 = 1045,
+ ZYNQMP_PM_RESET_TTC2 = 1046,
+ ZYNQMP_PM_RESET_TTC3 = 1047,
+ ZYNQMP_PM_RESET_SWDT_CRL = 1048,
+ ZYNQMP_PM_RESET_NAND = 1049,
+ ZYNQMP_PM_RESET_ADMA = 1050,
+ ZYNQMP_PM_RESET_GPIO = 1051,
+ ZYNQMP_PM_RESET_IOU_CC = 1052,
+ ZYNQMP_PM_RESET_TIMESTAMP = 1053,
+ ZYNQMP_PM_RESET_RPU_R50 = 1054,
+ ZYNQMP_PM_RESET_RPU_R51 = 1055,
+ ZYNQMP_PM_RESET_RPU_AMBA = 1056,
+ ZYNQMP_PM_RESET_OCM = 1057,
+ ZYNQMP_PM_RESET_RPU_PGE = 1058,
+ ZYNQMP_PM_RESET_USB0_CORERESET = 1059,
+ ZYNQMP_PM_RESET_USB1_CORERESET = 1060,
+ ZYNQMP_PM_RESET_USB0_HIBERRESET = 1061,
+ ZYNQMP_PM_RESET_USB1_HIBERRESET = 1062,
+ ZYNQMP_PM_RESET_USB0_APB = 1063,
+ ZYNQMP_PM_RESET_USB1_APB = 1064,
+ ZYNQMP_PM_RESET_IPI = 1065,
+ ZYNQMP_PM_RESET_APM_LPD = 1066,
+ ZYNQMP_PM_RESET_RTC = 1067,
+ ZYNQMP_PM_RESET_SYSMON = 1068,
+ ZYNQMP_PM_RESET_AFI_FM6 = 1069,
+ ZYNQMP_PM_RESET_LPD_SWDT = 1070,
+ ZYNQMP_PM_RESET_FPD = 1071,
+ ZYNQMP_PM_RESET_RPU_DBG1 = 1072,
+ ZYNQMP_PM_RESET_RPU_DBG0 = 1073,
+ ZYNQMP_PM_RESET_DBG_LPD = 1074,
+ ZYNQMP_PM_RESET_DBG_FPD = 1075,
+ ZYNQMP_PM_RESET_APLL = 1076,
+ ZYNQMP_PM_RESET_DPLL = 1077,
+ ZYNQMP_PM_RESET_VPLL = 1078,
+ ZYNQMP_PM_RESET_IOPLL = 1079,
+ ZYNQMP_PM_RESET_RPLL = 1080,
+ ZYNQMP_PM_RESET_GPO3_PL_0 = 1081,
+ ZYNQMP_PM_RESET_GPO3_PL_1 = 1082,
+ ZYNQMP_PM_RESET_GPO3_PL_2 = 1083,
+ ZYNQMP_PM_RESET_GPO3_PL_3 = 1084,
+ ZYNQMP_PM_RESET_GPO3_PL_4 = 1085,
+ ZYNQMP_PM_RESET_GPO3_PL_5 = 1086,
+ ZYNQMP_PM_RESET_GPO3_PL_6 = 1087,
+ ZYNQMP_PM_RESET_GPO3_PL_7 = 1088,
+ ZYNQMP_PM_RESET_GPO3_PL_8 = 1089,
+ ZYNQMP_PM_RESET_GPO3_PL_9 = 1090,
+ ZYNQMP_PM_RESET_GPO3_PL_10 = 1091,
+ ZYNQMP_PM_RESET_GPO3_PL_11 = 1092,
+ ZYNQMP_PM_RESET_GPO3_PL_12 = 1093,
+ ZYNQMP_PM_RESET_GPO3_PL_13 = 1094,
+ ZYNQMP_PM_RESET_GPO3_PL_14 = 1095,
+ ZYNQMP_PM_RESET_GPO3_PL_15 = 1096,
+ ZYNQMP_PM_RESET_GPO3_PL_16 = 1097,
+ ZYNQMP_PM_RESET_GPO3_PL_17 = 1098,
+ ZYNQMP_PM_RESET_GPO3_PL_18 = 1099,
+ ZYNQMP_PM_RESET_GPO3_PL_19 = 1100,
+ ZYNQMP_PM_RESET_GPO3_PL_20 = 1101,
+ ZYNQMP_PM_RESET_GPO3_PL_21 = 1102,
+ ZYNQMP_PM_RESET_GPO3_PL_22 = 1103,
+ ZYNQMP_PM_RESET_GPO3_PL_23 = 1104,
+ ZYNQMP_PM_RESET_GPO3_PL_24 = 1105,
+ ZYNQMP_PM_RESET_GPO3_PL_25 = 1106,
+ ZYNQMP_PM_RESET_GPO3_PL_26 = 1107,
+ ZYNQMP_PM_RESET_GPO3_PL_27 = 1108,
+ ZYNQMP_PM_RESET_GPO3_PL_28 = 1109,
+ ZYNQMP_PM_RESET_GPO3_PL_29 = 1110,
+ ZYNQMP_PM_RESET_GPO3_PL_30 = 1111,
+ ZYNQMP_PM_RESET_GPO3_PL_31 = 1112,
+ ZYNQMP_PM_RESET_RPU_LS = 1113,
+ ZYNQMP_PM_RESET_PS_ONLY = 1114,
+ ZYNQMP_PM_RESET_PL = 1115,
+ ZYNQMP_PM_RESET_PS_PL0 = 1116,
+ ZYNQMP_PM_RESET_PS_PL1 = 1117,
+ ZYNQMP_PM_RESET_PS_PL2 = 1118,
+ ZYNQMP_PM_RESET_PS_PL3 = 1119,
ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3
};
enum zynqmp_pm_suspend_reason {
SUSPEND_POWER_REQUEST = 201,
- SUSPEND_ALERT,
- SUSPEND_SYSTEM_SHUTDOWN,
+ SUSPEND_ALERT = 202,
+ SUSPEND_SYSTEM_SHUTDOWN = 203,
};
enum zynqmp_pm_request_ack {
ZYNQMP_PM_REQUEST_ACK_NO = 1,
- ZYNQMP_PM_REQUEST_ACK_BLOCKING,
- ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING = 2,
+ ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING = 3,
};
enum pm_node_id {
NODE_SD_0 = 39,
- NODE_SD_1,
+ NODE_SD_1 = 40,
};
enum tap_delay_type {
PM_TAPDELAY_INPUT = 0,
- PM_TAPDELAY_OUTPUT,
+ PM_TAPDELAY_OUTPUT = 1,
};
enum dll_reset_type {
- PM_DLL_RESET_ASSERT,
- PM_DLL_RESET_RELEASE,
- PM_DLL_RESET_PULSE,
+ PM_DLL_RESET_ASSERT = 0,
+ PM_DLL_RESET_RELEASE = 1,
+ PM_DLL_RESET_PULSE = 2,
};
enum zynqmp_pm_shutdown_type {
- ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN,
- ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
- ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0,
+ ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1,
+ ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY = 2,
};
enum zynqmp_pm_shutdown_subtype {
- ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM,
- ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY,
- ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM = 0,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY = 1,
+ ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2,
};
/**
diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h
index 8396013785ef..281cb4f83dbe 100644
--- a/include/linux/fixp-arith.h
+++ b/include/linux/fixp-arith.h
@@ -141,4 +141,23 @@ static inline s32 fixp_sin32_rad(u32 radians, u32 twopi)
#define fixp_cos32_rad(rad, twopi) \
fixp_sin32_rad(rad + twopi / 4, twopi)
+/**
+ * fixp_linear_interpolate() - interpolates a value from two known points
+ *
+ * @x0: x value of point 0
+ * @y0: y value of point 0
+ * @x1: x value of point 1
+ * @y1: y value of point 1
+ * @x: the linear interpolant
+ */
+static inline int fixp_linear_interpolate(int x0, int y0, int x1, int y1, int x)
+{
+ if (y0 == y1 || x == x0)
+ return y0;
+ if (x1 == x0 || x == x1)
+ return y1;
+
+ return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
+}
+
#endif
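fixp_linear_interpolate() is plain integer interpolation between two known points. A worked example, mapping a 30..70 input range onto a 0..255 output range (the names are illustrative):

#include <linux/fixp-arith.h>

static int example_temp_to_duty(int temp)
{
	/*
	 * For temp = 50: 0 + 255 * (50 - 30) / (70 - 30) = 127
	 * (integer division truncates 127.5 to 127).
	 */
	return fixp_linear_interpolate(30, 0, 70, 255, temp);
}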
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
new file mode 100644
index 000000000000..c1be37437e77
--- /dev/null
+++ b/include/linux/fortify-string.h
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FORTIFY_STRING_H_
+#define _LINUX_FORTIFY_STRING_H_
+
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
+extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
+extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
+extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
+extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
+extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
+#else
+#define __underlying_memchr __builtin_memchr
+#define __underlying_memcmp __builtin_memcmp
+#define __underlying_memcpy __builtin_memcpy
+#define __underlying_memmove __builtin_memmove
+#define __underlying_memset __builtin_memset
+#define __underlying_strcat __builtin_strcat
+#define __underlying_strcpy __builtin_strcpy
+#define __underlying_strlen __builtin_strlen
+#define __underlying_strncat __builtin_strncat
+#define __underlying_strncpy __builtin_strncpy
+#endif
+
+__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 1);
+
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __underlying_strncpy(p, q, size);
+}
+
+__FORTIFY_INLINE char *strcat(char *p, const char *q)
+{
+ size_t p_size = __builtin_object_size(p, 1);
+
+ if (p_size == (size_t)-1)
+ return __underlying_strcat(p, q);
+ if (strlcat(p, q, p_size) >= p_size)
+ fortify_panic(__func__);
+ return p;
+}
+
+__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
+{
+ __kernel_size_t ret;
+ size_t p_size = __builtin_object_size(p, 1);
+
+ /* Work around gcc excess stack consumption issue */
+ if (p_size == (size_t)-1 ||
+ (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
+ return __underlying_strlen(p);
+ ret = strnlen(p, p_size);
+ if (p_size <= ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
+{
+ size_t p_size = __builtin_object_size(p, 1);
+ __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+/* defined after fortified strlen to reuse it */
+extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
+__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
+{
+ size_t ret;
+ size_t p_size = __builtin_object_size(p, 1);
+ size_t q_size = __builtin_object_size(q, 1);
+
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __real_strlcpy(p, q, size);
+ ret = strlen(q);
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+
+ if (__builtin_constant_p(len) && len >= p_size)
+ __write_overflow();
+ if (len >= p_size)
+ fortify_panic(__func__);
+ __underlying_memcpy(p, q, len);
+ p[len] = '\0';
+ }
+ return ret;
+}
+
+/* defined after fortified strnlen to reuse it */
+extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
+__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
+{
+ size_t len;
+ /* Use string size rather than possible enclosing struct size. */
+ size_t p_size = __builtin_object_size(p, 1);
+ size_t q_size = __builtin_object_size(q, 1);
+
+ /* If we cannot get the size of p and q, default to calling strscpy. */
+ if (p_size == (size_t) -1 && q_size == (size_t) -1)
+ return __real_strscpy(p, q, size);
+
+ /*
+ * If size can be known at compile time and is greater than
+ * p_size, generate a compile time write overflow error.
+ */
+ if (__builtin_constant_p(size) && size > p_size)
+ __write_overflow();
+
+ /*
+ * This call protects from read overflow, because len will default to the
+ * length of q if that is smaller than size.
+ */
+ len = strnlen(q, size);
+ /*
+ * If len equals size, we will copy only size bytes which leads to
+ * -E2BIG being returned.
+ * Otherwise we will copy len + 1 because of the final '\0'.
+ */
+ len = len == size ? size : len + 1;
+
+ /*
+ * Generate a runtime write overflow error if len is greater than
+ * p_size.
+ */
+ if (len > p_size)
+ fortify_panic(__func__);
+
+ /*
+ * We can now safely call vanilla strscpy because we are protected from:
+ * 1. Read overflow thanks to call to strnlen().
+ * 2. Write overflow thanks to above ifs.
+ */
+ return __real_strscpy(p, q, len);
+}
+
+/* defined after fortified strlen and strnlen to reuse them */
+__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
+{
+ size_t p_len, copy_len;
+ size_t p_size = __builtin_object_size(p, 1);
+ size_t q_size = __builtin_object_size(q, 1);
+
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __underlying_strncat(p, q, count);
+ p_len = strlen(p);
+ copy_len = strnlen(q, count);
+ if (p_size < p_len + copy_len + 1)
+ fortify_panic(__func__);
+ __underlying_memcpy(p + p_len, q, copy_len);
+ p[p_len + copy_len] = '\0';
+ return p;
+}
+
+__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __underlying_memset(p, c, size);
+}
+
+__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __write_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __underlying_memcpy(p, q, size);
+}
+
+__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __write_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __underlying_memmove(p, q, size);
+}
+
+extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
+__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memscan(p, c, size);
+}
+
+__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __read_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __underlying_memcmp(p, q, size);
+}
+
+__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __underlying_memchr(p, c, size);
+}
+
+void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
+__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memchr_inv(p, c, size);
+}
+
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
+__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_kmemdup(p, size, gfp);
+}
+
+/* defined after fortified strlen and memcpy to reuse them */
+__FORTIFY_INLINE char *strcpy(char *p, const char *q)
+{
+ size_t p_size = __builtin_object_size(p, 1);
+ size_t q_size = __builtin_object_size(q, 1);
+ size_t size;
+
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __underlying_strcpy(p, q);
+ size = strlen(q) + 1;
+ /* test here to use the more stringent object size */
+ if (p_size < size)
+ fortify_panic(__func__);
+ memcpy(p, q, size);
+ return p;
+}
+
+/* Don't use these outside the FORTIFY_SOURCE implementation */
+#undef __underlying_memchr
+#undef __underlying_memcmp
+#undef __underlying_memcpy
+#undef __underlying_memmove
+#undef __underlying_memset
+#undef __underlying_strcat
+#undef __underlying_strcpy
+#undef __underlying_strlen
+#undef __underlying_strncat
+#undef __underlying_strncpy
+
+#endif /* _LINUX_FORTIFY_STRING_H_ */
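
As a brief, hedged illustration of the effect of these wrappers (the struct and function below are hypothetical and assume CONFIG_FORTIFY_SOURCE=y), a fortified strscpy() into a buffer whose size the compiler can see is checked at compile time when the bound is constant, and at runtime via fortify_panic() otherwise, while strnlen() bounds the source read:

#include <linux/string.h>
#include <linux/types.h>

struct example_dev {
	char label[16];
	int id;
};

static ssize_t example_set_label(struct example_dev *edev, const char *src)
{
	/*
	 * A constant oversized bound, e.g. strscpy(edev->label, src, 64),
	 * would trip __write_overflow() at compile time because
	 * __builtin_object_size(edev->label, 1) is 16.
	 *
	 * With sizeof() the copy is safe: strnlen() bounds the read of src,
	 * the write never exceeds the label field, and -E2BIG is returned
	 * if the string had to be truncated.
	 */
	return strscpy(edev->label, src, sizeof(edev->label));
}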
diff --git a/include/linux/fs.h b/include/linux/fs.h
index fd47deea7c17..ec8f3ddf4a6a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -39,6 +39,8 @@
#include <linux/fs_types.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>
+#include <linux/mount.h>
+#include <linux/cred.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -1572,6 +1574,52 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
}
+static inline kuid_t kuid_into_mnt(struct user_namespace *mnt_userns,
+ kuid_t kuid)
+{
+ return make_kuid(mnt_userns, __kuid_val(kuid));
+}
+
+static inline kgid_t kgid_into_mnt(struct user_namespace *mnt_userns,
+ kgid_t kgid)
+{
+ return make_kgid(mnt_userns, __kgid_val(kgid));
+}
+
+static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns,
+ const struct inode *inode)
+{
+ return kuid_into_mnt(mnt_userns, inode->i_uid);
+}
+
+static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns,
+ const struct inode *inode)
+{
+ return kgid_into_mnt(mnt_userns, inode->i_gid);
+}
+
+static inline kuid_t kuid_from_mnt(struct user_namespace *mnt_userns,
+ kuid_t kuid)
+{
+ return KUIDT_INIT(from_kuid(mnt_userns, kuid));
+}
+
+static inline kgid_t kgid_from_mnt(struct user_namespace *mnt_userns,
+ kgid_t kgid)
+{
+ return KGIDT_INIT(from_kgid(mnt_userns, kgid));
+}
+
+static inline kuid_t fsuid_into_mnt(struct user_namespace *mnt_userns)
+{
+ return kuid_from_mnt(mnt_userns, current_fsuid());
+}
+
+static inline kgid_t fsgid_into_mnt(struct user_namespace *mnt_userns)
+{
+ return kgid_from_mnt(mnt_userns, current_fsgid());
+}
+
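
A hedged sketch of how these mapping helpers are meant to be used (the function name is made up): an ownership check on an idmapped mount maps the inode's raw i_uid through the mount's user namespace before comparing it with the caller's fsuid; on non-idmapped mounts (mnt_userns == &init_user_ns) the mapping is the identity.

#include <linux/uidgid.h>

static bool example_caller_owns_inode(struct user_namespace *mnt_userns,
				      const struct inode *inode)
{
	/* i_uid_into_mnt() applies the mount's idmapping to the on-disk uid. */
	return uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode));
}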
extern struct timespec64 current_time(struct inode *inode);
/*
@@ -1714,28 +1762,48 @@ static inline bool sb_start_intwrite_trylock(struct super_block *sb)
return __sb_start_write_trylock(sb, SB_FREEZE_FS);
}
-
-extern bool inode_owner_or_capable(const struct inode *inode);
+bool inode_owner_or_capable(struct user_namespace *mnt_userns,
+ const struct inode *inode);
/*
* VFS helper functions..
*/
-extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
-extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
-extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
-extern int vfs_symlink(struct inode *, struct dentry *, const char *);
-extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
-extern int vfs_rmdir(struct inode *, struct dentry *);
-extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
-extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
+int vfs_create(struct user_namespace *, struct inode *,
+ struct dentry *, umode_t, bool);
+int vfs_mkdir(struct user_namespace *, struct inode *,
+ struct dentry *, umode_t);
+int vfs_mknod(struct user_namespace *, struct inode *, struct dentry *,
+ umode_t, dev_t);
+int vfs_symlink(struct user_namespace *, struct inode *,
+ struct dentry *, const char *);
+int vfs_link(struct dentry *, struct user_namespace *, struct inode *,
+ struct dentry *, struct inode **);
+int vfs_rmdir(struct user_namespace *, struct inode *, struct dentry *);
+int vfs_unlink(struct user_namespace *, struct inode *, struct dentry *,
+ struct inode **);
+
+struct renamedata {
+ struct user_namespace *old_mnt_userns;
+ struct inode *old_dir;
+ struct dentry *old_dentry;
+ struct user_namespace *new_mnt_userns;
+ struct inode *new_dir;
+ struct dentry *new_dentry;
+ struct inode **delegated_inode;
+ unsigned int flags;
+} __randomize_layout;
-static inline int vfs_whiteout(struct inode *dir, struct dentry *dentry)
+int vfs_rename(struct renamedata *);
+
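
Purely as an illustration of the new calling convention (the wrapper below is hypothetical), callers now fill in struct renamedata and hand it to vfs_rename() instead of passing the arguments positionally:

static int example_rename(struct user_namespace *mnt_userns,
			  struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	struct renamedata rd = {
		.old_mnt_userns	= mnt_userns,
		.old_dir	= old_dir,
		.old_dentry	= old_dentry,
		.new_mnt_userns	= mnt_userns,
		.new_dir	= new_dir,
		.new_dentry	= new_dentry,
		/* .delegated_inode and .flags are optional and default to 0 */
	};

	return vfs_rename(&rd);
}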
+static inline int vfs_whiteout(struct user_namespace *mnt_userns,
+ struct inode *dir, struct dentry *dentry)
{
- return vfs_mknod(dir, dentry, S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
+ return vfs_mknod(mnt_userns, dir, dentry, S_IFCHR | WHITEOUT_MODE,
+ WHITEOUT_DEV);
}
-extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
- int open_flag);
+struct dentry *vfs_tmpfile(struct user_namespace *mnt_userns,
+ struct dentry *dentry, umode_t mode, int open_flag);
int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
@@ -1757,8 +1825,8 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
/*
* VFS file helper functions.
*/
-extern void inode_init_owner(struct inode *inode, const struct inode *dir,
- umode_t mode);
+void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
+ const struct inode *dir, umode_t mode);
extern bool may_open_dev(const struct path *path);
/*
@@ -1862,22 +1930,28 @@ struct file_operations {
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
- int (*permission) (struct inode *, int);
+ int (*permission) (struct user_namespace *, struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int);
int (*readlink) (struct dentry *, char __user *,int);
- int (*create) (struct inode *,struct dentry *, umode_t, bool);
+ int (*create) (struct user_namespace *, struct inode *,struct dentry *,
+ umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
- int (*symlink) (struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct inode *,struct dentry *,umode_t);
+ int (*symlink) (struct user_namespace *, struct inode *,struct dentry *,
+ const char *);
+ int (*mkdir) (struct user_namespace *, struct inode *,struct dentry *,
+ umode_t);
int (*rmdir) (struct inode *,struct dentry *);
- int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
- int (*rename) (struct inode *, struct dentry *,
+ int (*mknod) (struct user_namespace *, struct inode *,struct dentry *,
+ umode_t,dev_t);
+ int (*rename) (struct user_namespace *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
- int (*setattr) (struct dentry *, struct iattr *);
- int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
+ int (*setattr) (struct user_namespace *, struct dentry *,
+ struct iattr *);
+ int (*getattr) (struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
@@ -1885,8 +1959,10 @@ struct inode_operations {
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
- int (*tmpfile) (struct inode *, struct dentry *, umode_t);
- int (*set_acl)(struct inode *, struct posix_acl *, int);
+ int (*tmpfile) (struct user_namespace *, struct inode *,
+ struct dentry *, umode_t);
+ int (*set_acl)(struct user_namespace *, struct inode *,
+ struct posix_acl *, int);
} ____cacheline_aligned;
static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
@@ -2035,9 +2111,11 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
-static inline bool HAS_UNMAPPED_ID(struct inode *inode)
+static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns,
+ struct inode *inode)
{
- return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid);
+ return !uid_valid(i_uid_into_mnt(mnt_userns, inode)) ||
+ !gid_valid(i_gid_into_mnt(mnt_userns, inode));
}
static inline enum rw_hint file_write_hint(struct file *file)
@@ -2084,8 +2162,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
/*
* Inode state bits. Protected by inode->i_lock
*
- * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
- * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
+ * Four bits determine the dirty state of the inode: I_DIRTY_SYNC,
+ * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME.
*
* Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
* until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
@@ -2094,12 +2172,20 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
* Two bits are used for locking and completion notification, I_NEW and I_SYNC.
*
* I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
- * fdatasync(). i_atime is the usual cause.
- * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
+ * fdatasync() (unless I_DIRTY_DATASYNC is also set).
+ * Timestamp updates are the usual cause.
+ * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
* these changes separately from I_DIRTY_SYNC so that we
* don't have to write inode on fdatasync() when only
- * mtime has changed in it.
+ * e.g. the timestamps have changed.
* I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
+ * I_DIRTY_TIME The inode itself only has dirty timestamps, and the
+ * lazytime mount option is enabled. We keep track of this
+ * separately from I_DIRTY_SYNC in order to implement
+ * lazytime. This gets cleared if I_DIRTY_INODE
+ * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. I.e.
+ * either I_DIRTY_TIME *or* I_DIRTY_INODE can be set in
+ * i_state, but not both. I_DIRTY_PAGES may still be set.
* I_NEW Serves as both a mutex and completion notification.
* New inodes set I_NEW. If two processes both create
* the same inode, one of them will release its inode and
@@ -2186,6 +2272,21 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
+/*
+ * Returns true if the given inode itself only has dirty timestamps (its pages
+ * may still be dirty) and isn't currently being allocated or freed.
+ * Filesystems should call this when writing an inode with lazytime enabled,
+ * if they want to opportunistically write the timestamps of other inodes
+ * located very nearby on-disk, e.g. in the same inode block. This returns true
+ * if the given inode is in need of such an opportunistic update. Requires
+ * i_lock, or at least later re-checking under i_lock.
+ */
+static inline bool inode_is_dirtytime_only(struct inode *inode)
+{
+ return (inode->i_state & (I_DIRTY_TIME | I_NEW |
+ I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME;
+}
+
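
A minimal sketch of the locking pattern the comment above asks for (the surrounding writeback helper is illustrative): an unlocked fast-path check, then a re-check under i_lock before committing to the opportunistic timestamp write.

static bool example_try_lazytime_flush(struct inode *inode)
{
	if (!inode_is_dirtytime_only(inode))
		return false;

	spin_lock(&inode->i_lock);
	if (!inode_is_dirtytime_only(inode)) {
		/* State changed while we were unlocked. */
		spin_unlock(&inode->i_lock);
		return false;
	}
	/* ... queue this inode's timestamps for writeback here ... */
	spin_unlock(&inode->i_lock);
	return true;
}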
extern void inc_nlink(struct inode *inode);
extern void drop_nlink(struct inode *inode);
extern void clear_nlink(struct inode *inode);
@@ -2231,6 +2332,7 @@ struct file_system_type {
#define FS_HAS_SUBTYPE 4
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
+#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
#define FS_THP_SUPPORT 8192 /* Remove once all fs converted */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
@@ -2517,9 +2619,13 @@ struct filename {
};
static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
+static inline struct user_namespace *file_mnt_user_ns(struct file *file)
+{
+ return mnt_user_ns(file->f_path.mnt);
+}
extern long vfs_truncate(const struct path *, loff_t);
-extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
- struct file *filp);
+int do_truncate(struct user_namespace *, struct dentry *, loff_t start,
+ unsigned int time_attrs, struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char __user *filename, int flags,
@@ -2756,10 +2862,22 @@ static inline int bmap(struct inode *inode, sector_t *block)
}
#endif
-extern int notify_change(struct dentry *, struct iattr *, struct inode **);
-extern int inode_permission(struct inode *, int);
-extern int generic_permission(struct inode *, int);
-extern int __check_sticky(struct inode *dir, struct inode *inode);
+int notify_change(struct user_namespace *, struct dentry *,
+ struct iattr *, struct inode **);
+int inode_permission(struct user_namespace *, struct inode *, int);
+int generic_permission(struct user_namespace *, struct inode *, int);
+static inline int file_permission(struct file *file, int mask)
+{
+ return inode_permission(file_mnt_user_ns(file),
+ file_inode(file), mask);
+}
+static inline int path_permission(const struct path *path, int mask)
+{
+ return inode_permission(mnt_user_ns(path->mnt),
+ d_inode(path->dentry), mask);
+}
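
As a hedged usage sketch (the caller is made up), the new wrappers let code that only holds a struct file or struct path do an idmapped-mount-aware permission check without manually looking up the mount's user namespace:

static int example_check_write(struct file *file)
{
	/*
	 * Equivalent to inode_permission(file_mnt_user_ns(file),
	 * file_inode(file), MAY_WRITE), but shorter and harder to get
	 * wrong on idmapped mounts.
	 */
	return file_permission(file, MAY_WRITE);
}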
+int __check_sticky(struct user_namespace *mnt_userns, struct inode *dir,
+ struct inode *inode);
static inline bool execute_ok(struct inode *inode)
{
@@ -2962,8 +3080,8 @@ extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
extern int generic_write_check_limits(struct file *file, loff_t pos,
loff_t *count);
extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);
-extern ssize_t generic_file_buffered_read(struct kiocb *iocb,
- struct iov_iter *to, ssize_t already_read);
+ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *to,
+ ssize_t already_read);
extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
@@ -3090,7 +3208,7 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len,
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-extern void generic_fillattr(struct inode *, struct kstat *);
+void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -3140,15 +3258,18 @@ extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, struct dir_context *);
-extern int simple_setattr(struct dentry *, struct iattr *);
-extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int);
+extern int simple_setattr(struct user_namespace *, struct dentry *,
+ struct iattr *);
+extern int simple_getattr(struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct inode *, struct dentry *,
- struct inode *, struct dentry *, unsigned int);
+extern int simple_rename(struct user_namespace *, struct inode *,
+ struct dentry *, struct inode *, struct dentry *,
+ unsigned int);
extern void simple_recursive_removal(struct dentry *,
void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
@@ -3192,11 +3313,6 @@ extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_check_addressable(unsigned, u64);
-#ifdef CONFIG_UNICODE
-extern int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str);
-extern int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
- const char *str, const struct qstr *name);
-#endif
extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry);
#ifdef CONFIG_MIGRATION
@@ -3211,9 +3327,10 @@ extern int buffer_migrate_page_norefs(struct address_space *,
#define buffer_migrate_page_norefs NULL
#endif
-extern int setattr_prepare(struct dentry *, struct iattr *);
+int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
-extern void setattr_copy(struct inode *inode, const struct iattr *attr);
+void setattr_copy(struct user_namespace *, struct inode *inode,
+ const struct iattr *attr);
extern int file_update_time(struct file *file);
@@ -3377,12 +3494,13 @@ static inline bool is_sxid(umode_t mode)
return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
}
-static inline int check_sticky(struct inode *dir, struct inode *inode)
+static inline int check_sticky(struct user_namespace *mnt_userns,
+ struct inode *dir, struct inode *inode)
{
if (!(dir->i_mode & S_ISVTX))
return 0;
- return __check_sticky(dir, inode);
+ return __check_sticky(mnt_userns, dir, inode);
}
static inline void inode_has_no_xattr(struct inode *inode)
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index db244874e834..63b56aba925a 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
+#include <uapi/linux/fsl_mc.h>
#define FSL_MC_VENDOR_FREESCALE 0x1957
@@ -209,8 +210,6 @@ struct fsl_mc_device {
#define to_fsl_mc_device(_dev) \
container_of(_dev, struct fsl_mc_device, dev)
-#define MC_CMD_NUM_OF_PARAMS 7
-
struct mc_cmd_header {
u8 src_id;
u8 flags_hw;
@@ -220,11 +219,6 @@ struct mc_cmd_header {
__le16 cmd_id;
};
-struct fsl_mc_command {
- __le64 header;
- __le64 params[MC_CMD_NUM_OF_PARAMS];
-};
-
enum mc_cmd_status {
MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index a2e42d3cd87c..e5409b83e731 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -470,6 +470,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/* create a new group */
extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
+extern struct fsnotify_group *fsnotify_alloc_user_group(const struct fsnotify_ops *ops);
/* get reference to a group */
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index c1144a450392..b568b3c7d095 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -138,6 +138,10 @@ int fsverity_file_open(struct inode *inode, struct file *filp);
int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
void fsverity_cleanup_inode(struct inode *inode);
+/* read_metadata.c */
+
+int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
+
/* verify.c */
bool fsverity_verify_page(struct page *page);
@@ -183,6 +187,14 @@ static inline void fsverity_cleanup_inode(struct inode *inode)
{
}
+/* read_metadata.c */
+
+static inline int fsverity_ioctl_read_metadata(struct file *filp,
+ const void __user *uarg)
+{
+ return -EOPNOTSUPP;
+}
+
/* verify.c */
static inline bool fsverity_verify_page(struct page *page)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9a8ce28e4485..86e5028bfa20 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -485,7 +485,6 @@ struct dyn_ftrace {
struct dyn_arch_ftrace arch;
};
-int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
@@ -740,7 +739,6 @@ extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
-static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index fde4ad97564c..ed4e67a7ff1c 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/err.h>
struct fwnode_operations;
struct device;
@@ -18,9 +19,13 @@ struct device;
/*
* fwnode link flags
*
- * LINKS_ADDED: The fwnode has already be parsed to add fwnode links.
+ * LINKS_ADDED:	The fwnode has already been parsed to add fwnode links.
+ * NOT_DEVICE: The fwnode will never be populated as a struct device.
+ * INITIALIZED: The hardware corresponding to fwnode has been initialized.
*/
#define FWNODE_FLAG_LINKS_ADDED BIT(0)
+#define FWNODE_FLAG_NOT_DEVICE BIT(1)
+#define FWNODE_FLAG_INITIALIZED BIT(2)
struct fwnode_handle {
struct fwnode_handle *secondary;
@@ -50,6 +55,13 @@ struct fwnode_endpoint {
const struct fwnode_handle *local_fwnode;
};
+/*
+ * ports and endpoints defined as software_nodes should all follow a common
+ * naming scheme; use these macros to ensure commonality.
+ */
+#define SWNODE_GRAPH_PORT_NAME_FMT "port@%u"
+#define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u"
+
#define NR_FWNODE_REFERENCE_ARGS 8
/**
@@ -159,7 +171,20 @@ static inline void fwnode_init(struct fwnode_handle *fwnode,
INIT_LIST_HEAD(&fwnode->suppliers);
}
+static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
+ bool initialized)
+{
+ if (IS_ERR_OR_NULL(fwnode))
+ return;
+
+ if (initialized)
+ fwnode->flags |= FWNODE_FLAG_INITIALIZED;
+ else
+ fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
+}
+
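
A small, hypothetical example of the intended use: a firmware-node provider reports whether the hardware behind a fwnode has actually come up, so fw_devlink can stop (or resume) treating it as a blocking supplier.

static void example_report_hw_state(struct fwnode_handle *fwnode, int err)
{
	/*
	 * Sets or clears FWNODE_FLAG_INITIALIZED; the helper is a no-op
	 * for IS_ERR_OR_NULL() fwnodes, so callers need no extra check.
	 */
	fwnode_dev_initialized(fwnode, err == 0);
}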
extern u32 fw_devlink_get_flags(void);
+extern bool fw_devlink_is_strict(void);
int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
void fwnode_links_purge(struct fwnode_handle *fwnode);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 809aaa32d53c..f364619092cc 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -32,6 +32,7 @@ extern struct class block_class;
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/workqueue.h>
+#include <linux/xarray.h>
#define PARTITION_META_INFO_VOLNAMELTH 64
/*
@@ -116,13 +117,6 @@ enum {
DISK_EVENT_FLAG_UEVENT = 1 << 1,
};
-struct disk_part_tbl {
- struct rcu_head rcu_head;
- int len;
- struct block_device __rcu *last_lookup;
- struct block_device __rcu *part[];
-};
-
struct disk_events;
struct badblocks;
@@ -148,12 +142,7 @@ struct gendisk {
unsigned short events; /* supported events */
unsigned short event_flags; /* flags related to event processing */
- /* Array of pointers to partitions indexed by partno.
- * Protected with matching bdev lock but stat and other
- * non-critical accesses use RCU. Always access through
- * helpers.
- */
- struct disk_part_tbl __rcu *part_tbl;
+ struct xarray part_tbl;
struct block_device *part0;
const struct block_device_operations *fops;
@@ -163,6 +152,7 @@ struct gendisk {
int flags;
unsigned long state;
#define GD_NEED_PART_SCAN 0
+#define GD_READ_ONLY 1
struct kobject *slave_dir;
struct timer_rand_state *random;
@@ -212,10 +202,11 @@ static inline dev_t disk_devt(struct gendisk *disk)
return MKDEV(disk->major, disk->first_minor);
}
+void disk_uevent(struct gendisk *disk, enum kobject_action action);
+
/*
* Smarter partition iterator without context limits.
*/
-#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
@@ -223,7 +214,7 @@ static inline dev_t disk_devt(struct gendisk *disk)
struct disk_part_iter {
struct gendisk *disk;
struct block_device *part;
- int idx;
+ unsigned long idx;
unsigned int flags;
};
@@ -231,7 +222,6 @@ extern void disk_part_iter_init(struct disk_part_iter *piter,
struct gendisk *disk, unsigned int flags);
struct block_device *disk_part_iter_next(struct disk_part_iter *piter);
extern void disk_part_iter_exit(struct disk_part_iter *piter);
-extern bool disk_has_partitions(struct gendisk *disk);
/* block/genhd.c */
extern void device_add_disk(struct device *parent, struct gendisk *disk,
@@ -249,11 +239,12 @@ static inline void add_disk_no_queue_reg(struct gendisk *disk)
extern void del_gendisk(struct gendisk *gp);
extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
-extern void set_disk_ro(struct gendisk *disk, int flag);
+void set_disk_ro(struct gendisk *disk, bool read_only);
static inline int get_disk_ro(struct gendisk *disk)
{
- return disk->part0->bd_read_only;
+ return disk->part0->bd_read_only ||
+ test_bit(GD_READ_ONLY, &disk->state);
}
extern void disk_block_events(struct gendisk *disk);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6e479e9c48ce..8572a1474e16 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -8,6 +8,20 @@
#include <linux/linkage.h>
#include <linux/topology.h>
+/* The typedef is in types.h but we want the documentation here */
+#if 0
+/**
+ * typedef gfp_t - Memory allocation flags.
+ *
+ * GFP flags are commonly used throughout Linux to indicate how memory
+ * should be allocated. The GFP acronym stands for get_free_pages(),
+ * the underlying memory allocation function. Not every GFP flag is
+ * supported by every function which may allocate memory. Most users
+ * will want to use a plain ``GFP_KERNEL``.
+ */
+typedef unsigned int __bitwise gfp_t;
+#endif
+
struct vm_area_struct;
/*
@@ -583,8 +597,16 @@ extern void free_pages(unsigned long addr, unsigned int order);
struct page_frag_cache;
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask);
+extern void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align_mask);
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
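
A hedged usage sketch (cache, size and alignment are arbitrary): callers that need naturally aligned fragments pass the complement of (align - 1) as align_mask, while plain page_frag_alloc() keeps its old behaviour through the ~0u wrapper above.

static void *example_alloc_desc(struct page_frag_cache *nc, gfp_t gfp)
{
	/*
	 * 128-byte fragment aligned to a 64-byte boundary; align must be
	 * a power of two for this mask to be meaningful.
	 */
	return page_frag_alloc_align(nc, 128, gfp, ~(64u - 1));
}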
extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0)
@@ -612,6 +634,8 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);
+extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
+
#ifdef CONFIG_PM_SLEEP
extern bool pm_suspended_storage(void);
#else
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 781a053abbb9..d755e529c1e3 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -75,7 +75,7 @@ struct gpiod_hog {
* gpiod_get_index()
*/
#define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \
-{ \
+(struct gpiod_lookup) { \
.key = _key, \
.chip_hwnum = _chip_hwnum, \
.con_id = _con_id, \
@@ -87,7 +87,7 @@ struct gpiod_hog {
* Simple definition of a single GPIO hog in an array.
*/
#define GPIO_HOG(_chip_label, _chip_hwnum, _line_name, _lflags, _dflags) \
-{ \
+(struct gpiod_hog) { \
.chip_label = _chip_label, \
.chip_hwnum = _chip_hwnum, \
.line_name = _line_name, \
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 46bcef380446..763802b2b8f9 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -150,7 +150,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
* @info: return information about attribute after parsing report
*
* Parses report and returns the attribute information such as report id,
-* field index, units and exponet etc.
+* field index, units and exponent etc.
*/
int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
u8 type,
@@ -167,7 +167,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @is_signed: If true then fields < 32 bits will be sign-extended
*
* Issues a synchronous or asynchronous read request for an input attribute.
-* Returns data upto 32 bits.
+* Return: data up to 32 bits.
*/
enum sensor_hub_read_flags {
@@ -205,8 +205,9 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
* @buffer: buffer to copy output
*
* Used to get a field in feature report. For example this can get polling
-* interval, sensitivity, activate/deactivate state. On success it returns
-* number of bytes copied to buffer. On failure, it returns value < 0.
+* interval, sensitivity, activate/deactivate state.
+* Return: On success, it returns the number of bytes copied to buffer.
+* On failure, it returns value < 0.
*/
int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
u32 field_index, int buffer_size, void *buffer);
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index d82a97e311d3..3bbdbccc5805 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -128,6 +128,10 @@
#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15
/* Common selectors */
+#define HID_USAGE_SENSOR_PROP_DESC 0x200300
+#define HID_USAGE_SENSOR_PROP_FRIENDLY_NAME 0x200301
+#define HID_USAGE_SENSOR_PROP_SERIAL_NUM 0x200307
+#define HID_USAGE_SENSOR_PROP_MANUFACTURER 0x200305
#define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310
@@ -158,4 +162,14 @@
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840
#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841
+/* Custom Sensor (2000e1) */
+#define HID_USAGE_SENSOR_HINGE 0x20020B
+#define HID_USAGE_SENSOR_DATA_FIELD_LOCATION 0x200400
+#define HID_USAGE_SENSOR_DATA_FIELE_TIME_SINCE_SYS_BOOT 0x20052B
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_USAGE 0x200541
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE 0x200543
+/* Custom Sensor data, 0 <= x <= 28 */
+#define HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE(x) \
+ (HID_USAGE_SENSOR_DATA_FIELD_CUSTOM_VALUE_BASE + (x))
+
#endif
diff --git a/include/linux/hid.h b/include/linux/hid.h
index c39d71eb1fd0..ef702b3f56e3 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -918,7 +918,7 @@ __u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
/**
* hid_device_io_start - enable HID input during probe, remove
*
- * @hid - the device
+ * @hid: the device
*
* This should only be called during probe or remove and only be
* called by the thread calling probe or remove. It will allow
@@ -936,7 +936,7 @@ static inline void hid_device_io_start(struct hid_device *hid) {
/**
* hid_device_io_stop - disable HID input during probe, remove
*
- * @hid - the device
+ * @hid: the device
*
* Should only be called after hid_device_io_start. It will prevent
* incoming packets from going to the driver for the duration of
@@ -1010,6 +1010,13 @@ static inline void hid_map_usage(struct hid_input *hidinput,
/**
* hid_map_usage_clear - map usage input bits and clear the input bit
*
+ * @hidinput: hidinput which we are interested in
+ * @usage: usage to fill in
+ * @bit: pointer to input->{}bit (out parameter)
+ * @max: maximal valid usage->code to consider later (out parameter)
+ * @type: input event type (EV_KEY, EV_REL, ...)
+ * @c: code which corresponds to this usage and type
+ *
* The same as hid_map_usage, except the @c bit is also cleared in supported
* bits (@bit).
*/
@@ -1084,7 +1091,7 @@ static inline void hid_hw_request(struct hid_device *hdev,
* @rtype: HID report type
* @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
*
- * @return: count of data transfered, negative if error
+ * Return: count of data transferred, negative if error
*
* Same behavior as hid_hw_request, but with raw buffers instead.
*/
@@ -1106,7 +1113,7 @@ static inline int hid_hw_raw_request(struct hid_device *hdev,
* @buf: raw data to transfer
* @len: length of buf
*
- * @return: count of data transfered, negative if error
+ * Return: count of data transferred, negative if error
*/
static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf,
size_t len)
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 1bbe96dc8be6..7902c7d8b55f 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -127,11 +127,6 @@ static inline unsigned long totalhigh_pages(void)
return (unsigned long)atomic_long_read(&_totalhigh_pages);
}
-static inline void totalhigh_pages_inc(void)
-{
- atomic_long_inc(&_totalhigh_pages);
-}
-
static inline void totalhigh_pages_add(long count)
{
atomic_long_add(count, &_totalhigh_pages);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 6a19f35f836b..ba973efcd369 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -78,6 +78,7 @@ static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
}
enum transparent_hugepage_flag {
+ TRANSPARENT_HUGEPAGE_NEVER_DAX,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
@@ -123,6 +124,13 @@ extern unsigned long transparent_hugepage_flags;
*/
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
+
+ /*
+	 * Bail out if the hardware/firmware has marked hugepage support as disabled.
+ */
+ if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
+ return false;
+
if (vma->vm_flags & VM_NOHUGEPAGE)
return false;
@@ -134,12 +142,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
return true;
- /*
- * For dax vmas, try to always use hugepage mappings. If the kernel does
- * not support hugepages, fsdax mappings will fallback to PAGE_SIZE
- * mappings, and device-dax namespaces, that try to guarantee a given
- * mapping size, will fail to enable
- */
+
if (vma_is_dax(vma))
return true;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b5807f23caf8..cccd1aab69dd 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -37,7 +37,7 @@ struct hugepage_subpool {
struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */
- /* sasitfy minimum size. */
+ /* satisfy minimum size. */
};
struct resv_map {
@@ -139,7 +139,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
unsigned long dst_addr,
unsigned long src_addr,
struct page **pagep);
-int hugetlb_reserve_pages(struct inode *inode, long from, long to,
+bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
@@ -472,6 +472,84 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+/*
+ * hugetlb page-specific state flags. These flags are located in page.private
+ * of the hugetlb head page. Functions created via the below macros should be
+ * used to manipulate these flags.
+ *
+ * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
+ * allocation time. Cleared when page is fully instantiated. Free
+ * routine checks flag to restore a reservation on error paths.
+ * Synchronization: Examined or modified by code that knows it has
+ * the only reference to page. i.e. After allocation but before use
+ * or when the page is being freed.
+ * HPG_migratable - Set after a newly allocated page is added to the page
+ * cache and/or page tables. Indicates the page is a candidate for
+ * migration.
+ * Synchronization: Initially set after new page allocation with no
+ * locking. When examined and modified during migration processing
+ * (isolate, migrate, putback) the hugetlb_lock is held.
+ * HPG_temporary - Set on a page that is temporarily allocated from the buddy
+ * allocator. Typically used for migration target pages when no pages
+ * are available in the pool. The hugetlb free page path will
+ * immediately free pages with this flag set to the buddy allocator.
+ * Synchronization: Can be set after huge page allocation from buddy when
+ *	code knows it has the only reference. All other examinations and
+ * modifications require hugetlb_lock.
+ * HPG_freed - Set when page is on the free lists.
+ * Synchronization: hugetlb_lock held for examination and modification.
+ */
+enum hugetlb_page_flags {
+ HPG_restore_reserve = 0,
+ HPG_migratable,
+ HPG_temporary,
+ HPG_freed,
+ __NR_HPAGEFLAGS,
+};
+
+/*
+ * Macros to create test, set and clear function definitions for
+ * hugetlb specific page flags.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define TESTHPAGEFLAG(uname, flname) \
+static inline int HPage##uname(struct page *page) \
+ { return test_bit(HPG_##flname, &(page->private)); }
+
+#define SETHPAGEFLAG(uname, flname) \
+static inline void SetHPage##uname(struct page *page) \
+ { set_bit(HPG_##flname, &(page->private)); }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static inline void ClearHPage##uname(struct page *page) \
+ { clear_bit(HPG_##flname, &(page->private)); }
+#else
+#define TESTHPAGEFLAG(uname, flname) \
+static inline int HPage##uname(struct page *page) \
+ { return 0; }
+
+#define SETHPAGEFLAG(uname, flname) \
+static inline void SetHPage##uname(struct page *page) \
+ { }
+
+#define CLEARHPAGEFLAG(uname, flname) \
+static inline void ClearHPage##uname(struct page *page) \
+ { }
+#endif
+
+#define HPAGEFLAG(uname, flname) \
+ TESTHPAGEFLAG(uname, flname) \
+ SETHPAGEFLAG(uname, flname) \
+ CLEARHPAGEFLAG(uname, flname) \
+
+/*
+ * Create functions associated with hugetlb page flags
+ */
+HPAGEFLAG(RestoreReserve, restore_reserve)
+HPAGEFLAG(Migratable, migratable)
+HPAGEFLAG(Temporary, temporary)
+HPAGEFLAG(Freed, freed)
+
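
To make the generated API concrete (the caller below is illustrative), each HPAGEFLAG() line above produces HPageFoo()/SetHPageFoo()/ClearHPageFoo() accessors operating on the head page's page->private:

static void example_after_instantiation(struct page *hpage)
{
	/* The reservation was consumed and the page is now fully set up. */
	if (HPageRestoreReserve(hpage))
		ClearHPageRestoreReserve(hpage);

	/* Mark it as a migration candidate, per the flag documentation. */
	SetHPageMigratable(hpage);
}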
#ifdef CONFIG_HUGETLB_PAGE
#define HSTATE_NAME_LEN 32
@@ -531,6 +609,20 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
+/*
+ * hugetlb page subpool pointer located in hpage[1].private
+ */
+static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
+{
+ return (struct hugepage_subpool *)(hpage+1)->private;
+}
+
+static inline void hugetlb_set_page_subpool(struct page *hpage,
+ struct hugepage_subpool *subpool)
+{
+ set_page_private(hpage+1, (unsigned long)subpool);
+}
+
static inline struct hstate *hstate_file(struct file *f)
{
return hstate_inode(file_inode(f));
@@ -770,8 +862,6 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
}
#endif
-void set_page_huge_active(struct page *page);
-
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5ddb479c4d4c..f1d74dcf0353 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -785,6 +785,7 @@ struct vmbus_device {
u16 dev_type;
guid_t guid;
bool perf_device;
+ bool allowed_in_isolated;
};
struct vmbus_channel {
@@ -803,6 +804,7 @@ struct vmbus_channel {
u8 monitor_bit;
bool rescind; /* got rescind msg */
+ bool rescind_ref; /* got rescind msg, got channel reference */
struct completion rescind_event;
u32 ringbuffer_gpadlhandle;
@@ -1471,6 +1473,7 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
#define ICMSGTYPE_SHUTDOWN 3
#define ICMSGTYPE_TIMESYNC 4
#define ICMSGTYPE_VSS 5
+#define ICMSGTYPE_FCOPY 7
#define ICMSGHDRFLAG_TRANSACTION 1
#define ICMSGHDRFLAG_REQUEST 2
@@ -1514,11 +1517,17 @@ struct icmsg_hdr {
u8 reserved[2];
} __packed;
+#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
+#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
+#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
+ (ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
+ (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
+
struct icmsg_negotiate {
u16 icframe_vercnt;
u16 icmsg_vercnt;
u32 reserved;
- struct ic_version icversion_data[1]; /* any size array */
+ struct ic_version icversion_data[]; /* any size array */
} __packed;
struct shutdown_msg_data {
@@ -1569,7 +1578,7 @@ struct hyperv_service_callback {
};
#define MAX_SRV_VER 0x7ffffff
-extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
+extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
const int *fw_version, int fw_vercnt,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index de102e4418ab..8242e13e7b0b 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -176,7 +176,7 @@ struct i3c_device;
struct i3c_driver {
struct device_driver driver;
int (*probe)(struct i3c_device *dev);
- int (*remove)(struct i3c_device *dev);
+ void (*remove)(struct i3c_device *dev);
const struct i3c_device_id *id_table;
};
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 1b3371ae8193..9055cb380ee2 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -3,6 +3,7 @@
#define _LINUX_ICMPV6_H
#include <linux/skbuff.h>
+#include <linux/ipv6.h>
#include <uapi/linux/icmpv6.h>
static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
@@ -15,13 +16,16 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct in6_addr *force_saddr);
-#if IS_BUILTIN(CONFIG_IPV6)
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- const struct in6_addr *force_saddr);
-static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+ const struct in6_addr *force_saddr,
+ const struct inet6_skb_parm *parm);
+#if IS_BUILTIN(CONFIG_IPV6)
+static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm)
{
- icmp6_send(skb, type, code, info, NULL);
+ icmp6_send(skb, type, code, info, NULL, parm);
}
static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
{
@@ -34,18 +38,28 @@ static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
return 0;
}
#else
-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
+extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ const struct inet6_skb_parm *parm);
extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
#endif
+static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+{
+ __icmpv6_send(skb, type, code, info, IP6CB(skb));
+}
+
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
#if IS_ENABLED(CONFIG_NF_NAT)
void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
#else
-#define icmpv6_ndo_send icmpv6_send
+static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+{
+ struct inet6_skb_parm parm = { 0 };
+ __icmpv6_send(skb_in, type, code, info, &parm);
+}
#endif
#else
diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h
new file mode 100644
index 000000000000..38bbc537d4e4
--- /dev/null
+++ b/include/linux/if_hsr.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_HSR_H_
+#define _LINUX_IF_HSR_H_
+
+/* used to differentiate various protocols */
+enum hsr_version {
+ HSR_V0 = 0,
+ HSR_V1,
+ PRP_V1,
+};
+
+#if IS_ENABLED(CONFIG_HSR)
+extern bool is_hsr_master(struct net_device *dev);
+extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver);
+#else
+static inline bool is_hsr_master(struct net_device *dev)
+{
+ return false;
+}
+static inline int hsr_get_version(struct net_device *dev,
+ enum hsr_version *ver)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_HSR */
+
+#endif /*_LINUX_IF_HSR_H_*/
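
A hedged example of the intended consumer pattern (e.g. a switch driver offloading HSR/PRP; the function name is illustrative): confirm the upper device is an HSR master, then branch on the protocol version.

#include <linux/netdevice.h>
#include <linux/if_hsr.h>

static int example_offload_check(struct net_device *upper)
{
	enum hsr_version ver;

	if (!is_hsr_master(upper))
		return -EOPNOTSUPP;

	if (hsr_get_version(upper, &ver))
		return -EINVAL;

	/* PRP_V1 needs different supervision-frame handling than HSR_V0/V1. */
	return ver == PRP_V1;
}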
diff --git a/include/linux/iio/adc/qcom-vadc-common.h b/include/linux/iio/adc/qcom-vadc-common.h
new file mode 100644
index 000000000000..33f60f43e1aa
--- /dev/null
+++ b/include/linux/iio/adc/qcom-vadc-common.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code shared between the different Qualcomm PMIC voltage ADCs
+ */
+
+#ifndef QCOM_VADC_COMMON_H
+#define QCOM_VADC_COMMON_H
+
+#include <linux/types.h>
+
+#define VADC_CONV_TIME_MIN_US 2000
+#define VADC_CONV_TIME_MAX_US 2100
+
+/* Min ADC code represents 0V */
+#define VADC_MIN_ADC_CODE 0x6000
+/* Max ADC code represents full-scale range of 1.8V */
+#define VADC_MAX_ADC_CODE 0xa800
+
+#define VADC_ABSOLUTE_RANGE_UV 625000
+#define VADC_RATIOMETRIC_RANGE 1800
+
+#define VADC_DEF_PRESCALING 0 /* 1:1 */
+#define VADC_DEF_DECIMATION 0 /* 512 */
+#define VADC_DEF_HW_SETTLE_TIME 0 /* 0 us */
+#define VADC_DEF_AVG_SAMPLES 0 /* 1 sample */
+#define VADC_DEF_CALIB_TYPE VADC_CALIB_ABSOLUTE
+
+#define VADC_DECIMATION_MIN 512
+#define VADC_DECIMATION_MAX 4096
+#define ADC5_DEF_VBAT_PRESCALING 1 /* 1:3 */
+#define ADC5_DECIMATION_SHORT 250
+#define ADC5_DECIMATION_MEDIUM 420
+#define ADC5_DECIMATION_LONG 840
+/* Default decimation - 1024 for rev2, 840 for pmic5 */
+#define ADC5_DECIMATION_DEFAULT 2
+#define ADC5_DECIMATION_SAMPLES_MAX 3
+
+#define VADC_HW_SETTLE_DELAY_MAX 10000
+#define VADC_HW_SETTLE_SAMPLES_MAX 16
+#define VADC_AVG_SAMPLES_MAX 512
+#define ADC5_AVG_SAMPLES_MAX 16
+
+#define PMIC5_CHG_TEMP_SCALE_FACTOR 377500
+#define PMIC5_SMB_TEMP_CONSTANT 419400
+#define PMIC5_SMB_TEMP_SCALE_FACTOR 356
+
+#define PMI_CHG_SCALE_1 -138890
+#define PMI_CHG_SCALE_2 391750000000LL
+
+#define VADC5_MAX_CODE 0x7fff
+#define ADC5_FULL_SCALE_CODE 0x70e4
+#define ADC5_USR_DATA_CHECK 0x8000
+
+#define R_PU_100K 100000
+#define RATIO_MAX_ADC7 BIT(14)
+
+/*
+ * VADC_CALIB_ABSOLUTE: uses the 625mV and 1.25V as reference channels.
+ * VADC_CALIB_RATIOMETRIC: uses the reference voltage (1.8V) and GND for
+ * calibration.
+ */
+enum vadc_calibration {
+ VADC_CALIB_ABSOLUTE = 0,
+ VADC_CALIB_RATIOMETRIC
+};
+
+/**
+ * struct vadc_linear_graph - Represent ADC characteristics.
+ * @dy: numerator slope to calculate the gain.
+ * @dx: denominator slope to calculate the gain.
+ * @gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are
+ * computed to calibrate the device.
+ */
+struct vadc_linear_graph {
+ s32 dy;
+ s32 dx;
+ s32 gnd;
+};
+
+/**
+ * struct vadc_prescale_ratio - Represent scaling ratio for ADC input.
+ * @num: the inverse numerator of the gain applied to the input channel.
+ * @den: the inverse denominator of the gain applied to the input channel.
+ */
+struct vadc_prescale_ratio {
+ u32 num;
+ u32 den;
+};
+
+/**
+ * enum vadc_scale_fn_type - Scaling function to convert ADC code to
+ * physical scaled units for the channel.
+ * SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
+ * SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
+ * Uses a mapping table with 100K pullup.
+ * SCALE_PMIC_THERM: Returns result in millidegrees Centigrade.
+ * SCALE_XOTHERM: Returns XO thermistor voltage in millidegC.
+ * SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ * SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to
+ * voltage (uV) with hardware applied offset/slope values to adc code.
+ * SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using
+ * lookup table. The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using
+ * 100k pullup. The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using
+ * lookup table for PMIC7. The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_PMIC_THERM: Returns result in millidegrees Centigrade.
+ *	The hardware applies offset/slope to adc code.
+ * SCALE_HW_CALIB_PMIC_THERM_PM7: Returns result in millidegrees Centigrade.
+ *	The hardware applies offset/slope to adc code. This is for PMIC7.
+ * SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5
+ * charger temperature.
+ * SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5
+ * SMB1390 temperature.
+ */
+enum vadc_scale_fn_type {
+ SCALE_DEFAULT = 0,
+ SCALE_THERM_100K_PULLUP,
+ SCALE_PMIC_THERM,
+ SCALE_XOTHERM,
+ SCALE_PMI_CHG_TEMP,
+ SCALE_HW_CALIB_DEFAULT,
+ SCALE_HW_CALIB_THERM_100K_PULLUP,
+ SCALE_HW_CALIB_XOTHERM,
+ SCALE_HW_CALIB_THERM_100K_PU_PM7,
+ SCALE_HW_CALIB_PMIC_THERM,
+ SCALE_HW_CALIB_PMIC_THERM_PM7,
+ SCALE_HW_CALIB_PM5_CHG_TEMP,
+ SCALE_HW_CALIB_PM5_SMB_TEMP,
+ SCALE_HW_CALIB_INVALID,
+};
+
+struct adc5_data {
+ const u32 full_scale_code_volt;
+ const u32 full_scale_code_cur;
+ const struct adc5_channels *adc_chans;
+ const struct iio_info *info;
+ unsigned int *decimation;
+ unsigned int *hw_settle_1;
+ unsigned int *hw_settle_2;
+};
+
+int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
+ const struct vadc_linear_graph *calib_graph,
+ const struct vadc_prescale_ratio *prescale,
+ bool absolute,
+ u16 adc_code, int *result_mdec);
+
+struct qcom_adc5_scale_type {
+ int (*scale_fn)(const struct vadc_prescale_ratio *prescale,
+ const struct adc5_data *data, u16 adc_code, int *result);
+};
+
+int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
+ unsigned int prescale_ratio,
+ const struct adc5_data *data,
+ u16 adc_code, int *result_mdec);
+
+u16 qcom_adc_tm5_temp_volt_scale(unsigned int prescale_ratio,
+ u32 full_scale_code_volt, int temp);
+
+int qcom_adc5_prescaling_from_dt(u32 num, u32 den);
+
+int qcom_adc5_hw_settle_time_from_dt(u32 value, const unsigned int *hw_settle);
+
+int qcom_adc5_avg_samples_from_dt(u32 value);
+
+int qcom_adc5_decimation_from_dt(u32 value, const unsigned int *decimation);
+
+int qcom_vadc_decimation_from_dt(u32 value);
+
+#endif /* QCOM_VADC_COMMON_H */
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index c4118dcb8e05..0a90ba8fa1bb 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -13,6 +13,7 @@
struct iio_dev;
struct iio_chan_spec;
struct device;
+struct device_node;
/**
* struct iio_channel - everything needed for a consumer to use a channel
@@ -97,6 +98,41 @@ void iio_channel_release_all(struct iio_channel *chan);
*/
struct iio_channel *devm_iio_channel_get_all(struct device *dev);
+/**
+ * of_iio_channel_get_by_name() - get description of all that is needed to access a channel.
+ * @np: Pointer to consumer device tree node
+ * @name: Unique name to identify the channel on the consumer
+ *		side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
+ */
+#ifdef CONFIG_OF
+struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, const char *name);
+#else
+static inline struct iio_channel *
+of_iio_channel_get_by_name(struct device_node *np, const char *name)
+{
+ return NULL;
+}
+#endif
+
+/**
+ * devm_of_iio_channel_get_by_name() - Resource managed version of of_iio_channel_get_by_name().
+ * @dev: Pointer to consumer device.
+ * @np: Pointer to consumer device tree node
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ *		side. This typically describes the channel's use within
+ * the consumer. E.g. 'battery_voltage'
+ *
+ * Returns a pointer encoding a negative errno if it is not able to get the
+ * IIO channel; otherwise returns a valid pointer to the IIO channel.
+ *
+ * The allocated iio channel is automatically released when the device is
+ * unbound.
+ */
+struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
+ struct device_node *np,
+ const char *consumer_channel);
+
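
A hedged consumer-side sketch (driver and channel names are illustrative): the devm_ variant ties the channel's lifetime to the consumer device, so no explicit release is needed.

#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct iio_channel *chan;

	/*
	 * "battery_voltage" must match an io-channel-names entry in the
	 * consumer's device tree node.
	 */
	chan = devm_of_iio_channel_get_by_name(&pdev->dev, pdev->dev.of_node,
					       "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... use iio_read_channel_processed(chan, ...) as usual ... */
	return 0;
}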
struct iio_cb_buffer;
/**
* iio_channel_get_all_cb() - register callback for triggered capture
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 7db9cca1af34..61d5723ec303 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -16,7 +16,8 @@ struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_file_check(struct file *file, int mask);
-extern void ima_post_create_tmpfile(struct inode *inode);
+extern void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
+ struct inode *inode);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot);
@@ -27,10 +28,15 @@ extern int ima_read_file(struct file *file, enum kernel_read_file_id id,
bool contents);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
-extern void ima_post_path_mknod(struct dentry *dentry);
+extern void ima_post_path_mknod(struct user_namespace *mnt_userns,
+ struct dentry *dentry);
extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size);
extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size);
+extern void ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash);
#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
extern void ima_appraise_parse_cmdline(void);
@@ -68,7 +74,8 @@ static inline int ima_file_check(struct file *file, int mask)
return 0;
}
-static inline void ima_post_create_tmpfile(struct inode *inode)
+static inline void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
+ struct inode *inode)
{
}
@@ -112,7 +119,8 @@ static inline int ima_post_read_file(struct file *file, void *buf, loff_t size,
return 0;
}
-static inline void ima_post_path_mknod(struct dentry *dentry)
+static inline void ima_post_path_mknod(struct user_namespace *mnt_userns,
+ struct dentry *dentry)
{
return;
}
@@ -128,6 +136,12 @@ static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size
}
static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
+
+static inline void ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash) {}
+
#endif /* CONFIG_IMA */
#ifndef CONFIG_IMA_KEXEC
@@ -153,7 +167,8 @@ static inline void ima_post_key_create_or_update(struct key *keyring,
#ifdef CONFIG_IMA_APPRAISE
extern bool is_ima_appraise_enabled(void);
-extern void ima_inode_post_setattr(struct dentry *dentry);
+extern void ima_inode_post_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry);
extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len);
extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name);
@@ -163,7 +178,8 @@ static inline bool is_ima_appraise_enabled(void)
return 0;
}
-static inline void ima_inode_post_setattr(struct dentry *dentry)
+static inline void ima_inode_post_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry)
{
return;
}
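A hedged sketch of a caller for the new ima_measure_critical_data() hook declared above; the event label, event name and buffer are made-up placeholders:

static void example_measure_policy(const void *policy, size_t len)
{
        /*
         * event_label groups related measurements and event_name identifies
         * this record; hash = true asks IMA to record only a digest of the
         * buffer rather than the buffer contents themselves.
         */
        ima_measure_critical_data("example_subsys", "example_policy",
                                  policy, len, true);
}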
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
index 54c02c84906a..c1c76a70a6ce 100644
--- a/include/linux/indirect_call_wrapper.h
+++ b/include/linux/indirect_call_wrapper.h
@@ -36,6 +36,7 @@
#define INDIRECT_CALLABLE_DECLARE(f) f
#define INDIRECT_CALLABLE_SCOPE
+#define EXPORT_INDIRECT_CALLABLE(f) EXPORT_SYMBOL(f)
#else
#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
@@ -44,6 +45,7 @@
#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
#define INDIRECT_CALLABLE_DECLARE(f)
#define INDIRECT_CALLABLE_SCOPE static
+#define EXPORT_INDIRECT_CALLABLE(f)
#endif
/*
@@ -60,4 +62,10 @@
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif
+#if IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__)
+#endif
+
#endif
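A rough usage sketch for the new EXPORT_INDIRECT_CALLABLE() and INDIRECT_CALL_INET_1() helpers; the handler and call site below are hypothetical:

/*
 * Under retpolines, INDIRECT_CALLABLE_SCOPE makes the handler global instead
 * of static, and EXPORT_INDIRECT_CALLABLE() exports it so that modules can
 * compare function pointers against it at the call site.
 */
INDIRECT_CALLABLE_SCOPE int example_ipv6_rcv(struct sk_buff *skb)
{
        return 0;
}
EXPORT_INDIRECT_CALLABLE(example_ipv6_rcv);

static int example_deliver(int (*handler)(struct sk_buff *),
                           struct sk_buff *skb)
{
        /* Direct call when handler == example_ipv6_rcv; indirect otherwise. */
        return INDIRECT_CALL_INET_1(handler, example_ipv6_rcv, skb);
}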
diff --git a/include/linux/init.h b/include/linux/init.h
index e668832ef66a..31f54de58429 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -184,19 +184,80 @@ extern bool initcall_debug;
* as KEEP() in the linker script.
*/
+/* Format: <modname>__<counter>_<line>_<fn> */
+#define __initcall_id(fn) \
+ __PASTE(__KBUILD_MODNAME, \
+ __PASTE(__, \
+ __PASTE(__COUNTER__, \
+ __PASTE(_, \
+ __PASTE(__LINE__, \
+ __PASTE(_, fn))))))
+
+/* Format: __<prefix>__<iid><id> */
+#define __initcall_name(prefix, __iid, id) \
+ __PASTE(__, \
+ __PASTE(prefix, \
+ __PASTE(__, \
+ __PASTE(__iid, id))))
+
+#ifdef CONFIG_LTO_CLANG
+/*
+ * With LTO, the compiler doesn't necessarily obey link order for
+ * initcalls. In order to preserve the correct order, we add each
+ * variable into its own section and generate a linker script (in
+ * scripts/link-vmlinux.sh) to specify the order of the sections.
+ */
+#define __initcall_section(__sec, __iid) \
+ #__sec ".init.." #__iid
+
+/*
+ * With LTO, the compiler can rename static functions to avoid
+ * global naming collisions. We use a global stub function for
+ * initcalls to create a stable symbol name whose address can be
+ * taken in inline assembly when PREL32 relocations are used.
+ */
+#define __initcall_stub(fn, __iid, id) \
+ __initcall_name(initstub, __iid, id)
+
+#define __define_initcall_stub(__stub, fn) \
+ int __init __stub(void); \
+ int __init __stub(void) \
+ { \
+ return fn(); \
+ } \
+ __ADDRESSABLE(__stub)
+#else
+#define __initcall_section(__sec, __iid) \
+ #__sec ".init"
+
+#define __initcall_stub(fn, __iid, id) fn
+
+#define __define_initcall_stub(__stub, fn) \
+ __ADDRESSABLE(fn)
+#endif
+
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define ___define_initcall(fn, id, __sec) \
- __ADDRESSABLE(fn) \
- asm(".section \"" #__sec ".init\", \"a\" \n" \
- "__initcall_" #fn #id ": \n" \
- ".long " #fn " - . \n" \
+#define ____define_initcall(fn, __stub, __name, __sec) \
+ __define_initcall_stub(__stub, fn) \
+ asm(".section \"" __sec "\", \"a\" \n" \
+ __stringify(__name) ": \n" \
+ ".long " __stringify(__stub) " - . \n" \
".previous \n");
#else
-#define ___define_initcall(fn, id, __sec) \
- static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(#__sec ".init"))) = fn;
+#define ____define_initcall(fn, __unused, __name, __sec) \
+ static initcall_t __name __used \
+ __attribute__((__section__(__sec))) = fn;
#endif
+#define __unique_initcall(fn, id, __sec, __iid) \
+ ____define_initcall(fn, \
+ __initcall_stub(fn, __iid, id), \
+ __initcall_name(initcall, __iid, id), \
+ __initcall_section(__sec, __iid))
+
+#define ___define_initcall(fn, id, __sec) \
+ __unique_initcall(fn, id, __sec, __initcall_id(fn))
+
#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id)
/*
@@ -236,7 +297,7 @@ extern bool initcall_debug;
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
-#define console_initcall(fn) ___define_initcall(fn,, .con_initcall)
+#define console_initcall(fn) ___define_initcall(fn, con, .con_initcall)
struct obs_kernel_param {
const char *str;
@@ -277,14 +338,14 @@ struct obs_kernel_param {
var = 1; \
return 0; \
} \
- __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \
+ early_param(str_on, parse_##var##_on); \
\
static int __init parse_##var##_off(char *arg) \
{ \
var = 0; \
return 0; \
} \
- __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1)
+ early_param(str_off, parse_##var##_off)
/* Relies on boot_command_line being set */
void __init parse_early_param(void);
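An illustrative (not exact) picture of what the new initcall plumbing generates under CONFIG_LTO_CLANG with PREL32 relocations for a hypothetical device_initcall(foo_init); the counter/line values and symbol spelling are assumptions:

/*
 * The stub is global and __ADDRESSABLE(), so its address survives LTO
 * renaming of the (possibly static) foo_init() itself:
 */
int __init __initstub__kmod_foo__15_42_foo_init6(void);
int __init __initstub__kmod_foo__15_42_foo_init6(void)
{
        return foo_init();
}
/*
 * A PREL32 entry referencing the stub is emitted into a dedicated section,
 * roughly ".initcall6.init..kmod_foo__15_42_foo_init", which the linker
 * script generated by scripts/link-vmlinux.sh sorts back into the intended
 * initcall order.
 */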
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index 8db6f8c8030b..85c15717af34 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_INITRD_H
+#define __LINUX_INITRD_H
+
#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
/* starting block # of image */
@@ -15,6 +18,12 @@ extern int initrd_below_start_ok;
extern unsigned long initrd_start, initrd_end;
extern void free_initrd_mem(unsigned long, unsigned long);
+#ifdef CONFIG_BLK_DEV_INITRD
+extern void __init reserve_initrd_mem(void);
+#else
+static inline void __init reserve_initrd_mem(void) {}
+#endif
+
extern phys_addr_t phys_initrd_start;
extern unsigned long phys_initrd_size;
@@ -24,3 +33,5 @@ extern char __initramfs_start[];
extern unsigned long __initramfs_size;
void console_on_rootfs(void);
+
+#endif /* __LINUX_INITRD_H */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 09c6a0bf3892..1bc46b88711a 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -42,6 +42,8 @@
#define DMA_FL_PTE_PRESENT BIT_ULL(0)
#define DMA_FL_PTE_US BIT_ULL(2)
+#define DMA_FL_PTE_ACCESS BIT_ULL(5)
+#define DMA_FL_PTE_DIRTY BIT_ULL(6)
#define DMA_FL_PTE_XD BIT_ULL(63)
#define ADDR_WIDTH_5LEVEL (57)
@@ -168,34 +170,37 @@
* Extended Capability Register
*/
+#define ecap_rps(e) (((e) >> 49) & 0x1)
#define ecap_smpwc(e) (((e) >> 48) & 0x1)
#define ecap_flts(e) (((e) >> 47) & 0x1)
#define ecap_slts(e) (((e) >> 46) & 0x1)
+#define ecap_slads(e) (((e) >> 45) & 0x1)
#define ecap_vcs(e) (((e) >> 44) & 0x1)
#define ecap_smts(e) (((e) >> 43) & 0x1)
-#define ecap_dit(e) ((e >> 41) & 0x1)
-#define ecap_pasid(e) ((e >> 40) & 0x1)
-#define ecap_pss(e) ((e >> 35) & 0x1f)
-#define ecap_eafs(e) ((e >> 34) & 0x1)
-#define ecap_nwfs(e) ((e >> 33) & 0x1)
-#define ecap_srs(e) ((e >> 31) & 0x1)
-#define ecap_ers(e) ((e >> 30) & 0x1)
-#define ecap_prs(e) ((e >> 29) & 0x1)
-#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
-#define ecap_dis(e) ((e >> 27) & 0x1)
-#define ecap_nest(e) ((e >> 26) & 0x1)
-#define ecap_mts(e) ((e >> 25) & 0x1)
-#define ecap_ecs(e) ((e >> 24) & 0x1)
+#define ecap_dit(e) (((e) >> 41) & 0x1)
+#define ecap_pds(e) (((e) >> 42) & 0x1)
+#define ecap_pasid(e) (((e) >> 40) & 0x1)
+#define ecap_pss(e) (((e) >> 35) & 0x1f)
+#define ecap_eafs(e) (((e) >> 34) & 0x1)
+#define ecap_nwfs(e) (((e) >> 33) & 0x1)
+#define ecap_srs(e) (((e) >> 31) & 0x1)
+#define ecap_ers(e) (((e) >> 30) & 0x1)
+#define ecap_prs(e) (((e) >> 29) & 0x1)
+#define ecap_broken_pasid(e) (((e) >> 28) & 0x1)
+#define ecap_dis(e) (((e) >> 27) & 0x1)
+#define ecap_nest(e) (((e) >> 26) & 0x1)
+#define ecap_mts(e) (((e) >> 25) & 0x1)
+#define ecap_ecs(e) (((e) >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
-#define ecap_pass_through(e) ((e >> 6) & 0x1)
-#define ecap_eim_support(e) ((e >> 4) & 0x1)
-#define ecap_ir_support(e) ((e >> 3) & 0x1)
+#define ecap_pass_through(e) (((e) >> 6) & 0x1)
+#define ecap_eim_support(e) (((e) >> 4) & 0x1)
+#define ecap_ir_support(e) (((e) >> 3) & 0x1)
#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
-#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
-#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
+#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
+#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */
/* Virtual command interface capability */
#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
@@ -662,7 +667,7 @@ static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
* 7: super page
* 8-10: available
* 11: snoop behavior
- * 12-63: Host physcial address
+ * 12-63: Host physical address
*/
struct dma_pte {
u64 val;
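For reference, a short note on why the extra parentheses around the ecap_*() macro arguments matter (the expression is hypothetical):

/*
 * With the old definition, #define ecap_dit(e) ((e >> 41) & 0x1),
 * a call such as ecap_dit(x | y) expanded to ((x | y >> 41) & 0x1):
 * the shift bound to y alone, since >> has higher precedence than |.
 * With the argument parenthesised, (((e) >> 41) & 0x1), the same call
 * expands to (((x | y) >> 41) & 0x1), which is what the caller meant.
 */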
diff --git a/include/linux/intel-pti.h b/include/linux/intel-pti.h
deleted file mode 100644
index fcd841a90f2f..000000000000
--- a/include/linux/intel-pti.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) Intel 2011
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * The PTI (Parallel Trace Interface) driver directs trace data routed from
- * various parts in the system out through the Intel Penwell PTI port and
- * out of the mobile device for analysis with a debugging tool
- * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
- * compact JTAG, standard.
- *
- * This header file will allow other parts of the OS to use the
- * interface to write out it's contents for debugging a mobile system.
- */
-
-#ifndef LINUX_INTEL_PTI_H_
-#define LINUX_INTEL_PTI_H_
-
-/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
-#define PTI_LASTDWORD_DTS 0x30
-
-/* basic structure used as a write address to the PTI HW */
-struct pti_masterchannel {
- u8 master;
- u8 channel;
-};
-
-/* the following functions are defined in misc/pti.c */
-void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
-struct pti_masterchannel *pti_request_masterchannel(u8 type,
- const char *thread_name);
-void pti_release_masterchannel(struct pti_masterchannel *mc);
-
-#endif /* LINUX_INTEL_PTI_H_ */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index bb8ff9083e7d..967e25767153 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -569,15 +569,6 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-#ifdef __ARCH_HAS_DO_SOFTIRQ
-void do_softirq_own_stack(void);
-#else
-static inline void do_softirq_own_stack(void)
-{
- __do_softirq();
-}
-#endif
-
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index ea727eb1a1a9..a4c9ca2c31f1 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -15,6 +15,7 @@ enum io_pgtable_fmt {
ARM_64_LPAE_S2,
ARM_V7S,
ARM_MALI_LPAE,
+ AMD_IOMMU_V1,
IO_PGTABLE_NUM_FMTS,
};
@@ -68,13 +69,9 @@ struct io_pgtable_cfg {
* hardware which does not implement the permissions of a given
* format, and/or requires some format-specific default value.
*
- * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
- * (unmapped) entries but the hardware might do so anyway, perform
- * TLB maintenance when mapping as well as when unmapping.
- *
* IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
- * to support up to 34 bits PA where the bit32 and bit33 are
- * encoded in the bit9 and bit4 of the PTE respectively.
+ * to support up to 35 bits PA where the bit32, bit33 and bit34 are
+ * encoded in the bit9, bit4 and bit5 of the PTE respectively.
*
* IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
* on unmap, for DMA domains using the flush queue mechanism for
@@ -88,7 +85,6 @@ struct io_pgtable_cfg {
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
- #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
#define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
#define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
#define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
@@ -214,14 +210,16 @@ struct io_pgtable_domain_attr {
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
- iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all)
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
}
static inline void
io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
size_t size, size_t granule)
{
- iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk)
+ iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
}
static inline void
@@ -229,7 +227,7 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop,
struct iommu_iotlb_gather * gather, unsigned long iova,
size_t granule)
{
- if (iop->cfg.tlb->tlb_add_page)
+ if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page)
iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
}
@@ -251,5 +249,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
#endif /* __IO_PGTABLE_H */
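With the NULL checks added to the io_pgtable_tlb_*() helpers, a page-table user that handles TLB maintenance itself can leave cfg.tlb unset; a hedged sketch (the format choice and values are illustrative):

static struct io_pgtable_ops *example_alloc_pgtable(void *cookie)
{
        /* No .tlb callbacks: the io_pgtable_tlb_*() helpers become no-ops. */
        struct io_pgtable_cfg cfg = {
                .pgsize_bitmap  = SZ_4K | SZ_2M | SZ_1G,
                .ias            = 48,
                .oas            = 52,
                .tlb            = NULL,
        };

        return alloc_io_pgtable_ops(AMD_IOMMU_V1, &cfg, cookie);
}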
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 35b2d845704d..51ede771cd99 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -5,21 +5,13 @@
#include <linux/sched.h>
#include <linux/xarray.h>
-struct io_identity {
- struct files_struct *files;
- struct mm_struct *mm;
-#ifdef CONFIG_BLK_CGROUP
- struct cgroup_subsys_state *blkcg_css;
-#endif
- const struct cred *creds;
- struct nsproxy *nsproxy;
- struct fs_struct *fs;
- unsigned long fsize;
-#ifdef CONFIG_AUDIT
- kuid_t loginuid;
- unsigned int sessionid;
-#endif
- refcount_t count;
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
};
struct io_uring_task {
@@ -27,11 +19,15 @@ struct io_uring_task {
struct xarray xa;
struct wait_queue_head wait;
struct file *last;
+ void *io_wq;
struct percpu_counter inflight;
- struct io_identity __identity;
- struct io_identity *identity;
atomic_t in_idle;
bool sqpoll;
+
+ spinlock_t task_lock;
+ struct io_wq_work_list task_list;
+ unsigned long task_state;
+ struct callback_head task_work;
};
#if defined(CONFIG_IO_URING)
@@ -47,7 +43,7 @@ static inline void io_uring_task_cancel(void)
}
static inline void io_uring_files_cancel(struct files_struct *files)
{
- if (current->io_uring && !xa_empty(&current->io_uring->xa))
+ if (current->io_uring)
__io_uring_files_cancel(files);
}
static inline void io_uring_free(struct task_struct *tsk)
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 5bd3cac4df9c..d202fd2d0f91 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -55,6 +55,7 @@ struct vm_fault;
#define IOMAP_F_SHARED 0x04
#define IOMAP_F_MERGED 0x08
#define IOMAP_F_BUFFER_HEAD 0x10
+#define IOMAP_F_ZONE_APPEND 0x20
/*
* Flags set by the core iomap code during operations:
@@ -122,6 +123,7 @@ struct iomap_page_ops {
#define IOMAP_FAULT (1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT (1 << 4) /* direct I/O */
#define IOMAP_NOWAIT (1 << 5) /* do not block */
+#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */
struct iomap_ops {
/*
@@ -256,12 +258,25 @@ struct iomap_dio_ops {
struct bio *bio, loff_t file_offset);
};
+/*
+ * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not
+ * synchronous.
+ */
+#define IOMAP_DIO_FORCE_WAIT (1 << 0)
+
+/*
+ * Do not allocate blocks or zero partial blocks, but instead fall back to
+ * the caller by returning -EAGAIN. Used to optimize direct I/O writes that
+ * are not aligned to the file system block size.
+ */
+#define IOMAP_DIO_OVERWRITE_ONLY (1 << 1)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- bool wait_for_completion);
+ unsigned int dio_flags);
struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- bool wait_for_completion);
+ unsigned int dio_flags);
ssize_t iomap_dio_complete(struct iomap_dio *dio);
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
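A sketch of how a filesystem's direct-I/O write path might map caller intent onto the new dio_flags; tying the flags to IOCB_NOWAIT and O_DSYNC here is an assumed policy, not something this header mandates:

static ssize_t example_dio_write(struct kiocb *iocb, struct iov_iter *from,
                                 const struct iomap_ops *ops,
                                 const struct iomap_dio_ops *dops)
{
        unsigned int dio_flags = 0;

        /* Non-blocking writers may only overwrite already-allocated blocks. */
        if (iocb->ki_flags & IOCB_NOWAIT)
                dio_flags |= IOMAP_DIO_OVERWRITE_ONLY;

        /* Hypothetical policy: force inline completion for O_DSYNC writers. */
        if (iocb->ki_flags & IOCB_DSYNC)
                dio_flags |= IOMAP_DIO_FORCE_WAIT;

        return iomap_dio_rw(iocb, from, ops, dops, dio_flags);
}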
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index efa96263b81b..5e7fe519430a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -170,7 +170,7 @@ enum iommu_dev_features {
* struct iommu_iotlb_gather - Range information for a pending IOTLB flush
*
* @start: IOVA representing the start of the range to be flushed
- * @end: IOVA representing the end of the range to be flushed (exclusive)
+ * @end: IOVA representing the end of the range to be flushed (inclusive)
* @pgsize: The interval at which to perform the flush
*
* This structure is intended to be updated by multiple calls to the
@@ -246,7 +246,8 @@ struct iommu_ops {
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
- void (*iotlb_sync_map)(struct iommu_domain *domain);
+ void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
void (*iotlb_sync)(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
@@ -376,6 +377,7 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
+int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
static inline void __iommu_device_set_ops(struct iommu_device *iommu,
const struct iommu_ops *ops)
@@ -514,7 +516,6 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t offset, u64 size,
int prot);
-extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
@@ -538,7 +539,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size)
{
- unsigned long start = iova, end = start + size;
+ unsigned long start = iova, end = start + size - 1;
/*
* If the new page is disjoint from the current range or is mapped at
@@ -630,7 +631,6 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv)
int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);
-bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
@@ -749,11 +749,6 @@ static inline int iommu_domain_window_enable(struct iommu_domain *domain,
return -ENODEV;
}
-static inline void iommu_domain_window_disable(struct iommu_domain *domain,
- u32 wnd_nr)
-{
-}
-
static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
return 0;
@@ -985,12 +980,6 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
}
static inline bool
-iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
-{
- return false;
-}
-
-static inline bool
iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
return false;
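Two small consequences of @end now being inclusive, sketched for a hypothetical driver (example_flush_range() is made up):

static void example_iotlb_sync(struct iommu_domain *domain,
                               struct iommu_iotlb_gather *gather)
{
        /* @end is inclusive, so the flushed length needs the +1. */
        size_t size = gather->end - gather->start + 1;

        example_flush_range(domain, gather->start, size);   /* hypothetical */
}

static size_t example_unmap(struct iommu_domain *domain, unsigned long iova,
                            size_t size, struct iommu_iotlb_gather *gather)
{
        /* Accumulates [iova, iova + size - 1] into the pending range. */
        iommu_iotlb_gather_add_page(domain, gather, iova, size);
        return size;
}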
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index fe48b7840665..55de385c839c 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -334,11 +334,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq)
res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
}
-#ifdef CONFIG_IO_STRICT_DEVMEM
-void revoke_devmem(struct resource *res);
-#else
-static inline void revoke_devmem(struct resource *res) { };
-#endif
+extern struct address_space *iomem_get_mapping(void);
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 76e16ae20729..c834c01c0a5b 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -150,10 +150,8 @@ unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
-void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
-bool has_iova_flush_queue(struct iova_domain *iovad);
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -212,22 +210,12 @@ static inline struct iova *reserve_iova(struct iova_domain *iovad,
return NULL;
}
-static inline void copy_reserved_iova(struct iova_domain *from,
- struct iova_domain *to)
-{
-}
-
static inline void init_iova_domain(struct iova_domain *iovad,
unsigned long granule,
unsigned long start_pfn)
{
}
-static inline bool has_iova_flush_queue(struct iova_domain *iovad)
-{
- return false;
-}
-
static inline int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb,
iova_entry_dtor entry_dtor)
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index dda61d150a13..70b2ad3b9884 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -31,6 +31,7 @@ struct ipv6_devconf {
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
+ __u32 ra_defrtr_metric;
__s32 accept_ra_min_hop_limit;
__s32 accept_ra_pinfo;
__s32 ignore_routes_with_linkdown;
@@ -84,7 +85,6 @@ struct ipv6_params {
__s32 autoconf;
};
extern struct ipv6_params ipv6_defaults;
-#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 8de0e1373de7..600c10da321a 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -149,6 +149,17 @@ do { \
# define start_critical_timings() do { } while (0)
#endif
+#ifdef CONFIG_DEBUG_IRQFLAGS
+extern void warn_bogus_irq_restore(void);
+#define raw_check_bogus_irq_restore() \
+ do { \
+ if (unlikely(!arch_irqs_disabled())) \
+ warn_bogus_irq_restore(); \
+ } while (0)
+#else
+#define raw_check_bogus_irq_restore() do { } while (0)
+#endif
+
/*
* Wrap the arch provided IRQ routines to provide appropriate checks.
*/
@@ -162,6 +173,7 @@ do { \
#define raw_local_irq_restore(flags) \
do { \
typecheck(unsigned long, flags); \
+ raw_check_bogus_irq_restore(); \
arch_local_irq_restore(flags); \
} while (0)
#define raw_local_save_flags(flags) \
diff --git a/include/linux/isa.h b/include/linux/isa.h
index 41336da0f4e7..e30963190968 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -13,7 +13,7 @@
struct isa_driver {
int (*match)(struct device *, unsigned int);
int (*probe)(struct device *, unsigned int);
- int (*remove)(struct device *, unsigned int);
+ void (*remove)(struct device *, unsigned int);
void (*shutdown)(struct device *, unsigned int);
int (*suspend)(struct device *, unsigned int, pm_message_t);
int (*resume)(struct device *, unsigned int);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 32809624d422..d92691262f51 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -261,14 +261,14 @@ static __always_inline void jump_label_init(void)
static __always_inline bool static_key_false(struct static_key *key)
{
- if (unlikely(static_key_count(key) > 0))
+ if (unlikely_notrace(static_key_count(key) > 0))
return true;
return false;
}
static __always_inline bool static_key_true(struct static_key *key)
{
- if (likely(static_key_count(key) > 0))
+ if (likely_notrace(static_key_count(key) > 0))
return true;
return false;
}
@@ -460,7 +460,7 @@ extern bool ____wrong_branch_error(void);
branch = !arch_static_branch_jump(&(x)->key, true); \
else \
branch = ____wrong_branch_error(); \
- likely(branch); \
+ likely_notrace(branch); \
})
#define static_branch_unlikely(x) \
@@ -472,13 +472,13 @@ extern bool ____wrong_branch_error(void);
branch = arch_static_branch(&(x)->key, false); \
else \
branch = ____wrong_branch_error(); \
- unlikely(branch); \
+ unlikely_notrace(branch); \
})
#else /* !CONFIG_JUMP_LABEL */
-#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
-#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
+#define static_branch_likely(x) likely_notrace(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x) unlikely_notrace(static_key_enabled(&(x)->key))
#endif /* CONFIG_JUMP_LABEL */
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 481273f0c72d..465060acc981 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -71,15 +71,14 @@ static inline void *dereference_symbol_descriptor(void *ptr)
return ptr;
}
-#ifdef CONFIG_KALLSYMS
-/* Lookup the address for a symbol. Returns 0 if not found. */
-unsigned long kallsyms_lookup_name(const char *name);
-
-/* Call a function on each kallsyms symbol in the core kernel */
int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
unsigned long),
void *data);
+#ifdef CONFIG_KALLSYMS
+/* Lookup the address for a symbol. Returns 0 if not found. */
+unsigned long kallsyms_lookup_name(const char *name);
+
extern int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset);
@@ -108,14 +107,6 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
return 0;
}
-static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *,
- unsigned long),
- void *data)
-{
- return 0;
-}
-
static inline int kallsyms_lookup_size_offset(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset)
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index ca5e89fb10d3..3d6d22a25bdc 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -5,6 +5,12 @@
#include <linux/types.h>
/*
+ * The annotations present in this file are only relevant for the software
+ * KASAN modes that rely on compiler instrumentation, and will be optimized
+ * away for the hardware tag-based KASAN mode. Use kasan_check_byte() instead.
+ */
+
+/*
* __kasan_check_*: Always available when KASAN is enabled. This may be used
* even in compilation units that selectively disable KASAN, but must use KASAN
* to validate access to an address. Never use these in header files!
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 0aea9e2a2a01..b91732bd05d7 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -83,6 +83,7 @@ static inline void kasan_disable_current(void) {}
struct kasan_cache {
int alloc_meta_offset;
int free_meta_offset;
+ bool is_kmalloc;
};
#ifdef CONFIG_KASAN_HW_TAGS
@@ -143,6 +144,13 @@ static __always_inline void kasan_cache_create(struct kmem_cache *cache,
__kasan_cache_create(cache, size, flags);
}
+void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
+static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
+{
+ if (kasan_enabled())
+ __kasan_cache_create_kmalloc(cache);
+}
+
size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
@@ -185,19 +193,25 @@ static __always_inline void * __must_check kasan_init_slab_obj(
}
bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
-static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip)
+static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
if (kasan_enabled())
- return __kasan_slab_free(s, object, ip);
+ return __kasan_slab_free(s, object, _RET_IP_);
return false;
}
+void __kasan_kfree_large(void *ptr, unsigned long ip);
+static __always_inline void kasan_kfree_large(void *ptr)
+{
+ if (kasan_enabled())
+ __kasan_kfree_large(ptr, _RET_IP_);
+}
+
void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+static __always_inline void kasan_slab_free_mempool(void *ptr)
{
if (kasan_enabled())
- __kasan_slab_free_mempool(ptr, ip);
+ __kasan_slab_free_mempool(ptr, _RET_IP_);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
@@ -240,13 +254,19 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
return (void *)object;
}
-void __kasan_kfree_large(void *ptr, unsigned long ip);
-static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
+/*
+ * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
+ * the hardware tag-based mode that doesn't rely on compiler instrumentation.
+ */
+bool __kasan_check_byte(const void *addr, unsigned long ip);
+static __always_inline bool kasan_check_byte(const void *addr)
{
if (kasan_enabled())
- __kasan_kfree_large(ptr, ip);
+ return __kasan_check_byte(addr, _RET_IP_);
+ return true;
}
+
bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);
@@ -266,6 +286,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
+static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
@@ -277,12 +298,12 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
{
return (void *)object;
}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
return false;
}
-static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
+static inline void kasan_kfree_large(void *ptr) {}
+static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags)
{
@@ -302,7 +323,10 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
return (void *)object;
}
-static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
+static inline bool kasan_check_byte(const void *address)
+{
+ return true;
+}
#endif /* CONFIG_KASAN */
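A brief sketch of the intended kasan_check_byte() usage; the wrapper below is hypothetical:

static bool example_object_is_accessible(const void *obj)
{
        /*
         * Unlike kasan_check_read()/write(), this also covers the hardware
         * tag-based mode; it reports an error and returns false when the
         * first byte of @obj is not a valid access (e.g. already freed).
         */
        return kasan_check_byte(obj);
}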
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index 82f29aa35062..c40811d79769 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -6,8 +6,6 @@
#include <linux/interrupt.h>
#include <linux/keyboard.h>
-extern struct tasklet_struct keyboard_tasklet;
-
extern char *func_table[MAX_NR_FUNC];
/*
@@ -71,12 +69,6 @@ extern void (*kbd_ledfunc)(unsigned int led);
extern int set_console(int nr);
extern void schedule_console_callback(void);
-/* FIXME: review locking for vt.c callers */
-static inline void set_leds(void)
-{
- tasklet_schedule(&keyboard_tasklet);
-}
-
static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag)
{
return ((kbd->modeflags >> flag) & 1);
@@ -135,7 +127,7 @@ static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag)
struct console;
-void compute_shiftstate(void);
+void vt_set_leds_compute_shiftstate(void);
/* defkeymap.c */
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 9d12c970f18f..e78e17a76dc9 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -72,4 +72,10 @@
*/
#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
+/*
+ * IF_ENABLED(CONFIG_FOO, ptr) evaluates to (ptr) if CONFIG_FOO is set to 'y'
+ * or 'm', NULL otherwise.
+ */
+#define IF_ENABLED(option, ptr) (IS_ENABLED(option) ? (ptr) : NULL)
+
#endif /* __LINUX_KCONFIG_H */
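A typical use of the new IF_ENABLED() helper is dropping optional pointers at build time; a hypothetical driver snippet:

static const struct dev_pm_ops example_pm_ops = { /* ... */ };

static struct platform_driver example_driver = {
        .driver = {
                .name   = "example",
                /* NULL when CONFIG_PM is disabled, &example_pm_ops otherwise. */
                .pm     = IF_ENABLED(CONFIG_PM, &example_pm_ops),
        },
};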
diff --git a/include/linux/kd.h b/include/linux/kd.h
deleted file mode 100644
index b130a18f860f..000000000000
--- a/include/linux/kd.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_KD_H
-#define _LINUX_KD_H
-
-#include <uapi/linux/kd.h>
-
-#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */
-#endif /* _LINUX_KD_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f7902d8c1048..5b7ed6dc99ac 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -15,7 +15,7 @@
#include <linux/typecheck.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
-
+#include <linux/static_call_types.h>
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>
@@ -81,11 +81,26 @@ struct pt_regs;
struct user;
#ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int _cond_resched(void);
-# define might_resched() _cond_resched()
+
+extern int __cond_resched(void);
+# define might_resched() __cond_resched()
+
+#elif defined(CONFIG_PREEMPT_DYNAMIC)
+
+extern int __cond_resched(void);
+
+DECLARE_STATIC_CALL(might_resched, __cond_resched);
+
+static __always_inline void might_resched(void)
+{
+ static_call_mod(might_resched)();
+}
+
#else
+
# define might_resched() do { } while (0)
-#endif
+
+#endif /* CONFIG_PREEMPT_* */
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
extern void ___might_sleep(const char *file, int line, int preempt_offset);
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 9e93bef52968..8a7aa1d7e0e3 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -300,6 +300,11 @@ struct kimage {
/* Information for loading purgatory */
struct purgatory_info purgatory_info;
#endif
+
+#ifdef CONFIG_IMA_KEXEC
+ /* Virtual address of IMA measurement buffer for kexec syscall */
+ void *ima_buffer;
+#endif
};
/* kexec interface functions */
@@ -309,6 +314,8 @@ extern void machine_kexec_cleanup(struct kimage *image);
extern int kernel_kexec(void);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
+int machine_kexec_post_load(struct kimage *image);
+
extern void __crash_kexec(struct pt_regs *);
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
diff --git a/include/linux/key.h b/include/linux/key.h
index 0f2e24f13c2b..7febc4881363 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -289,6 +289,7 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
+#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
@@ -360,7 +361,7 @@ static inline struct key *request_key(struct key_type *type,
* completion of keys undergoing construction with a non-interruptible wait.
*/
#define request_key_net(type, description, net, callout_info) \
- request_key_tag(type, description, net->key_domain, callout_info);
+ request_key_tag(type, description, net->key_domain, callout_info)
/**
* request_key_net_rcu - Request a key for a net namespace under RCU conditions
@@ -372,7 +373,7 @@ static inline struct key *request_key(struct key_type *type,
* network namespace are used.
*/
#define request_key_net_rcu(type, description, net) \
- request_key_rcu(type, description, net->key_domain);
+ request_key_rcu(type, description, net->key_domain)
#endif /* CONFIG_NET */
extern int wait_for_key_construction(struct key *key, bool intr);
diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h
index 18f3f5346843..a27605e2f826 100644
--- a/include/linux/keyslot-manager.h
+++ b/include/linux/keyslot-manager.h
@@ -85,6 +85,9 @@ struct blk_keyslot_manager {
int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots);
+int devm_blk_ksm_init(struct device *dev, struct blk_keyslot_manager *ksm,
+ unsigned int num_slots);
+
blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm,
const struct blk_crypto_key *key,
struct blk_ksm_keyslot **slot_ptr);
@@ -103,4 +106,15 @@ void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm);
void blk_ksm_destroy(struct blk_keyslot_manager *ksm);
+void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent,
+ const struct blk_keyslot_manager *child);
+
+void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm);
+
+bool blk_ksm_is_superset(struct blk_keyslot_manager *ksm_superset,
+ struct blk_keyslot_manager *ksm_subset);
+
+void blk_ksm_update_capabilities(struct blk_keyslot_manager *target_ksm,
+ struct blk_keyslot_manager *reference_ksm);
+
#endif /* __LINUX_KEYSLOT_MANAGER_H */
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
new file mode 100644
index 000000000000..a70d1ea03532
--- /dev/null
+++ b/include/linux/kfence.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Kernel Electric-Fence (KFENCE). Public interface for allocator and fault
+ * handler integration. For more info see Documentation/dev-tools/kfence.rst.
+ *
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#ifndef _LINUX_KFENCE_H
+#define _LINUX_KFENCE_H
+
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KFENCE
+
+/*
+ * We allocate an even number of pages, as it simplifies calculations to map
+ * address to metadata indices; effectively, the very first page serves as an
+ * extended guard page, but otherwise has no special purpose.
+ */
+#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
+extern char *__kfence_pool;
+
+#ifdef CONFIG_KFENCE_STATIC_KEYS
+#include <linux/static_key.h>
+DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
+#else
+#include <linux/atomic.h>
+extern atomic_t kfence_allocation_gate;
+#endif
+
+/**
+ * is_kfence_address() - check if an address belongs to KFENCE pool
+ * @addr: address to check
+ *
+ * Return: true or false depending on whether the address is within the KFENCE
+ * object range.
+ *
+ * KFENCE objects live in a separate page range and are not to be intermixed
+ * with regular heap objects (e.g. KFENCE objects must never be added to the
+ * allocator freelists). Failing to do so may and will result in heap
+ * corruptions, therefore is_kfence_address() must be used to check whether
+ * an object requires specific handling.
+ *
+ * Note: This function may be used in fast-paths, and is performance critical.
+ * Future changes should take this into account; for instance, we want to avoid
+ * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
+ * constant (until immediate patching support is added to the kernel).
+ */
+static __always_inline bool is_kfence_address(const void *addr)
+{
+ /*
+ * The non-NULL check is required in case the __kfence_pool pointer was
+ * never initialized; keep it in the slow-path after the range-check.
+ */
+ return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr);
+}
+
+/**
+ * kfence_alloc_pool() - allocate the KFENCE pool via memblock
+ */
+void __init kfence_alloc_pool(void);
+
+/**
+ * kfence_init() - perform KFENCE initialization at boot time
+ *
+ * Requires that kfence_alloc_pool() was called before. This sets up the
+ * allocation gate timer, and requires that workqueues are available.
+ */
+void __init kfence_init(void);
+
+/**
+ * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects
+ * @s: cache being shut down
+ *
+ * Before shutting down a cache, one must ensure there are no remaining objects
+ * allocated from it. Because KFENCE objects are not referenced from the cache
+ * directly, we need to check them here.
+ *
+ * Note that shutdown_cache() is internal to SL*B, and kmem_cache_destroy() does
+ * not return if allocated objects still exist: it prints an error message and
+ * simply aborts destruction of a cache, leaking memory.
+ *
+ * If the only such objects are KFENCE objects, we will not leak the entire
+ * cache, but instead try to provide more useful debug info by making allocated
+ * objects "zombie allocations". Objects may then still be used or freed (which
+ * is handled gracefully), but usage will result in showing KFENCE error reports
+ * which include stack traces to the user of the object, the original allocation
+ * site, and caller to shutdown_cache().
+ */
+void kfence_shutdown_cache(struct kmem_cache *s);
+
+/*
+ * Allocate a KFENCE object. Allocators must not call this function directly,
+ * use kfence_alloc() instead.
+ */
+void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
+
+/**
+ * kfence_alloc() - allocate a KFENCE object with a low probability
+ * @s: struct kmem_cache with object requirements
+ * @size: exact size of the object to allocate (can be less than @s->size
+ * e.g. for kmalloc caches)
+ * @flags: GFP flags
+ *
+ * Return:
+ * * NULL - must proceed with allocating as usual,
+ * * non-NULL - pointer to a KFENCE object.
+ *
+ * kfence_alloc() should be inserted into the heap allocation fast path,
+ * allowing it to transparently return KFENCE-allocated objects with a low
+ * probability using a static branch (the probability is controlled by the
+ * kfence.sample_interval boot parameter).
+ */
+static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
+{
+#ifdef CONFIG_KFENCE_STATIC_KEYS
+ if (static_branch_unlikely(&kfence_allocation_key))
+#else
+ if (unlikely(!atomic_read(&kfence_allocation_gate)))
+#endif
+ return __kfence_alloc(s, size, flags);
+ return NULL;
+}
+
+/**
+ * kfence_ksize() - get actual amount of memory allocated for a KFENCE object
+ * @addr: pointer to a heap object
+ *
+ * Return:
+ * * 0 - not a KFENCE object, must call __ksize() instead,
+ * * non-0 - this many bytes can be accessed without causing a memory error.
+ *
+ * kfence_ksize() returns the number of bytes requested for a KFENCE object at
+ * allocation time. This number may be less than the object size of the
+ * corresponding struct kmem_cache.
+ */
+size_t kfence_ksize(const void *addr);
+
+/**
+ * kfence_object_start() - find the beginning of a KFENCE object
+ * @addr: address within a KFENCE-allocated object
+ *
+ * Return: address of the beginning of the object.
+ *
+ * SL[AU]B-allocated objects are laid out within a page one by one, so it is
+ * easy to calculate the beginning of an object given a pointer inside it and
+ * the object size. The same is not true for KFENCE, which places a single
+ * object at either end of the page. This helper function is used to find the
+ * beginning of a KFENCE-allocated object.
+ */
+void *kfence_object_start(const void *addr);
+
+/**
+ * __kfence_free() - release a KFENCE heap object to KFENCE pool
+ * @addr: object to be freed
+ *
+ * Requires: is_kfence_address(addr)
+ *
+ * Release a KFENCE object and mark it as freed.
+ */
+void __kfence_free(void *addr);
+
+/**
+ * kfence_free() - try to release an arbitrary heap object to KFENCE pool
+ * @addr: object to be freed
+ *
+ * Return:
+ * * false - object doesn't belong to KFENCE pool and was ignored,
+ * * true - object was released to KFENCE pool.
+ *
+ * Release a KFENCE object and mark it as freed. May be called on any object,
+ * even non-KFENCE objects, to simplify integration of the hooks into the
+ * allocator's free codepath. The allocator must check the return value to
+ * determine if it was a KFENCE object or not.
+ */
+static __always_inline __must_check bool kfence_free(void *addr)
+{
+ if (!is_kfence_address(addr))
+ return false;
+ __kfence_free(addr);
+ return true;
+}
+
+/**
+ * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
+ * @addr: faulting address
+ * @is_write: is access a write
+ * @regs: current struct pt_regs (can be NULL, but shows full stack trace)
+ *
+ * Return:
+ * * false - address outside KFENCE pool,
+ * * true - page fault handled by KFENCE, no additional handling required.
+ *
+ * A page fault inside KFENCE pool indicates a memory error, such as an
+ * out-of-bounds access, a use-after-free or an invalid memory access. In these
+ * cases KFENCE prints an error message and marks the offending page as
+ * present, so that the kernel can proceed.
+ */
+bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+
+#else /* CONFIG_KFENCE */
+
+static inline bool is_kfence_address(const void *addr) { return false; }
+static inline void kfence_alloc_pool(void) { }
+static inline void kfence_init(void) { }
+static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
+static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
+static inline size_t kfence_ksize(const void *addr) { return 0; }
+static inline void *kfence_object_start(const void *addr) { return NULL; }
+static inline void __kfence_free(void *addr) { }
+static inline bool __must_check kfence_free(void *addr) { return false; }
+static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
+ struct pt_regs *regs)
+{
+ return false;
+}
+
+#endif
+
+#endif /* _LINUX_KFENCE_H */
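A condensed sketch of the allocator-side integration that the kfence_alloc()/kfence_free() comments above describe (hedged; the real SLAB/SLUB hook-up differs in detail, and the slow-path helpers are hypothetical):

static void *example_cache_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
        /* Occasionally returns a guarded KFENCE object via the static branch. */
        void *obj = kfence_alloc(s, size, flags);

        if (obj)
                return obj;

        return example_slab_alloc(s, size, flags);      /* hypothetical */
}

static void example_cache_free(struct kmem_cache *s, void *obj)
{
        /* KFENCE objects must never reach the allocator freelists. */
        if (kfence_free(obj))
                return;

        example_slab_free(s, obj);                      /* hypothetical */
}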
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 0d6cf64c8bb1..392a3670944c 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -325,7 +325,6 @@ extern char *kgdb_mem2hex(char *mem, char *buf, int count);
extern int kgdb_hex2mem(char *buf, char *mem, int count);
extern int kgdb_isremovedbreak(unsigned long addr);
-extern void kgdb_schedule_breakpoint(void);
extern int kgdb_has_hit_break(unsigned long addr);
extern int
@@ -360,9 +359,11 @@ extern atomic_t kgdb_active;
extern bool dbg_is_early;
extern void __init dbg_late_init(void);
extern void kgdb_panic(const char *msg);
+extern void kgdb_free_init_mem(void);
#else /* ! CONFIG_KGDB */
#define in_dbg_master() (0)
#define dbg_late_init()
static inline void kgdb_panic(const char *msg) {}
+static inline void kgdb_free_init_mem(void) { }
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index c941b7377321..2fcc01891b47 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -3,6 +3,7 @@
#define _LINUX_KHUGEPAGED_H
#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
+#include <linux/shmem_fs.h>
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -57,6 +58,7 @@ static inline int khugepaged_enter(struct vm_area_struct *vma,
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
if ((khugepaged_always() ||
+ (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
(khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
!(vm_flags & VM_NOHUGEPAGE) &&
!test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h
index 0738389b42b6..1a37a664f915 100644
--- a/include/linux/ks0108.h
+++ b/include/linux/ks0108.h
@@ -4,7 +4,7 @@
* Version: 0.1.0
* Description: ks0108 LCD Controller driver header
*
- * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Author: Copyright (C) Miguel Ojeda <ojeda@kernel.org>
* Date: 2006-10-31
*/
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f3b1013fb22c..1b65e7204344 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -11,6 +11,7 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
+#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
@@ -425,9 +426,8 @@ struct kvm_irq_routing_table {
#define KVM_PRIVATE_MEM_SLOTS 0
#endif
-#ifndef KVM_MEM_SLOTS_NUM
-#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
-#endif
+#define KVM_MEM_SLOTS_NUM SHRT_MAX
+#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)
#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
@@ -451,7 +451,12 @@ struct kvm_memslots {
};
struct kvm {
+#ifdef KVM_HAVE_MMU_RWLOCK
+ rwlock_t mmu_lock;
+#else
spinlock_t mmu_lock;
+#endif /* KVM_HAVE_MMU_RWLOCK */
+
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
@@ -502,6 +507,8 @@ struct kvm {
struct mmu_notifier mmu_notifier;
unsigned long mmu_notifier_seq;
long mmu_notifier_count;
+ unsigned long mmu_notifier_range_start;
+ unsigned long mmu_notifier_range_end;
#endif
long tlbs_dirty;
struct list_head devices;
@@ -729,7 +736,7 @@ kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
bool atomic, bool *async, bool write_fault,
- bool *writable);
+ bool *writable, hva_t *hva);
void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
@@ -1203,6 +1210,26 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
return 1;
return 0;
}
+
+static inline int mmu_notifier_retry_hva(struct kvm *kvm,
+ unsigned long mmu_seq,
+ unsigned long hva)
+{
+ lockdep_assert_held(&kvm->mmu_lock);
+ /*
+ * If mmu_notifier_count is non-zero, then the range maintained by
+ * kvm_mmu_notifier_invalidate_range_start contains all addresses that
+ * might be being invalidated. Note that it may include some false
+ * positives, due to shortcuts when handling concurrent invalidations.
+ */
+ if (unlikely(kvm->mmu_notifier_count) &&
+ hva >= kvm->mmu_notifier_range_start &&
+ hva < kvm->mmu_notifier_range_end)
+ return 1;
+ if (kvm->mmu_notifier_seq != mmu_seq)
+ return 1;
+ return 0;
+}
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
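A hedged sketch of the fault-path pattern enabled by the new hva out-parameter of __gfn_to_pfn_memslot() and mmu_notifier_retry_hva(); simplified, and assuming the spinlock flavour of mmu_lock (KVM_HAVE_MMU_RWLOCK not defined):

static int example_map_fault(struct kvm *kvm, struct kvm_memory_slot *slot,
                             gfn_t gfn, bool write_fault)
{
        unsigned long mmu_seq = kvm->mmu_notifier_seq;
        bool writable;
        kvm_pfn_t pfn;
        hva_t hva;

        smp_rmb();
        pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write_fault,
                                   &writable, &hva);

        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry_hva(kvm, mmu_seq, hva)) {
                /* An invalidation covering @hva raced with us: retry later. */
                spin_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                return -EAGAIN;                 /* hypothetical convention */
        }
        /* ... install the mapping under mmu_lock ... */
        spin_unlock(&kvm->mmu_lock);
        return 0;
}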
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 21a3358a1731..612b4cab3819 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -85,6 +85,7 @@ static inline struct led_classdev_flash *lcdev_to_flcdev(
return container_of(lcdev, struct led_classdev_flash, led_cdev);
}
+#if IS_ENABLED(CONFIG_LEDS_CLASS_FLASH)
/**
* led_classdev_flash_register_ext - register a new object of LED class with
* init data and with support for flash LEDs
@@ -98,12 +99,6 @@ int led_classdev_flash_register_ext(struct device *parent,
struct led_classdev_flash *fled_cdev,
struct led_init_data *init_data);
-static inline int led_classdev_flash_register(struct device *parent,
- struct led_classdev_flash *fled_cdev)
-{
- return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
-}
-
/**
* led_classdev_flash_unregister - unregisters an object of led_classdev class
* with support for flash LEDs
@@ -118,15 +113,44 @@ int devm_led_classdev_flash_register_ext(struct device *parent,
struct led_init_data *init_data);
+void devm_led_classdev_flash_unregister(struct device *parent,
+ struct led_classdev_flash *fled_cdev);
+
+#else
+
+static inline int led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+
+static inline void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev) {};
+static inline int devm_led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data)
+{
+ return 0;
+}
+
+static inline void devm_led_classdev_flash_unregister(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{};
+
+#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_FLASH) */
+
+static inline int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
+
static inline int devm_led_classdev_flash_register(struct device *parent,
struct led_classdev_flash *fled_cdev)
{
return devm_led_classdev_flash_register_ext(parent, fled_cdev, NULL);
}
-void devm_led_classdev_flash_unregister(struct device *parent,
- struct led_classdev_flash *fled_cdev);
-
/**
* led_set_flash_strobe - setup flash strobe
* @fled_cdev: the flash LED to set strobe on
diff --git a/include/linux/led-class-multicolor.h b/include/linux/led-class-multicolor.h
index 5116f9a866cc..210d57bcd767 100644
--- a/include/linux/led-class-multicolor.h
+++ b/include/linux/led-class-multicolor.h
@@ -44,12 +44,6 @@ int led_classdev_multicolor_register_ext(struct device *parent,
struct led_classdev_mc *mcled_cdev,
struct led_init_data *init_data);
-static inline int led_classdev_multicolor_register(struct device *parent,
- struct led_classdev_mc *mcled_cdev)
-{
- return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
-}
-
/**
* led_classdev_multicolor_unregister - unregisters an object of led_classdev
* class with support for multicolor LEDs
@@ -68,13 +62,6 @@ int devm_led_classdev_multicolor_register_ext(struct device *parent,
struct led_classdev_mc *mcled_cdev,
struct led_init_data *init_data);
-static inline int devm_led_classdev_multicolor_register(struct device *parent,
- struct led_classdev_mc *mcled_cdev)
-{
- return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev,
- NULL);
-}
-
void devm_led_classdev_multicolor_unregister(struct device *parent,
struct led_classdev_mc *mcled_cdev);
#else
@@ -83,27 +70,33 @@ static inline int led_classdev_multicolor_register_ext(struct device *parent,
struct led_classdev_mc *mcled_cdev,
struct led_init_data *init_data)
{
- return -EINVAL;
-}
-
-static inline int led_classdev_multicolor_register(struct device *parent,
- struct led_classdev_mc *mcled_cdev)
-{
- return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
+ return 0;
}
static inline void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) {};
static inline int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
enum led_brightness brightness)
{
- return -EINVAL;
+ return 0;
}
static inline int devm_led_classdev_multicolor_register_ext(struct device *parent,
struct led_classdev_mc *mcled_cdev,
struct led_init_data *init_data)
{
- return -EINVAL;
+ return 0;
+}
+
+static inline void devm_led_classdev_multicolor_unregister(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{};
+
+#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */
+
+static inline int led_classdev_multicolor_register(struct device *parent,
+ struct led_classdev_mc *mcled_cdev)
+{
+ return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL);
}
static inline int devm_led_classdev_multicolor_register(struct device *parent,
@@ -113,9 +106,4 @@ static inline int devm_led_classdev_multicolor_register(struct device *parent,
NULL);
}
-static inline void devm_led_classdev_multicolor_unregister(struct device *parent,
- struct led_classdev_mc *mcled_cdev)
-{};
-
-#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */
#endif /* _LINUX_MULTICOLOR_LEDS_H_INCLUDED */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 6a8d6409c993..329fd914cf24 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -63,8 +63,8 @@ struct led_hw_trigger_type {
struct led_classdev {
const char *name;
- enum led_brightness brightness;
- enum led_brightness max_brightness;
+ unsigned int brightness;
+ unsigned int max_brightness;
int flags;
/* Lower 16 bits reflect status */
@@ -253,8 +253,7 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev,
* software blink timer that implements blinking when the
* hardware doesn't. This function is guaranteed not to sleep.
*/
-void led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness);
+void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness);
/**
* led_set_brightness_sync - set LED brightness synchronously
@@ -267,8 +266,7 @@ void led_set_brightness(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-int led_set_brightness_sync(struct led_classdev *led_cdev,
- enum led_brightness value);
+int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value);
/**
* led_update_brightness - update LED brightness
@@ -565,7 +563,7 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
void led_classdev_notify_brightness_hw_changed(
- struct led_classdev *led_cdev, enum led_brightness brightness);
+ struct led_classdev *led_cdev, unsigned int brightness);
#else
static inline void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness) { }
diff --git a/include/linux/list.h b/include/linux/list.h
index 89bdc92e75c3..f2af4b4aa4e9 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -901,7 +901,7 @@ static inline void hlist_add_before(struct hlist_node *n,
}
/**
- * hlist_add_behing - add a new entry after the one specified
+ * hlist_add_behind - add a new entry after the one specified
* @n: new entry to be added
* @prev: hlist node to add it after, which must be non-NULL
*/
diff --git a/include/linux/litex.h b/include/linux/litex.h
index 40f5be503593..5ea9ccf5cce4 100644
--- a/include/linux/litex.h
+++ b/include/linux/litex.h
@@ -3,9 +3,6 @@
* Common LiteX header providing
* helper functions for accessing CSRs.
*
- * Implementation of the functions is provided by
- * the LiteX SoC Controller driver.
- *
* Copyright (C) 2019-2020 Antmicro <www.antmicro.com>
*/
@@ -13,90 +10,147 @@
#define _LINUX_LITEX_H
#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/compiler_types.h>
+
+/* LiteX SoCs support 8- or 32-bit CSR Bus data width (i.e., subreg. size) */
+#if defined(CONFIG_LITEX_SUBREG_SIZE) && \
+ (CONFIG_LITEX_SUBREG_SIZE == 1 || CONFIG_LITEX_SUBREG_SIZE == 4)
+#define LITEX_SUBREG_SIZE CONFIG_LITEX_SUBREG_SIZE
+#else
+#error LiteX subregister size (LITEX_SUBREG_SIZE) must be 4 or 1!
+#endif
+#define LITEX_SUBREG_SIZE_BIT (LITEX_SUBREG_SIZE * 8)
+
+/* LiteX subregisters of any width are always aligned on a 4-byte boundary */
+#define LITEX_SUBREG_ALIGN 0x4
+
+static inline void _write_litex_subregister(u32 val, void __iomem *addr)
+{
+ writel((u32 __force)cpu_to_le32(val), addr);
+}
+
+static inline u32 _read_litex_subregister(void __iomem *addr)
+{
+ return le32_to_cpu((__le32 __force)readl(addr));
+}
/*
- * The parameters below are true for LiteX SoCs configured for 8-bit CSR Bus,
- * 32-bit aligned.
+ * LiteX SoC Generator, depending on the configuration, can split a single
+ * logical CSR (Control&Status Register) into a series of consecutive physical
+ * registers.
+ *
+ * For example, in the configuration with 8-bit CSR Bus, a 32-bit aligned,
+ * 32-bit wide logical CSR will be laid out as four 32-bit physical
+ * subregisters, each one containing one byte of meaningful data.
*
- * Supporting other configurations will require extending the logic in this
- * header and in the LiteX SoC controller driver.
+ * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
*/
-#define LITEX_REG_SIZE 0x4
-#define LITEX_SUBREG_SIZE 0x1
-#define LITEX_SUBREG_SIZE_BIT (LITEX_SUBREG_SIZE * 8)
-#define WRITE_LITEX_SUBREGISTER(val, base_offset, subreg_id) \
- writel((u32 __force)cpu_to_le32(val), base_offset + (LITEX_REG_SIZE * subreg_id))
+/* number of LiteX subregisters needed to store a register of given reg_size */
+#define _litex_num_subregs(reg_size) \
+ (((reg_size) - 1) / LITEX_SUBREG_SIZE + 1)
-#define READ_LITEX_SUBREGISTER(base_offset, subreg_id) \
- le32_to_cpu((__le32 __force)readl(base_offset + (LITEX_REG_SIZE * subreg_id)))
+/*
+ * since the number of 4-byte aligned subregisters required to store a single
+ * LiteX CSR (MMIO) register varies with LITEX_SUBREG_SIZE, the offset of the
+ * next adjacent LiteX CSR register w.r.t. the offset of the current one also
+ * depends on how many subregisters the latter is spread across
+ */
+#define _next_reg_off(off, size) \
+ ((off) + _litex_num_subregs(size) * LITEX_SUBREG_ALIGN)
-void litex_set_reg(void __iomem *reg, unsigned long reg_sz, unsigned long val);
+/*
+ * The purpose of `_litex_[set|get]_reg()` is to implement the logic of
+ * writing to/reading from the LiteX CSR in a single place that can be then
+ * reused by all LiteX drivers via the `litex_[write|read][8|16|32|64]()`
+ * accessors for the appropriate data width.
+ * NOTE: direct use of `_litex_[set|get]_reg()` by LiteX drivers is strongly
+ * discouraged, as they perform no error checking on the requested data width!
+ */
-unsigned long litex_get_reg(void __iomem *reg, unsigned long reg_sz);
+/**
+ * _litex_set_reg() - Writes a value to the LiteX CSR (Control&Status Register)
+ * @reg: Address of the CSR
+ * @reg_size: The width of the CSR expressed in the number of bytes
+ * @val: Value to be written to the CSR
+ *
+ * This function splits a single (possibly multi-byte) LiteX CSR write into
+ * a series of subregister writes with a proper offset.
+ * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)).
+ */
+static inline void _litex_set_reg(void __iomem *reg, size_t reg_size, u64 val)
+{
+ u8 shift = _litex_num_subregs(reg_size) * LITEX_SUBREG_SIZE_BIT;
+
+ while (shift > 0) {
+ shift -= LITEX_SUBREG_SIZE_BIT;
+ _write_litex_subregister(val >> shift, reg);
+ reg += LITEX_SUBREG_ALIGN;
+ }
+}
+
+/**
+ * _litex_get_reg() - Reads a value of the LiteX CSR (Control&Status Register)
+ * @reg: Address of the CSR
+ * @reg_size: The width of the CSR expressed in the number of bytes
+ *
+ * Return: Value read from the CSR
+ *
+ * This function generates a series of subregister reads with a proper offset
+ * and joins their results into a single (possibly multi-byte) LiteX CSR value.
+ * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)).
+ */
+static inline u64 _litex_get_reg(void __iomem *reg, size_t reg_size)
+{
+ u64 r;
+ u8 i;
+
+ r = _read_litex_subregister(reg);
+ for (i = 1; i < _litex_num_subregs(reg_size); i++) {
+ r <<= LITEX_SUBREG_SIZE_BIT;
+ reg += LITEX_SUBREG_ALIGN;
+ r |= _read_litex_subregister(reg);
+ }
+ return r;
+}
static inline void litex_write8(void __iomem *reg, u8 val)
{
- WRITE_LITEX_SUBREGISTER(val, reg, 0);
+ _litex_set_reg(reg, sizeof(u8), val);
}
static inline void litex_write16(void __iomem *reg, u16 val)
{
- WRITE_LITEX_SUBREGISTER(val >> 8, reg, 0);
- WRITE_LITEX_SUBREGISTER(val, reg, 1);
+ _litex_set_reg(reg, sizeof(u16), val);
}
static inline void litex_write32(void __iomem *reg, u32 val)
{
- WRITE_LITEX_SUBREGISTER(val >> 24, reg, 0);
- WRITE_LITEX_SUBREGISTER(val >> 16, reg, 1);
- WRITE_LITEX_SUBREGISTER(val >> 8, reg, 2);
- WRITE_LITEX_SUBREGISTER(val, reg, 3);
+ _litex_set_reg(reg, sizeof(u32), val);
}
static inline void litex_write64(void __iomem *reg, u64 val)
{
- WRITE_LITEX_SUBREGISTER(val >> 56, reg, 0);
- WRITE_LITEX_SUBREGISTER(val >> 48, reg, 1);
- WRITE_LITEX_SUBREGISTER(val >> 40, reg, 2);
- WRITE_LITEX_SUBREGISTER(val >> 32, reg, 3);
- WRITE_LITEX_SUBREGISTER(val >> 24, reg, 4);
- WRITE_LITEX_SUBREGISTER(val >> 16, reg, 5);
- WRITE_LITEX_SUBREGISTER(val >> 8, reg, 6);
- WRITE_LITEX_SUBREGISTER(val, reg, 7);
+ _litex_set_reg(reg, sizeof(u64), val);
}
static inline u8 litex_read8(void __iomem *reg)
{
- return READ_LITEX_SUBREGISTER(reg, 0);
+ return _litex_get_reg(reg, sizeof(u8));
}
static inline u16 litex_read16(void __iomem *reg)
{
- return (READ_LITEX_SUBREGISTER(reg, 0) << 8)
- | (READ_LITEX_SUBREGISTER(reg, 1));
+ return _litex_get_reg(reg, sizeof(u16));
}
static inline u32 litex_read32(void __iomem *reg)
{
- return (READ_LITEX_SUBREGISTER(reg, 0) << 24)
- | (READ_LITEX_SUBREGISTER(reg, 1) << 16)
- | (READ_LITEX_SUBREGISTER(reg, 2) << 8)
- | (READ_LITEX_SUBREGISTER(reg, 3));
+ return _litex_get_reg(reg, sizeof(u32));
}
static inline u64 litex_read64(void __iomem *reg)
{
- return ((u64)READ_LITEX_SUBREGISTER(reg, 0) << 56)
- | ((u64)READ_LITEX_SUBREGISTER(reg, 1) << 48)
- | ((u64)READ_LITEX_SUBREGISTER(reg, 2) << 40)
- | ((u64)READ_LITEX_SUBREGISTER(reg, 3) << 32)
- | ((u64)READ_LITEX_SUBREGISTER(reg, 4) << 24)
- | ((u64)READ_LITEX_SUBREGISTER(reg, 5) << 16)
- | ((u64)READ_LITEX_SUBREGISTER(reg, 6) << 8)
- | ((u64)READ_LITEX_SUBREGISTER(reg, 7));
+ return _litex_get_reg(reg, sizeof(u64));
}
#endif /* _LINUX_LITEX_H */
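
A minimal usage sketch of the accessors above, assuming a hypothetical ioremapped CSR block and register offsets (with an 8-bit CSR bus, i.e. LITEX_SUBREG_SIZE == 1, each call expands into one subregister access per byte of the logical register):

static void example_litex_access(void __iomem *base)
{
	/* Four subregister reads joined MSB-first into one 32-bit value. */
	u32 id = litex_read32(base + 0x00);	/* hypothetical offset */

	/* Two subregister writes, MSB first, each 4-byte aligned. */
	litex_write16(base + 0x10, 0xabcd);	/* hypothetical offset */

	(void)id;
}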
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 4a8795b21d77..ded90b097e6e 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -18,6 +18,7 @@ typedef struct {
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
+ .lock_type = LD_LOCK_PERCPU, \
}
#else
# define LL_DEP_MAP_INIT(lockname)
@@ -30,7 +31,9 @@ do { \
static struct lock_class_key __key; \
\
debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
- lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
+ LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_PERCPU); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b9e9adec73e8..7b7ebf2e28ec 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -185,12 +185,19 @@ extern void lockdep_unregister_key(struct lock_class_key *key);
* to lockdep:
*/
-extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass, short inner, short outer);
+extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);
+
+static inline void
+lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, u8 inner, u8 outer)
+{
+ lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
+}
static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass, short inner)
+ struct lock_class_key *key, int subclass, u8 inner)
{
lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}
@@ -340,6 +347,8 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
+# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
+ do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
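
The local_lock_internal.h hunk above is the intended first user of the new lock_type argument; a hypothetical per-cpu object carrying its own lockdep map would be classified the same way (sketch only, assuming CONFIG_DEBUG_LOCK_ALLOC is enabled):

struct example_percpu_obj {
	struct lockdep_map dep_map;	/* hypothetical per-cpu object */
};

static void example_percpu_obj_init(struct example_percpu_obj *obj,
				    const char *name,
				    struct lock_class_key *key)
{
	/* Mark the map as per-cpu so lockdep can apply LD_LOCK_PERCPU
	 * specific checking; the wait types mirror the local_lock usage.
	 */
	lockdep_init_map_type(&obj->dep_map, name, key, 0,
			      LD_WAIT_CONFIG, LD_WAIT_INV, LD_LOCK_PERCPU);
}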
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 9a1fd49df17f..2ec9ff5a7fff 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -30,6 +30,12 @@ enum lockdep_wait_type {
LD_WAIT_MAX, /* must be last */
};
+enum lockdep_lock_type {
+ LD_LOCK_NORMAL = 0, /* normal, catch all */
+ LD_LOCK_PERCPU, /* percpu */
+ LD_LOCK_MAX,
+};
+
#ifdef CONFIG_LOCKDEP
/*
@@ -119,8 +125,10 @@ struct lock_class {
int name_version;
const char *name;
- short wait_type_inner;
- short wait_type_outer;
+ u8 wait_type_inner;
+ u8 wait_type_outer;
+ u8 lock_type;
+ /* u8 hole; */
#ifdef CONFIG_LOCK_STAT
unsigned long contention_point[LOCKSTAT_POINTS];
@@ -169,8 +177,10 @@ struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
const char *name;
- short wait_type_outer; /* can be taken in this context */
- short wait_type_inner; /* presents this context */
+ u8 wait_type_outer; /* can be taken in this context */
+ u8 wait_type_inner; /* presents this context */
+ u8 lock_type;
+ /* u8 hole; */
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 7aaa753b8608..477a597db013 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -113,6 +113,8 @@ LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
LSM_HOOK(int, 0, inode_init_security, struct inode *inode,
struct inode *dir, const struct qstr *qstr, const char **name,
void **value, size_t *len)
+LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode,
+ const struct qstr *name, const struct inode *context_inode)
LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry,
umode_t mode)
LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir,
@@ -133,17 +135,20 @@ LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode,
LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask)
LSM_HOOK(int, 0, inode_setattr, struct dentry *dentry, struct iattr *attr)
LSM_HOOK(int, 0, inode_getattr, const struct path *path)
-LSM_HOOK(int, 0, inode_setxattr, struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+LSM_HOOK(int, 0, inode_setxattr, struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name)
LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry)
-LSM_HOOK(int, 0, inode_removexattr, struct dentry *dentry, const char *name)
+LSM_HOOK(int, 0, inode_removexattr, struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name)
LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry)
-LSM_HOOK(int, 0, inode_killpriv, struct dentry *dentry)
-LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct inode *inode,
- const char *name, void **buffer, bool alloc)
+LSM_HOOK(int, 0, inode_killpriv, struct user_namespace *mnt_userns,
+ struct dentry *dentry)
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct user_namespace *mnt_userns,
+ struct inode *inode, const char *name, void **buffer, bool alloc)
LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode,
const char *name, const void *value, size_t size, int flags)
LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index a19adef1f088..fb7f3193753d 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -233,6 +233,15 @@
* Returns 0 if @name and @value have been successfully set,
* -EOPNOTSUPP if no security attribute is needed, or
* -ENOMEM on memory allocation failure.
+ * @inode_init_security_anon:
+ * Set up the incore security field for the new anonymous inode
+ * and return whether the inode creation is permitted by the security
+ * module or not.
+ * @inode contains the inode structure
+ * @name name of the anonymous inode class
+ * @context_inode optional related inode
+ * Returns 0 on success, -EACCES if the security module denies the
+ * creation of this inode, or another -errno upon other errors.
* @inode_create:
* Check permission to create a regular file.
* @dir contains inode structure of the parent of the new file.
@@ -444,6 +453,7 @@
* @inode_killpriv:
* The setuid bit is being removed. Remove similar security labels.
* Called with the dentry->d_inode->i_mutex held.
+ * @mnt_userns: user namespace of the mount
* @dentry is the dentry being changed.
* Return 0 on success. If error is returned, then the operation
 * causing setuid bit removal fails.
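
A skeleton implementation of the new hook, following the prototype added to lsm_hook_defs.h above (the module prefix and the policy helper are hypothetical):

static int example_inode_init_security_anon(struct inode *inode,
					    const struct qstr *name,
					    const struct inode *context_inode)
{
	/* Label the in-core anonymous inode, optionally deriving the
	 * label from @context_inode, and deny creation if the
	 * (hypothetical) policy forbids this anon inode class.
	 */
	if (!example_policy_allows_anon_class(name, context_inode))
		return -EACCES;

	return 0;
}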
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index 9004375c462e..27eb383cb95d 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -42,7 +42,7 @@ struct device *mdev_get_iommu_device(struct device *dev);
 * @mdev: mdev_device structure of the mediated device
* that is being created
* Returns integer: success (0) or error (< 0)
- * @remove: Called to free resources in parent device's driver for a
+ * @remove: Called to free resources in parent device's driver for
* a mediated device. It is mandatory to provide 'remove'
* ops.
* @mdev: mdev_device device structure which is being
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index dbd69b3d170b..ffb787d5ebde 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -49,7 +49,11 @@ struct mdio_device {
unsigned int reset_assert_delay;
unsigned int reset_deassert_delay;
};
-#define to_mdio_device(d) container_of(d, struct mdio_device, dev)
+
+static inline struct mdio_device *to_mdio_device(const struct device *dev)
+{
+ return container_of(dev, struct mdio_device, dev);
+}
/* struct mdio_driver_common: Common to all MDIO drivers */
struct mdio_driver_common {
@@ -57,8 +61,12 @@ struct mdio_driver_common {
int flags;
};
#define MDIO_DEVICE_FLAG_PHY 1
-#define to_mdio_common_driver(d) \
- container_of(d, struct mdio_driver_common, driver)
+
+static inline struct mdio_driver_common *
+to_mdio_common_driver(const struct device_driver *driver)
+{
+ return container_of(driver, struct mdio_driver_common, driver);
+}
/* struct mdio_driver: Generic MDIO driver */
struct mdio_driver {
@@ -73,8 +81,13 @@ struct mdio_driver {
/* Clears up any memory if needed */
void (*remove)(struct mdio_device *mdiodev);
};
-#define to_mdio_driver(d) \
- container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv)
+
+static inline struct mdio_driver *
+to_mdio_driver(const struct device_driver *driver)
+{
+ return container_of(to_mdio_common_driver(driver), struct mdio_driver,
+ mdiodrv);
+}
/* device driver data */
static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data)
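
The macro-to-inline conversions above add type checking on the argument; a hypothetical bus callback shows the intended call sites (the matching rule and the use of the addr field are assumptions, not taken from this patch):

static int example_mdio_match(struct device *dev, struct device_driver *drv)
{
	struct mdio_device *mdiodev = to_mdio_device(dev);
	struct mdio_driver *mdiodrv = to_mdio_driver(drv);

	/* Hypothetical rule: never bind PHY-flagged MDIO drivers here. */
	if (mdiodrv->mdiodrv.flags & MDIO_DEVICE_FLAG_PHY)
		return 0;

	return mdiodev->addr >= 0;	/* addr field assumed from struct mdio_device */
}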
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 959ad7d850b4..07f5ef8fc456 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -68,7 +68,7 @@ struct mei_cl_driver {
int (*probe)(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id);
- int (*remove)(struct mei_cl_device *cldev);
+ void (*remove)(struct mei_cl_device *cldev);
};
int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index b93c44b9121e..c88bc24e31aa 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -117,7 +117,7 @@ int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
-unsigned long memblock_free_all(void);
+void memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
@@ -272,7 +272,7 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
unsigned long *out_spfn,
unsigned long *out_epfn);
/**
- * for_each_free_mem_range_in_zone - iterate through zone specific free
+ * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
* memblock areas
* @i: u64 used as loop variable
* @zone: zone in which all of the memory blocks reside
@@ -292,7 +292,7 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
__next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
/**
- * for_each_free_mem_range_in_zone_from - iterate through zone specific
+ * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
* free memblock areas from a given point
* @i: u64 used as loop variable
* @zone: zone in which all of the memory blocks reside
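
A usage sketch of the renamed iterator (only available with CONFIG_DEFERRED_STRUCT_PAGE_INIT; the zone argument and the per-range action are placeholders):

static void __init example_walk_free_ranges(struct zone *zone)
{
	unsigned long spfn, epfn;
	u64 i;

	/* Visit every free memblock PFN range that overlaps @zone. */
	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
		pr_info("free range: %lx-%lx\n", spfn, epfn);
}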
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index eeb0b52203e9..e6dc793d587d 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -92,6 +92,10 @@ struct lruvec_stat {
long count[NR_VM_NODE_STAT_ITEMS];
};
+struct batched_lruvec_stat {
+ s32 count[NR_VM_NODE_STAT_ITEMS];
+};
+
/*
* Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
* which have elements charged to this memcg.
@@ -107,11 +111,17 @@ struct memcg_shrinker_map {
struct mem_cgroup_per_node {
struct lruvec lruvec;
- /* Legacy local VM stats */
+ /*
+ * Legacy local VM stats. This should remain struct lruvec_stat and
+ * cannot be optimized to struct batched_lruvec_stat: the threshold
+ * of lruvec_stat_cpu is bounded by MEMCG_CHARGE_BATCH * PAGE_SIZE
+ * and therefore fits into an s32, but this field has no upper limit.
+ */
struct lruvec_stat __percpu *lruvec_stat_local;
/* Subtree VM stats (batched updates) */
- struct lruvec_stat __percpu *lruvec_stat_cpu;
+ struct batched_lruvec_stat __percpu *lruvec_stat_cpu;
atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
@@ -475,19 +485,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page)
return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}
-/*
- * set_page_objcgs - associate a page with a object cgroups vector
- * @page: a pointer to the page struct
- * @objcgs: a pointer to the object cgroups vector
- *
- * Atomically associates a page with a vector of object cgroups.
- */
-static inline bool set_page_objcgs(struct page *page,
- struct obj_cgroup **objcgs)
-{
- return !cmpxchg(&page->memcg_data, 0, (unsigned long)objcgs |
- MEMCG_DATA_OBJCGS);
-}
#else
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
@@ -498,12 +495,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
return NULL;
}
-
-static inline bool set_page_objcgs(struct page *page,
- struct obj_cgroup **objcgs)
-{
- return true;
-}
#endif
static __always_inline bool memcg_stat_item_in_bytes(int idx)
@@ -689,8 +680,6 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
-struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
-
struct lruvec *lock_page_lruvec(struct page *page);
struct lruvec *lock_page_lruvec_irq(struct page *page);
struct lruvec *lock_page_lruvec_irqsave(struct page *page,
@@ -1200,11 +1189,6 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
return NULL;
}
-static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
-{
- return NULL;
-}
-
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
@@ -1601,9 +1585,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
#endif
#ifdef CONFIG_MEMCG_KMEM
-int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
- unsigned int nr_pages);
-void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 439a89e758d8..4da95e684e20 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -27,9 +27,8 @@ struct memory_block {
unsigned long start_section_nr;
unsigned long state; /* serialized by the dev->lock */
int online_type; /* for passing data to online routine */
- int phys_device; /* to which fru does this belong? */
- struct device dev;
int nid; /* NID for this memory block */
+ struct device dev;
};
int arch_get_memory_phys_device(unsigned long start_pfn);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 15acce5ab106..7288aa5ef73b 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -16,22 +16,7 @@ struct resource;
struct vmem_altmap;
#ifdef CONFIG_MEMORY_HOTPLUG
-/*
- * Return page for the valid pfn only if the page is online. All pfn
- * walkers which rely on the fully initialized page->flags and others
- * should use this rather than pfn_valid && pfn_to_page
- */
-#define pfn_to_online_page(pfn) \
-({ \
- struct page *___page = NULL; \
- unsigned long ___pfn = pfn; \
- unsigned long ___nr = pfn_to_section_nr(___pfn); \
- \
- if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \
- pfn_valid_within(___pfn)) \
- ___page = pfn_to_page(___pfn); \
- ___page; \
-})
+struct page *pfn_to_online_page(unsigned long pfn);
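
The macro becomes an out-of-line function, but callers keep the same pattern; a hedged sketch of a typical PFN walker:

static void example_walk_pfns(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/* NULL means the pfn is invalid or its section is offline. */
		struct page *page = pfn_to_online_page(pfn);

		if (!page)
			continue;
		/* ... operate on the fully initialized @page ... */
	}
}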
/*
* Types for free bootmem stored in page->lru.next. These have to be in
@@ -68,7 +53,7 @@ typedef int __bitwise mhp_t;
* with this flag set, the resource pointer must no longer be used as it
* might be stale, or the resource might have changed.
*/
-#define MEMHP_MERGE_RESOURCE ((__force mhp_t)BIT(0))
+#define MHP_MERGE_RESOURCE ((__force mhp_t)BIT(0))
/*
* Extended parameters for memory hotplug:
@@ -81,6 +66,9 @@ struct mhp_params {
pgprot_t pgprot;
};
+bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
+struct range mhp_get_pluggable_range(bool need_mapping);
+
/*
* Zone resizing functions
*
@@ -131,10 +119,10 @@ extern int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_params *params);
extern u64 max_mem_size;
-extern int memhp_online_type_from_str(const char *str);
+extern int mhp_online_type_from_str(const char *str);
/* Default online_type (MMOP_*) when new memory blocks are added. */
-extern int memhp_default_online_type;
+extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
@@ -281,6 +269,13 @@ static inline bool movable_node_is_enabled(void)
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
+/*
+ * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
+ * platforms might override and use arch_get_mappable_range()
+ * for internal non memory hotplug purposes.
+ */
+struct range arch_get_mappable_range(void);
+
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* pgdat resizing functions
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 79c49e7f5c30..f5b464daeeca 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -137,6 +137,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
struct dev_pagemap *pgmap);
+bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
@@ -165,6 +166,11 @@ static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
return NULL;
}
+static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
+{
+ return false;
+}
+
static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
return 0;
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 524a7e4702c2..302a330c5c84 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -368,7 +368,6 @@ struct ab8500 {
int it_latchhier_num;
};
-struct ab8500_regulator_platform_data;
struct ab8500_codec_platform_data;
struct ab8500_sysctrl_platform_data;
@@ -376,11 +375,9 @@ struct ab8500_sysctrl_platform_data;
* struct ab8500_platform_data - AB8500 platform data
* @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
* @init: board-specific initialization after detection of ab8500
- * @regulator: machine-specific constraints for regulators
*/
struct ab8500_platform_data {
void (*init) (struct ab8500 *);
- struct ab8500_regulator_platform_data *regulator;
struct ab8500_codec_platform_data *codec;
struct ab8500_sysctrl_platform_data *sysctrl;
};
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index fd5957c042da..9ab0e2fca7ea 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -696,6 +696,6 @@ int axp20x_device_probe(struct axp20x_dev *axp20x);
*
* This tells the axp20x core to remove the associated mfd devices
*/
-int axp20x_device_remove(struct axp20x_dev *axp20x);
+void axp20x_device_remove(struct axp20x_dev *axp20x);
#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/bd9571mwv.h b/include/linux/mfd/bd9571mwv.h
index eb05569f752b..8efd99d07c9e 100644
--- a/include/linux/mfd/bd9571mwv.h
+++ b/include/linux/mfd/bd9571mwv.h
@@ -1,16 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * ROHM BD9571MWV-M driver
+ * ROHM BD9571MWV-M and BD9574MWF-M driver
*
* Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
+ * Copyright (C) 2020 Renesas Electronics Corporation
*
* Based on the TPS65086 driver
*/
@@ -21,11 +14,12 @@
#include <linux/device.h>
#include <linux/regmap.h>
-/* List of registers for BD9571MWV */
+/* List of registers for BD9571MWV and BD9574MWF */
#define BD9571MWV_VENDOR_CODE 0x00
#define BD9571MWV_VENDOR_CODE_VAL 0xdb
#define BD9571MWV_PRODUCT_CODE 0x01
-#define BD9571MWV_PRODUCT_CODE_VAL 0x60
+#define BD9571MWV_PRODUCT_CODE_BD9571MWV 0x60
+#define BD9571MWV_PRODUCT_CODE_BD9574MWF 0x74
#define BD9571MWV_PRODUCT_REVISION 0x02
#define BD9571MWV_I2C_FUSA_MODE 0x10
@@ -55,6 +49,7 @@
#define BD9571MWV_VD33_VID 0x44
#define BD9571MWV_DVFS_VINIT 0x50
+#define BD9574MWF_VD09_VINIT 0x51
#define BD9571MWV_DVFS_SETVMAX 0x52
#define BD9571MWV_DVFS_BOOSTVID 0x53
#define BD9571MWV_DVFS_SETVID 0x54
@@ -68,6 +63,7 @@
#define BD9571MWV_GPIO_INT_SET 0x64
#define BD9571MWV_GPIO_INT 0x65
#define BD9571MWV_GPIO_INTMASK 0x66
+#define BD9574MWF_GPIO_MUX 0x67
#define BD9571MWV_REG_KEEP(n) (0x70 + (n))
@@ -77,6 +73,8 @@
#define BD9571MWV_PROT_ERROR_STATUS2 0x83
#define BD9571MWV_PROT_ERROR_STATUS3 0x84
#define BD9571MWV_PROT_ERROR_STATUS4 0x85
+#define BD9574MWF_PROT_ERROR_STATUS5 0x86
+#define BD9574MWF_SYSTEM_ERROR_STATUS 0x87
#define BD9571MWV_INT_INTREQ 0x90
#define BD9571MWV_INT_INTREQ_MD1_INT BIT(0)
@@ -89,6 +87,12 @@
#define BD9571MWV_INT_INTREQ_BKUP_TRG_INT BIT(7)
#define BD9571MWV_INT_INTMASK 0x91
+#define BD9574MWF_SSCG_CNT 0xA0
+#define BD9574MWF_POFFB_MRB 0xA1
+#define BD9574MWF_SMRB_WR_PROT 0xA2
+#define BD9574MWF_SMRB_ASSERT 0xA3
+#define BD9574MWF_SMRB_STATUS 0xA4
+
#define BD9571MWV_ACCESS_KEY 0xff
/* Define the BD9571MWV IRQ numbers */
@@ -98,23 +102,8 @@ enum bd9571mwv_irqs {
BD9571MWV_IRQ_MD2_E2,
BD9571MWV_IRQ_PROT_ERR,
BD9571MWV_IRQ_GP,
- BD9571MWV_IRQ_128H_OF,
+ BD9571MWV_IRQ_128H_OF, /* BKUP_HOLD on BD9574MWF */
BD9571MWV_IRQ_WDT_OF,
BD9571MWV_IRQ_BKUP_TRG,
};
-
-/**
- * struct bd9571mwv - state holder for the bd9571mwv driver
- *
- * Device data may be used to access the BD9571MWV chip
- */
-struct bd9571mwv {
- struct device *dev;
- struct regmap *regmap;
-
- /* IRQ Data */
- int irq;
- struct regmap_irq_chip_data *irq_data;
-};
-
#endif /* __LINUX_MFD_BD9571MWV_H */
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 4b35baa14d30..2009c4b936d9 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -28,13 +28,13 @@
.id = (_id), \
}
-#define OF_MFD_CELL_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \
+#define MFD_CELL_OF_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \
MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL)
-#define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \
+#define MFD_CELL_OF(_name, _res, _pdata, _pdsize, _id, _compat) \
MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL)
-#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \
+#define MFD_CELL_ACPI(_name, _res, _pdata, _pdsize, _id, _match) \
MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match)
#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
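
Call sites are expected to move to the MFD_CELL_-prefixed spellings; a hypothetical cell table using the renamed OF helper (cell names and compatibles are made up):

static const struct mfd_cell example_cells[] = {
	/* name, resources, platform data, pdata size, id, OF compatible */
	MFD_CELL_OF("example-rtc", NULL, NULL, 0, 0, "vendor,example-rtc"),
	MFD_CELL_OF("example-wdt", NULL, NULL, 0, 0, "vendor,example-wdt"),
};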
diff --git a/include/linux/mfd/hi6421-spmi-pmic.h b/include/linux/mfd/hi6421-spmi-pmic.h
index 2c8896fd852e..2660226138b8 100644
--- a/include/linux/mfd/hi6421-spmi-pmic.h
+++ b/include/linux/mfd/hi6421-spmi-pmic.h
@@ -4,6 +4,7 @@
*
* Copyright (c) 2013 Linaro Ltd.
* Copyright (C) 2011 Hisilicon.
+ * Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
*
* Guodong Xu <guodong.xu@linaro.org>
*/
@@ -12,10 +13,7 @@
#define __HISI_PMIC_H
#include <linux/irqdomain.h>
-
-#define HISI_REGS_ENA_PROTECT_TIME (0) /* in microseconds */
-#define HISI_ECO_MODE_ENABLE (1)
-#define HISI_ECO_MODE_DISABLE (0)
+#include <linux/regmap.h>
struct hi6421_spmi_pmic {
struct resource *res;
@@ -26,28 +24,7 @@ struct hi6421_spmi_pmic {
int irq;
int gpio;
unsigned int *irqs;
+ struct regmap *regmap;
};
-int hi6421_spmi_pmic_read(struct hi6421_spmi_pmic *pmic, int reg);
-int hi6421_spmi_pmic_write(struct hi6421_spmi_pmic *pmic, int reg, u32 val);
-int hi6421_spmi_pmic_rmw(struct hi6421_spmi_pmic *pmic, int reg,
- u32 mask, u32 bits);
-
-enum hi6421_spmi_pmic_irq_list {
- OTMP = 0,
- VBUS_CONNECT,
- VBUS_DISCONNECT,
- ALARMON_R,
- HOLD_6S,
- HOLD_1S,
- POWERKEY_UP,
- POWERKEY_DOWN,
- OCP_SCP_R,
- COUL_R,
- SIM0_HPD_R,
- SIM0_HPD_F,
- SIM1_HPD_R,
- SIM1_HPD_F,
- PMIC_IRQ_LIST_MAX,
-};
#endif /* __HISI_PMIC_H */
diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
index c8ef2f1654a4..74d4e193966a 100644
--- a/include/linux/mfd/intel-m10-bmc.h
+++ b/include/linux/mfd/intel-m10-bmc.h
@@ -15,6 +15,15 @@
/* Register offset of system registers */
#define NIOS2_FW_VERSION 0x0
+#define M10BMC_MAC_LOW 0x10
+#define M10BMC_MAC_BYTE4 GENMASK(7, 0)
+#define M10BMC_MAC_BYTE3 GENMASK(15, 8)
+#define M10BMC_MAC_BYTE2 GENMASK(23, 16)
+#define M10BMC_MAC_BYTE1 GENMASK(31, 24)
+#define M10BMC_MAC_HIGH 0x14
+#define M10BMC_MAC_BYTE6 GENMASK(7, 0)
+#define M10BMC_MAC_BYTE5 GENMASK(15, 8)
+#define M10BMC_MAC_COUNT GENMASK(23, 16)
#define M10BMC_TEST_REG 0x3c
#define M10BMC_BUILD_VER 0x68
#define M10BMC_VER_MAJOR_MSK GENMASK(23, 16)
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
deleted file mode 100644
index 317e8608cf41..000000000000
--- a/include/linux/mfd/intel_msic.h
+++ /dev/null
@@ -1,453 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Core interface for Intel MSIC
- *
- * Copyright (C) 2011, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- */
-
-#ifndef __LINUX_MFD_INTEL_MSIC_H__
-#define __LINUX_MFD_INTEL_MSIC_H__
-
-/* ID */
-#define INTEL_MSIC_ID0 0x000 /* RO */
-#define INTEL_MSIC_ID1 0x001 /* RO */
-
-/* IRQ */
-#define INTEL_MSIC_IRQLVL1 0x002
-#define INTEL_MSIC_ADC1INT 0x003
-#define INTEL_MSIC_CCINT 0x004
-#define INTEL_MSIC_PWRSRCINT 0x005
-#define INTEL_MSIC_PWRSRCINT1 0x006
-#define INTEL_MSIC_CHRINT 0x007
-#define INTEL_MSIC_CHRINT1 0x008
-#define INTEL_MSIC_RTCIRQ 0x009
-#define INTEL_MSIC_GPIO0LVIRQ 0x00a
-#define INTEL_MSIC_GPIO1LVIRQ 0x00b
-#define INTEL_MSIC_GPIOHVIRQ 0x00c
-#define INTEL_MSIC_VRINT 0x00d
-#define INTEL_MSIC_OCAUDIO 0x00e
-#define INTEL_MSIC_ACCDET 0x00f
-#define INTEL_MSIC_RESETIRQ1 0x010
-#define INTEL_MSIC_RESETIRQ2 0x011
-#define INTEL_MSIC_MADC1INT 0x012
-#define INTEL_MSIC_MCCINT 0x013
-#define INTEL_MSIC_MPWRSRCINT 0x014
-#define INTEL_MSIC_MPWRSRCINT1 0x015
-#define INTEL_MSIC_MCHRINT 0x016
-#define INTEL_MSIC_MCHRINT1 0x017
-#define INTEL_MSIC_RTCIRQMASK 0x018
-#define INTEL_MSIC_GPIO0LVIRQMASK 0x019
-#define INTEL_MSIC_GPIO1LVIRQMASK 0x01a
-#define INTEL_MSIC_GPIOHVIRQMASK 0x01b
-#define INTEL_MSIC_VRINTMASK 0x01c
-#define INTEL_MSIC_OCAUDIOMASK 0x01d
-#define INTEL_MSIC_ACCDETMASK 0x01e
-#define INTEL_MSIC_RESETIRQ1MASK 0x01f
-#define INTEL_MSIC_RESETIRQ2MASK 0x020
-#define INTEL_MSIC_IRQLVL1MSK 0x021
-#define INTEL_MSIC_PBCONFIG 0x03e
-#define INTEL_MSIC_PBSTATUS 0x03f /* RO */
-
-/* GPIO */
-#define INTEL_MSIC_GPIO0LV7CTLO 0x040
-#define INTEL_MSIC_GPIO0LV6CTLO 0x041
-#define INTEL_MSIC_GPIO0LV5CTLO 0x042
-#define INTEL_MSIC_GPIO0LV4CTLO 0x043
-#define INTEL_MSIC_GPIO0LV3CTLO 0x044
-#define INTEL_MSIC_GPIO0LV2CTLO 0x045
-#define INTEL_MSIC_GPIO0LV1CTLO 0x046
-#define INTEL_MSIC_GPIO0LV0CTLO 0x047
-#define INTEL_MSIC_GPIO1LV7CTLOS 0x048
-#define INTEL_MSIC_GPIO1LV6CTLO 0x049
-#define INTEL_MSIC_GPIO1LV5CTLO 0x04a
-#define INTEL_MSIC_GPIO1LV4CTLO 0x04b
-#define INTEL_MSIC_GPIO1LV3CTLO 0x04c
-#define INTEL_MSIC_GPIO1LV2CTLO 0x04d
-#define INTEL_MSIC_GPIO1LV1CTLO 0x04e
-#define INTEL_MSIC_GPIO1LV0CTLO 0x04f
-#define INTEL_MSIC_GPIO0LV7CTLI 0x050
-#define INTEL_MSIC_GPIO0LV6CTLI 0x051
-#define INTEL_MSIC_GPIO0LV5CTLI 0x052
-#define INTEL_MSIC_GPIO0LV4CTLI 0x053
-#define INTEL_MSIC_GPIO0LV3CTLI 0x054
-#define INTEL_MSIC_GPIO0LV2CTLI 0x055
-#define INTEL_MSIC_GPIO0LV1CTLI 0x056
-#define INTEL_MSIC_GPIO0LV0CTLI 0x057
-#define INTEL_MSIC_GPIO1LV7CTLIS 0x058
-#define INTEL_MSIC_GPIO1LV6CTLI 0x059
-#define INTEL_MSIC_GPIO1LV5CTLI 0x05a
-#define INTEL_MSIC_GPIO1LV4CTLI 0x05b
-#define INTEL_MSIC_GPIO1LV3CTLI 0x05c
-#define INTEL_MSIC_GPIO1LV2CTLI 0x05d
-#define INTEL_MSIC_GPIO1LV1CTLI 0x05e
-#define INTEL_MSIC_GPIO1LV0CTLI 0x05f
-#define INTEL_MSIC_PWM0CLKDIV1 0x061
-#define INTEL_MSIC_PWM0CLKDIV0 0x062
-#define INTEL_MSIC_PWM1CLKDIV1 0x063
-#define INTEL_MSIC_PWM1CLKDIV0 0x064
-#define INTEL_MSIC_PWM2CLKDIV1 0x065
-#define INTEL_MSIC_PWM2CLKDIV0 0x066
-#define INTEL_MSIC_PWM0DUTYCYCLE 0x067
-#define INTEL_MSIC_PWM1DUTYCYCLE 0x068
-#define INTEL_MSIC_PWM2DUTYCYCLE 0x069
-#define INTEL_MSIC_GPIO0HV3CTLO 0x06d
-#define INTEL_MSIC_GPIO0HV2CTLO 0x06e
-#define INTEL_MSIC_GPIO0HV1CTLO 0x06f
-#define INTEL_MSIC_GPIO0HV0CTLO 0x070
-#define INTEL_MSIC_GPIO1HV3CTLO 0x071
-#define INTEL_MSIC_GPIO1HV2CTLO 0x072
-#define INTEL_MSIC_GPIO1HV1CTLO 0x073
-#define INTEL_MSIC_GPIO1HV0CTLO 0x074
-#define INTEL_MSIC_GPIO0HV3CTLI 0x075
-#define INTEL_MSIC_GPIO0HV2CTLI 0x076
-#define INTEL_MSIC_GPIO0HV1CTLI 0x077
-#define INTEL_MSIC_GPIO0HV0CTLI 0x078
-#define INTEL_MSIC_GPIO1HV3CTLI 0x079
-#define INTEL_MSIC_GPIO1HV2CTLI 0x07a
-#define INTEL_MSIC_GPIO1HV1CTLI 0x07b
-#define INTEL_MSIC_GPIO1HV0CTLI 0x07c
-
-/* SVID */
-#define INTEL_MSIC_SVIDCTRL0 0x080
-#define INTEL_MSIC_SVIDCTRL1 0x081
-#define INTEL_MSIC_SVIDCTRL2 0x082
-#define INTEL_MSIC_SVIDTXLASTPKT3 0x083 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT2 0x084 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT1 0x085 /* RO */
-#define INTEL_MSIC_SVIDTXLASTPKT0 0x086 /* RO */
-#define INTEL_MSIC_SVIDPKTOUTBYTE3 0x087
-#define INTEL_MSIC_SVIDPKTOUTBYTE2 0x088
-#define INTEL_MSIC_SVIDPKTOUTBYTE1 0x089
-#define INTEL_MSIC_SVIDPKTOUTBYTE0 0x08a
-#define INTEL_MSIC_SVIDRXVPDEBUG1 0x08b
-#define INTEL_MSIC_SVIDRXVPDEBUG0 0x08c
-#define INTEL_MSIC_SVIDRXLASTPKT3 0x08d /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT2 0x08e /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT1 0x08f /* RO */
-#define INTEL_MSIC_SVIDRXLASTPKT0 0x090 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS3 0x091 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS2 0x092 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS1 0x093 /* RO */
-#define INTEL_MSIC_SVIDRXCHKSTATUS0 0x094 /* RO */
-
-/* VREG */
-#define INTEL_MSIC_VCCLATCH 0x0c0
-#define INTEL_MSIC_VNNLATCH 0x0c1
-#define INTEL_MSIC_VCCCNT 0x0c2
-#define INTEL_MSIC_SMPSRAMP 0x0c3
-#define INTEL_MSIC_VNNCNT 0x0c4
-#define INTEL_MSIC_VNNAONCNT 0x0c5
-#define INTEL_MSIC_VCC122AONCNT 0x0c6
-#define INTEL_MSIC_V180AONCNT 0x0c7
-#define INTEL_MSIC_V500CNT 0x0c8
-#define INTEL_MSIC_VIHFCNT 0x0c9
-#define INTEL_MSIC_LDORAMP1 0x0ca
-#define INTEL_MSIC_LDORAMP2 0x0cb
-#define INTEL_MSIC_VCC108AONCNT 0x0cc
-#define INTEL_MSIC_VCC108ASCNT 0x0cd
-#define INTEL_MSIC_VCC108CNT 0x0ce
-#define INTEL_MSIC_VCCA100ASCNT 0x0cf
-#define INTEL_MSIC_VCCA100CNT 0x0d0
-#define INTEL_MSIC_VCC180AONCNT 0x0d1
-#define INTEL_MSIC_VCC180CNT 0x0d2
-#define INTEL_MSIC_VCC330CNT 0x0d3
-#define INTEL_MSIC_VUSB330CNT 0x0d4
-#define INTEL_MSIC_VCCSDIOCNT 0x0d5
-#define INTEL_MSIC_VPROG1CNT 0x0d6
-#define INTEL_MSIC_VPROG2CNT 0x0d7
-#define INTEL_MSIC_VEMMCSCNT 0x0d8
-#define INTEL_MSIC_VEMMC1CNT 0x0d9
-#define INTEL_MSIC_VEMMC2CNT 0x0da
-#define INTEL_MSIC_VAUDACNT 0x0db
-#define INTEL_MSIC_VHSPCNT 0x0dc
-#define INTEL_MSIC_VHSNCNT 0x0dd
-#define INTEL_MSIC_VHDMICNT 0x0de
-#define INTEL_MSIC_VOTGCNT 0x0df
-#define INTEL_MSIC_V1P35CNT 0x0e0
-#define INTEL_MSIC_V330AONCNT 0x0e1
-
-/* RESET */
-#define INTEL_MSIC_CHIPCNTRL 0x100 /* WO */
-#define INTEL_MSIC_ERCONFIG 0x101
-
-/* BURST */
-#define INTEL_MSIC_BATCURRENTLIMIT12 0x102
-#define INTEL_MSIC_BATTIMELIMIT12 0x103
-#define INTEL_MSIC_BATTIMELIMIT3 0x104
-#define INTEL_MSIC_BATTIMEDB 0x105
-#define INTEL_MSIC_BRSTCONFIGOUTPUTS 0x106
-#define INTEL_MSIC_BRSTCONFIGACTIONS 0x107
-#define INTEL_MSIC_BURSTCONTROLSTATUS 0x108
-
-/* RTC */
-#define INTEL_MSIC_RTCB1 0x140 /* RO */
-#define INTEL_MSIC_RTCB2 0x141 /* RO */
-#define INTEL_MSIC_RTCB3 0x142 /* RO */
-#define INTEL_MSIC_RTCB4 0x143 /* RO */
-#define INTEL_MSIC_RTCOB1 0x144
-#define INTEL_MSIC_RTCOB2 0x145
-#define INTEL_MSIC_RTCOB3 0x146
-#define INTEL_MSIC_RTCOB4 0x147
-#define INTEL_MSIC_RTCAB1 0x148
-#define INTEL_MSIC_RTCAB2 0x149
-#define INTEL_MSIC_RTCAB3 0x14a
-#define INTEL_MSIC_RTCAB4 0x14b
-#define INTEL_MSIC_RTCWAB1 0x14c
-#define INTEL_MSIC_RTCWAB2 0x14d
-#define INTEL_MSIC_RTCWAB3 0x14e
-#define INTEL_MSIC_RTCWAB4 0x14f
-#define INTEL_MSIC_RTCSC1 0x150
-#define INTEL_MSIC_RTCSC2 0x151
-#define INTEL_MSIC_RTCSC3 0x152
-#define INTEL_MSIC_RTCSC4 0x153
-#define INTEL_MSIC_RTCSTATUS 0x154 /* RO */
-#define INTEL_MSIC_RTCCONFIG1 0x155
-#define INTEL_MSIC_RTCCONFIG2 0x156
-
-/* CHARGER */
-#define INTEL_MSIC_BDTIMER 0x180
-#define INTEL_MSIC_BATTRMV 0x181
-#define INTEL_MSIC_VBUSDET 0x182
-#define INTEL_MSIC_VBUSDET1 0x183
-#define INTEL_MSIC_ADPHVDET 0x184
-#define INTEL_MSIC_ADPLVDET 0x185
-#define INTEL_MSIC_ADPDETDBDM 0x186
-#define INTEL_MSIC_LOWBATTDET 0x187
-#define INTEL_MSIC_CHRCTRL 0x188
-#define INTEL_MSIC_CHRCVOLTAGE 0x189
-#define INTEL_MSIC_CHRCCURRENT 0x18a
-#define INTEL_MSIC_SPCHARGER 0x18b
-#define INTEL_MSIC_CHRTTIME 0x18c
-#define INTEL_MSIC_CHRCTRL1 0x18d
-#define INTEL_MSIC_PWRSRCLMT 0x18e
-#define INTEL_MSIC_CHRSTWDT 0x18f
-#define INTEL_MSIC_WDTWRITE 0x190 /* WO */
-#define INTEL_MSIC_CHRSAFELMT 0x191
-#define INTEL_MSIC_SPWRSRCINT 0x192 /* RO */
-#define INTEL_MSIC_SPWRSRCINT1 0x193 /* RO */
-#define INTEL_MSIC_CHRLEDPWM 0x194
-#define INTEL_MSIC_CHRLEDCTRL 0x195
-
-/* ADC */
-#define INTEL_MSIC_ADC1CNTL1 0x1c0
-#define INTEL_MSIC_ADC1CNTL2 0x1c1
-#define INTEL_MSIC_ADC1CNTL3 0x1c2
-#define INTEL_MSIC_ADC1OFFSETH 0x1c3 /* RO */
-#define INTEL_MSIC_ADC1OFFSETL 0x1c4 /* RO */
-#define INTEL_MSIC_ADC1ADDR0 0x1c5
-#define INTEL_MSIC_ADC1ADDR1 0x1c6
-#define INTEL_MSIC_ADC1ADDR2 0x1c7
-#define INTEL_MSIC_ADC1ADDR3 0x1c8
-#define INTEL_MSIC_ADC1ADDR4 0x1c9
-#define INTEL_MSIC_ADC1ADDR5 0x1ca
-#define INTEL_MSIC_ADC1ADDR6 0x1cb
-#define INTEL_MSIC_ADC1ADDR7 0x1cc
-#define INTEL_MSIC_ADC1ADDR8 0x1cd
-#define INTEL_MSIC_ADC1ADDR9 0x1ce
-#define INTEL_MSIC_ADC1ADDR10 0x1cf
-#define INTEL_MSIC_ADC1ADDR11 0x1d0
-#define INTEL_MSIC_ADC1ADDR12 0x1d1
-#define INTEL_MSIC_ADC1ADDR13 0x1d2
-#define INTEL_MSIC_ADC1ADDR14 0x1d3
-#define INTEL_MSIC_ADC1SNS0H 0x1d4 /* RO */
-#define INTEL_MSIC_ADC1SNS0L 0x1d5 /* RO */
-#define INTEL_MSIC_ADC1SNS1H 0x1d6 /* RO */
-#define INTEL_MSIC_ADC1SNS1L 0x1d7 /* RO */
-#define INTEL_MSIC_ADC1SNS2H 0x1d8 /* RO */
-#define INTEL_MSIC_ADC1SNS2L 0x1d9 /* RO */
-#define INTEL_MSIC_ADC1SNS3H 0x1da /* RO */
-#define INTEL_MSIC_ADC1SNS3L 0x1db /* RO */
-#define INTEL_MSIC_ADC1SNS4H 0x1dc /* RO */
-#define INTEL_MSIC_ADC1SNS4L 0x1dd /* RO */
-#define INTEL_MSIC_ADC1SNS5H 0x1de /* RO */
-#define INTEL_MSIC_ADC1SNS5L 0x1df /* RO */
-#define INTEL_MSIC_ADC1SNS6H 0x1e0 /* RO */
-#define INTEL_MSIC_ADC1SNS6L 0x1e1 /* RO */
-#define INTEL_MSIC_ADC1SNS7H 0x1e2 /* RO */
-#define INTEL_MSIC_ADC1SNS7L 0x1e3 /* RO */
-#define INTEL_MSIC_ADC1SNS8H 0x1e4 /* RO */
-#define INTEL_MSIC_ADC1SNS8L 0x1e5 /* RO */
-#define INTEL_MSIC_ADC1SNS9H 0x1e6 /* RO */
-#define INTEL_MSIC_ADC1SNS9L 0x1e7 /* RO */
-#define INTEL_MSIC_ADC1SNS10H 0x1e8 /* RO */
-#define INTEL_MSIC_ADC1SNS10L 0x1e9 /* RO */
-#define INTEL_MSIC_ADC1SNS11H 0x1ea /* RO */
-#define INTEL_MSIC_ADC1SNS11L 0x1eb /* RO */
-#define INTEL_MSIC_ADC1SNS12H 0x1ec /* RO */
-#define INTEL_MSIC_ADC1SNS12L 0x1ed /* RO */
-#define INTEL_MSIC_ADC1SNS13H 0x1ee /* RO */
-#define INTEL_MSIC_ADC1SNS13L 0x1ef /* RO */
-#define INTEL_MSIC_ADC1SNS14H 0x1f0 /* RO */
-#define INTEL_MSIC_ADC1SNS14L 0x1f1 /* RO */
-#define INTEL_MSIC_ADC1BV0H 0x1f2 /* RO */
-#define INTEL_MSIC_ADC1BV0L 0x1f3 /* RO */
-#define INTEL_MSIC_ADC1BV1H 0x1f4 /* RO */
-#define INTEL_MSIC_ADC1BV1L 0x1f5 /* RO */
-#define INTEL_MSIC_ADC1BV2H 0x1f6 /* RO */
-#define INTEL_MSIC_ADC1BV2L 0x1f7 /* RO */
-#define INTEL_MSIC_ADC1BV3H 0x1f8 /* RO */
-#define INTEL_MSIC_ADC1BV3L 0x1f9 /* RO */
-#define INTEL_MSIC_ADC1BI0H 0x1fa /* RO */
-#define INTEL_MSIC_ADC1BI0L 0x1fb /* RO */
-#define INTEL_MSIC_ADC1BI1H 0x1fc /* RO */
-#define INTEL_MSIC_ADC1BI1L 0x1fd /* RO */
-#define INTEL_MSIC_ADC1BI2H 0x1fe /* RO */
-#define INTEL_MSIC_ADC1BI2L 0x1ff /* RO */
-#define INTEL_MSIC_ADC1BI3H 0x200 /* RO */
-#define INTEL_MSIC_ADC1BI3L 0x201 /* RO */
-#define INTEL_MSIC_CCCNTL 0x202
-#define INTEL_MSIC_CCOFFSETH 0x203 /* RO */
-#define INTEL_MSIC_CCOFFSETL 0x204 /* RO */
-#define INTEL_MSIC_CCADCHA 0x205 /* RO */
-#define INTEL_MSIC_CCADCLA 0x206 /* RO */
-
-/* AUDIO */
-#define INTEL_MSIC_AUDPLLCTRL 0x240
-#define INTEL_MSIC_DMICBUF0123 0x241
-#define INTEL_MSIC_DMICBUF45 0x242
-#define INTEL_MSIC_DMICGPO 0x244
-#define INTEL_MSIC_DMICMUX 0x245
-#define INTEL_MSIC_DMICCLK 0x246
-#define INTEL_MSIC_MICBIAS 0x247
-#define INTEL_MSIC_ADCCONFIG 0x248
-#define INTEL_MSIC_MICAMP1 0x249
-#define INTEL_MSIC_MICAMP2 0x24a
-#define INTEL_MSIC_NOISEMUX 0x24b
-#define INTEL_MSIC_AUDIOMUX12 0x24c
-#define INTEL_MSIC_AUDIOMUX34 0x24d
-#define INTEL_MSIC_AUDIOSINC 0x24e
-#define INTEL_MSIC_AUDIOTXEN 0x24f
-#define INTEL_MSIC_HSEPRXCTRL 0x250
-#define INTEL_MSIC_IHFRXCTRL 0x251
-#define INTEL_MSIC_VOICETXVOL 0x252
-#define INTEL_MSIC_SIDETONEVOL 0x253
-#define INTEL_MSIC_MUSICSHARVOL 0x254
-#define INTEL_MSIC_VOICETXCTRL 0x255
-#define INTEL_MSIC_HSMIXER 0x256
-#define INTEL_MSIC_DACCONFIG 0x257
-#define INTEL_MSIC_SOFTMUTE 0x258
-#define INTEL_MSIC_HSLVOLCTRL 0x259
-#define INTEL_MSIC_HSRVOLCTRL 0x25a
-#define INTEL_MSIC_IHFLVOLCTRL 0x25b
-#define INTEL_MSIC_IHFRVOLCTRL 0x25c
-#define INTEL_MSIC_DRIVEREN 0x25d
-#define INTEL_MSIC_LINEOUTCTRL 0x25e
-#define INTEL_MSIC_VIB1CTRL1 0x25f
-#define INTEL_MSIC_VIB1CTRL2 0x260
-#define INTEL_MSIC_VIB1CTRL3 0x261
-#define INTEL_MSIC_VIB1SPIPCM_1 0x262
-#define INTEL_MSIC_VIB1SPIPCM_2 0x263
-#define INTEL_MSIC_VIB1CTRL5 0x264
-#define INTEL_MSIC_VIB2CTRL1 0x265
-#define INTEL_MSIC_VIB2CTRL2 0x266
-#define INTEL_MSIC_VIB2CTRL3 0x267
-#define INTEL_MSIC_VIB2SPIPCM_1 0x268
-#define INTEL_MSIC_VIB2SPIPCM_2 0x269
-#define INTEL_MSIC_VIB2CTRL5 0x26a
-#define INTEL_MSIC_BTNCTRL1 0x26b
-#define INTEL_MSIC_BTNCTRL2 0x26c
-#define INTEL_MSIC_PCM1TXSLOT01 0x26d
-#define INTEL_MSIC_PCM1TXSLOT23 0x26e
-#define INTEL_MSIC_PCM1TXSLOT45 0x26f
-#define INTEL_MSIC_PCM1RXSLOT0123 0x270
-#define INTEL_MSIC_PCM1RXSLOT045 0x271
-#define INTEL_MSIC_PCM2TXSLOT01 0x272
-#define INTEL_MSIC_PCM2TXSLOT23 0x273
-#define INTEL_MSIC_PCM2TXSLOT45 0x274
-#define INTEL_MSIC_PCM2RXSLOT01 0x275
-#define INTEL_MSIC_PCM2RXSLOT23 0x276
-#define INTEL_MSIC_PCM2RXSLOT45 0x277
-#define INTEL_MSIC_PCM1CTRL1 0x278
-#define INTEL_MSIC_PCM1CTRL2 0x279
-#define INTEL_MSIC_PCM1CTRL3 0x27a
-#define INTEL_MSIC_PCM2CTRL1 0x27b
-#define INTEL_MSIC_PCM2CTRL2 0x27c
-
-/* HDMI */
-#define INTEL_MSIC_HDMIPUEN 0x280
-#define INTEL_MSIC_HDMISTATUS 0x281 /* RO */
-
-/* Physical address of the start of the MSIC interrupt tree in SRAM */
-#define INTEL_MSIC_IRQ_PHYS_BASE 0xffff7fc0
-
-/**
- * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver
- * @gpio_base: base number for the GPIOs
- */
-struct intel_msic_gpio_pdata {
- unsigned gpio_base;
-};
-
-/**
- * struct intel_msic_ocd_pdata - platform data for the MSIC OCD driver
- * @gpio: GPIO number used for OCD interrupts
- *
- * The MSIC MFD driver converts @gpio into an IRQ number and passes it to
- * the OCD driver as %IORESOURCE_IRQ.
- */
-struct intel_msic_ocd_pdata {
- unsigned gpio;
-};
-
-/* MSIC embedded blocks (subdevices) */
-enum intel_msic_block {
- INTEL_MSIC_BLOCK_TOUCH,
- INTEL_MSIC_BLOCK_ADC,
- INTEL_MSIC_BLOCK_BATTERY,
- INTEL_MSIC_BLOCK_GPIO,
- INTEL_MSIC_BLOCK_AUDIO,
- INTEL_MSIC_BLOCK_HDMI,
- INTEL_MSIC_BLOCK_THERMAL,
- INTEL_MSIC_BLOCK_POWER_BTN,
- INTEL_MSIC_BLOCK_OCD,
-
- INTEL_MSIC_BLOCK_LAST,
-};
-
-/**
- * struct intel_msic_platform_data - platform data for the MSIC driver
- * @irq: array of interrupt numbers, one per device. If @irq is set to %0
- * for a given block, the corresponding platform device is not
- * created. For devices which don't have an interrupt, use %0xff
- * (this is same as in SFI spec).
- * @gpio: platform data for the MSIC GPIO driver
- * @ocd: platform data for the MSIC OCD driver
- *
- * Once the MSIC driver is initialized, the register interface is ready to
- * use. All the platform devices for subdevices are created after the
- * register interface is ready so that we can guarantee its availability to
- * the subdevice drivers.
- *
- * Interrupt numbers are passed to the subdevices via %IORESOURCE_IRQ
- * resources of the created platform device.
- */
-struct intel_msic_platform_data {
- int irq[INTEL_MSIC_BLOCK_LAST];
- struct intel_msic_gpio_pdata *gpio;
- struct intel_msic_ocd_pdata *ocd;
-};
-
-struct intel_msic;
-
-extern int intel_msic_reg_read(unsigned short reg, u8 *val);
-extern int intel_msic_reg_write(unsigned short reg, u8 val);
-extern int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask);
-extern int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count);
-extern int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count);
-
-/*
- * pdev_to_intel_msic - gets an MSIC instance from the platform device
- * @pdev: platform device pointer
- *
- * The client drivers need to have pointer to the MSIC instance if they
- * want to call intel_msic_irq_read(). This macro can be used for
- * convenience to get the MSIC pointer from @pdev where needed. This is
- * _only_ valid for devices which are managed by the MSIC.
- */
-#define pdev_to_intel_msic(pdev) (dev_get_drvdata(pdev->dev.parent))
-
-extern int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg,
- u8 *val);
-
-#endif /* __LINUX_MFD_INTEL_MSIC_H__ */
diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h
index 043d3b6de9ec..5ced55eae11b 100644
--- a/include/linux/mfd/iqs62x.h
+++ b/include/linux/mfd/iqs62x.h
@@ -28,7 +28,7 @@
#define IQS620_GLBL_EVENT_MASK_PMU BIT(6)
#define IQS62X_NUM_KEYS 16
-#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 5)
+#define IQS62X_NUM_EVENTS (IQS62X_NUM_KEYS + 6)
#define IQS62X_EVENT_SIZE 10
@@ -78,6 +78,7 @@ enum iqs62x_event_flag {
/* everything else */
IQS62X_EVENT_SYS_RESET,
+ IQS62X_EVENT_SYS_ATI,
};
struct iqs62x_event_data {
@@ -97,12 +98,10 @@ struct iqs62x_dev_desc {
const char *dev_name;
const struct mfd_cell *sub_devs;
int num_sub_devs;
-
u8 prod_num;
u8 sw_num;
const u8 *cal_regs;
int num_cal_regs;
-
u8 prox_mask;
u8 sar_mask;
u8 hall_mask;
@@ -110,16 +109,12 @@ struct iqs62x_dev_desc {
u8 temp_mask;
u8 als_mask;
u8 ir_mask;
-
u8 prox_settings;
u8 als_flags;
u8 hall_flags;
u8 hyst_shift;
-
u8 interval;
u8 interval_div;
-
- u8 clk_div;
const char *fw_name;
const enum iqs62x_event_reg (*event_regs)[IQS62X_EVENT_SIZE];
};
@@ -130,8 +125,10 @@ struct iqs62x_core {
struct regmap *regmap;
struct blocking_notifier_head nh;
struct list_head fw_blk_head;
+ struct completion ati_done;
struct completion fw_done;
enum iqs62x_ui_sel ui_sel;
+ unsigned long event_cache;
};
extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS];
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index 4283b5b33e04..66f673c35303 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -12,6 +12,8 @@ enum rohm_chip_type {
ROHM_CHIP_TYPE_BD71847,
ROHM_CHIP_TYPE_BD70528,
ROHM_CHIP_TYPE_BD71828,
+ ROHM_CHIP_TYPE_BD9571,
+ ROHM_CHIP_TYPE_BD9574,
ROHM_CHIP_TYPE_AMOUNT
};
@@ -20,14 +22,12 @@ struct rohm_regmap_dev {
struct regmap *regmap;
};
-enum {
- ROHM_DVS_LEVEL_UNKNOWN,
- ROHM_DVS_LEVEL_RUN,
- ROHM_DVS_LEVEL_IDLE,
- ROHM_DVS_LEVEL_SUSPEND,
- ROHM_DVS_LEVEL_LPSR,
- ROHM_DVS_LEVEL_MAX = ROHM_DVS_LEVEL_LPSR,
-};
+#define ROHM_DVS_LEVEL_RUN BIT(0)
+#define ROHM_DVS_LEVEL_IDLE BIT(1)
+#define ROHM_DVS_LEVEL_SUSPEND BIT(2)
+#define ROHM_DVS_LEVEL_LPSR BIT(3)
+#define ROHM_DVS_LEVEL_VALID_AMOUNT 4
+#define ROHM_DVS_LEVEL_UNKNOWN 0
/**
* struct rohm_dvs_config - dynamic voltage scaling register descriptions
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 562862ff819c..d26acc8b21cd 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -279,7 +279,7 @@ struct mhi_controller_config {
u32 num_channels;
const struct mhi_channel_config *ch_cfg;
u32 num_events;
- const struct mhi_event_config *event_cfg;
+ struct mhi_event_config *event_cfg;
bool use_bounce_buf;
bool m2_no_db;
};
@@ -347,12 +347,14 @@ struct mhi_controller_config {
* @unmap_single: CB function to destroy TRE buffer
* @read_reg: Read a MHI register via the physical link (required)
* @write_reg: Write a MHI register via the physical link (required)
+ * @reset: Controller specific reset function (optional)
* @buffer_len: Bounce buffer length
* @index: Index of the MHI controller instance
* @bounce_buf: Use of bounce buffer
* @fbc_download: MHI host needs to do complete image transfer (optional)
* @pre_init: MHI host needs to do pre-initialization before power up
* @wake_set: Device wakeup set flag
+ * @irq_flags: irq flags passed to request_irq (optional)
*
* Fields marked as (required) need to be populated by the controller driver
* before calling mhi_register_controller(). For the fields marked as (optional)
@@ -437,6 +439,7 @@ struct mhi_controller {
u32 *out);
void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
u32 val);
+ void (*reset)(struct mhi_controller *mhi_cntrl);
size_t buffer_len;
int index;
@@ -444,6 +447,7 @@ struct mhi_controller {
bool fbc_download;
bool pre_init;
bool wake_set;
+ unsigned long irq_flags;
};
/**
@@ -599,6 +603,15 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
/**
+ * mhi_get_free_desc_count - Get transfer ring length
+ * Get the number of TDs available to queue buffers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ */
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+ enum dma_data_direction dir);
+
+/**
* mhi_prepare_for_power_up - Do pre-initialization before power up.
* This is optional, call this before power up if
* the controller does not want bus framework to
@@ -673,6 +686,13 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
/**
+ * mhi_soc_reset - Trigger a device reset. This can be used as a last resort
+ * to reset and recover a device.
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
+
+/**
* mhi_device_get - Disable device low power mode
* @mhi_dev: Device associated with the channel
*/
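
A hedged sketch of how a client driver might use the new descriptor-count helper before queueing an uplink transfer (the wrapper function is illustrative only):

static bool example_mhi_can_queue_ul(struct mhi_device *mhi_dev)
{
	/* Only queue another buffer if the UL transfer ring has a free TD. */
	return mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) > 0;
}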
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 4594838a0f7c..3a389633b68f 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -89,7 +89,7 @@ extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
-static inline int PageMovable(struct page *page) { return 0; };
+static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
struct address_space *mapping)
{
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index f1de49d64a98..dc3d2508f5c6 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -359,6 +359,10 @@ enum mlx5_event {
MLX5_EVENT_TYPE_MAX = 0x100,
};
+enum mlx5_driver_event {
+ MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
+};
+
enum {
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
@@ -578,7 +582,10 @@ struct mlx5_init_seg {
__be32 internal_timer_l;
__be32 rsvd3[2];
__be32 health_counter;
- __be32 rsvd4[1019];
+ __be32 rsvd4[11];
+ __be32 real_time_h;
+ __be32 real_time_l;
+ __be32 rsvd5[1006];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
@@ -899,6 +906,11 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
+static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
+{
+ return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
+}
+
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9)
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 4672e12f1aa5..53b89631a1d9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -143,6 +143,7 @@ enum {
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MTUTC = 0x9055,
MLX5_REG_MPEGC = 0x9056,
MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
@@ -193,7 +194,8 @@ enum port_state_policy {
enum mlx5_coredev_type {
MLX5_COREDEV_PF,
- MLX5_COREDEV_VF
+ MLX5_COREDEV_VF,
+ MLX5_COREDEV_SF,
};
struct mlx5_field_desc {
@@ -305,13 +307,6 @@ struct mlx5_cmd {
struct mlx5_cmd_stats *stats;
};
-struct mlx5_port_caps {
- int gid_table_len;
- int pkey_table_len;
- u8 ext_port_cap;
- bool has_smi;
-};
-
struct mlx5_cmd_mailbox {
void *buf;
dma_addr_t dma;
@@ -373,6 +368,8 @@ struct mlx5_core_mkey {
u32 key;
u32 pd;
u32 type;
+ struct wait_queue_head wait;
+ refcount_t usecount;
};
#define MLX5_24BIT_MASK ((1 << 24) - 1)
@@ -507,6 +504,10 @@ struct mlx5_devcom;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
+struct mlx5_vhca_state_notifier;
+struct mlx5_sf_dev_table;
+struct mlx5_sf_hw_table;
+struct mlx5_sf_table;
struct mlx5_rate_limit {
u32 rate;
@@ -564,6 +565,7 @@ struct mlx5_priv {
int host_pf_pages;
struct mlx5_core_health health;
+ struct list_head traps;
/* start: qp staff */
struct dentry *qp_debugfs;
@@ -582,7 +584,6 @@ struct mlx5_priv {
/* end: alloc staff */
struct dentry *dbg_root;
- struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
struct mlx5_adev **adev;
@@ -603,6 +604,15 @@ struct mlx5_priv {
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
+#ifdef CONFIG_MLX5_SF
+ struct mlx5_vhca_state_notifier *vhca_state_notifier;
+ struct mlx5_sf_dev_table *sf_dev_table;
+ struct mlx5_core_dev *parent_mdev;
+#endif
+#ifdef CONFIG_MLX5_SF_MANAGER
+ struct mlx5_sf_hw_table *sf_hw_table;
+ struct mlx5_sf_table *sf_table;
+#endif
};
enum mlx5_device_state {
@@ -661,18 +671,22 @@ struct mlx5_pps {
u8 enabled;
};
-struct mlx5_clock {
- struct mlx5_nb pps_nb;
- seqlock_t lock;
+struct mlx5_timer {
struct cyclecounter cycles;
struct timecounter tc;
- struct hwtstamp_config hwtstamp_config;
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
+};
+
+struct mlx5_clock {
+ struct mlx5_nb pps_nb;
+ seqlock_t lock;
+ struct hwtstamp_config hwtstamp_config;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
+ struct mlx5_timer timer;
};
struct mlx5_dm;
@@ -694,7 +708,6 @@ struct mlx5_core_dev {
u8 rev_id;
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
- struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
struct {
u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
@@ -1072,11 +1085,26 @@ enum {
MAX_MR_CACHE_ENTRIES
};
+/* Async-atomic event notifier used by mlx5 core to forward FW
+ * events received from the event queue to mlx5 consumers.
+ * Optimise event queue dispatching.
+ */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+
+/* Async-atomic event notifier used for forwarding
+ * events from the event queue to the mlx5 events dispatcher,
+ * eswitch, clock and others.
+ */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+/* Blocking event notifier used to forward SW events, used for slow path */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data);
+
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
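A hedged sketch of how a consumer might hook the new blocking notifier chain for slow-path SW events; the callback and init/cleanup wrappers are illustrative:

static int my_sw_event_cb(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	/* e.g. event == MLX5_DRIVER_EVENT_TYPE_TRAP, data is event specific */
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_sw_event_cb };

static int my_consumer_init(struct mlx5_core_dev *mdev)
{
	return mlx5_blocking_notifier_register(mdev, &my_nb);
}

static void my_consumer_cleanup(struct mlx5_core_dev *mdev)
{
	mlx5_blocking_notifier_unregister(mdev, &my_nb);
}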
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 29fd832950e0..994c2c8cb4fd 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -96,6 +96,35 @@ static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num);
+u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
+ u16 vport_num);
+
+/* Reg C1 usage:
+ * Reg C1 = < ESW_TUN_ID(12) | ESW_TUN_OPTS(12) | ESW_ZONE_ID(8) >
+ *
+ * Highest 12 bits of reg c1 is the encapsulation tunnel id, next 12 bits is
+ * encapsulation tunnel options, and the lowest 8 bits are used for zone id.
+ *
+ * Zone id is used to restore CT flow when packet misses on chain.
+ *
+ * Tunnel id and options are used together to restore the tunnel info metadata
+ * on miss and to support inner header rewrite by means of implicit chain 0
+ * flows.
+ */
+#define ESW_ZONE_ID_BITS 8
+#define ESW_TUN_OPTS_BITS 12
+#define ESW_TUN_ID_BITS 12
+#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
+#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
+#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
+#define ESW_TUN_OPTS_MASK GENMASK(32 - ESW_TUN_ID_BITS - 1, ESW_TUN_OPTS_OFFSET)
+#define ESW_TUN_MASK GENMASK(31, ESW_TUN_OFFSET)
+#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
+#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT 0xFFF /* 0xFFF is a reserved mapping */
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
+#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
+
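Using only the masks defined above, the individual reg C1 fields can be unpacked as follows; the helper names are illustrative, not part of the header:

static inline u8 esw_reg_c1_zone_id(u32 reg_c1)
{
	return reg_c1 & ESW_ZONE_ID_MASK;	/* lowest 8 bits */
}

static inline u16 esw_reg_c1_tun_opts(u32 reg_c1)
{
	return (reg_c1 & ESW_TUN_OPTS_MASK) >> ESW_TUN_OPTS_OFFSET;
}

static inline u16 esw_reg_c1_tun_id(u32 reg_c1)
{
	return reg_c1 >> (ESW_TUN_OPTS_OFFSET + ESW_TUN_OPTS_BITS);	/* highest 12 bits */
}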
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 442c0160caab..df5d91c8b2d4 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -842,11 +842,16 @@ struct mlx5_ifc_qos_cap_bits {
u8 reserved_at_4[0x1];
u8 packet_pacing_burst_bound[0x1];
u8 packet_pacing_typical_size[0x1];
- u8 reserved_at_7[0x4];
+ u8 reserved_at_7[0x1];
+ u8 nic_sq_scheduling[0x1];
+ u8 nic_bw_share[0x1];
+ u8 nic_rate_limit[0x1];
u8 packet_pacing_uid[0x1];
u8 reserved_at_c[0x14];
- u8 reserved_at_20[0x20];
+ u8 reserved_at_20[0xb];
+ u8 log_max_qos_nic_queue_group[0x5];
+ u8 reserved_at_30[0x10];
u8 packet_pacing_max_rate[0x20];
@@ -932,11 +937,18 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 reserved_at_200[0x600];
};
+enum {
+ MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_QP_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_roce_cap_bits {
u8 roce_apm[0x1];
u8 reserved_at_1[0x3];
u8 sw_r_roce_src_udp_port[0x1];
- u8 reserved_at_5[0x1b];
+ u8 reserved_at_5[0x19];
+ u8 qp_ts_format[0x2];
u8 reserved_at_20[0x60];
@@ -1253,6 +1265,18 @@ enum {
MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
};
+enum {
+ MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+};
+
+enum {
+ MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x1f];
u8 vhca_resource_manager[0x1];
@@ -1278,7 +1302,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_a0[0x3];
u8 ece_support[0x1];
- u8 reserved_at_a4[0x7];
+ u8 reserved_at_a4[0x5];
+ u8 reg_c_preserve[0x1];
+ u8 reserved_at_aa[0x1];
u8 log_max_srq[0x5];
u8 reserved_at_b0[0x1];
u8 uplink_follow[0x1];
@@ -1564,7 +1590,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 general_obj_types[0x40];
- u8 reserved_at_440[0x4];
+ u8 sq_ts_format[0x2];
+ u8 rq_ts_format[0x2];
u8 steering_format_version[0x4];
u8 create_qp_start_hint[0x18];
@@ -1634,7 +1661,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 sf_set_partition[0x1];
u8 reserved_at_682[0x1];
u8 log_max_sf[0x5];
- u8 reserved_at_688[0x8];
+ u8 apu[0x1];
+ u8 reserved_at_689[0x7];
u8 log_min_sf_size[0x8];
u8 max_num_sf_partitions[0x8];
@@ -2868,6 +2896,12 @@ enum {
MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
};
+enum {
+ MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_QPC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
u8 lag_tx_port_affinity[0x4];
@@ -2896,7 +2930,9 @@ struct mlx5_ifc_qpc_bits {
u8 log_rq_stride[0x3];
u8 no_sq[0x1];
u8 log_sq_size[0x4];
- u8 reserved_at_55[0x6];
+ u8 reserved_at_55[0x3];
+ u8 ts_format[0x2];
+ u8 reserved_at_5a[0x1];
u8 rlky[0x1];
u8 ulp_stateless_offload_mode[0x4];
@@ -3312,6 +3348,12 @@ enum {
MLX5_SQC_STATE_ERR = 0x3,
};
+enum {
+ MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_sqc_bits {
u8 rlky[0x1];
u8 cd_master[0x1];
@@ -3323,7 +3365,9 @@ struct mlx5_ifc_sqc_bits {
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0x11];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -3345,7 +3389,7 @@ struct mlx5_ifc_sqc_bits {
u8 reserved_at_e0[0x10];
u8 packet_pacing_rate_limit_index[0x10];
u8 tis_lst_sz[0x10];
- u8 reserved_at_110[0x10];
+ u8 qos_queue_group_id[0x10];
u8 reserved_at_120[0x40];
@@ -3360,6 +3404,7 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4,
};
enum {
@@ -3414,6 +3459,12 @@ enum {
MLX5_RQC_STATE_ERR = 0x3,
};
+enum {
+ MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+};
+
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
u8 delay_drop_en[0x1];
@@ -3424,7 +3475,9 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0x11];
+ u8 reserved_at_f[0xb];
+ u8 ts_format[0x2];
+ u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -3816,7 +3869,7 @@ struct mlx5_ifc_cqc_bits {
u8 status[0x4];
u8 reserved_at_4[0x2];
u8 dbr_umem_valid[0x1];
- u8 reserved_at_7[0x1];
+ u8 apu_thread_cq[0x1];
u8 cqe_sz[0x3];
u8 cc[0x1];
u8 reserved_at_c[0x1];
@@ -4803,6 +4856,7 @@ struct mlx5_ifc_query_scheduling_element_out_bits {
enum {
SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+ SCHEDULING_HIERARCHY_NIC = 0x3,
};
struct mlx5_ifc_query_scheduling_element_in_bits {
@@ -5904,6 +5958,18 @@ struct mlx5_ifc_dealloc_modify_header_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_query_modify_header_context_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_60[0xa0];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -9094,6 +9160,28 @@ struct mlx5_ifc_mpegc_reg_bits {
u8 reserved_at_60[0x100];
};
+enum {
+ MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2,
+ MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3,
+};
+
+struct mlx5_ifc_mtutc_reg_bits {
+ u8 reserved_at_0[0x1c];
+ u8 operation[0x4];
+
+ u8 freq_adjustment[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 utc_sec[0x20];
+
+ u8 reserved_at_a0[0x2];
+ u8 utc_nsec[0x1e];
+
+ u8 time_adjustment[0x20];
+};
+
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x68];
u8 fec_50G_per_lane_in_pplm[0x1];
@@ -9152,7 +9240,9 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x6e];
+ u8 reserved_at_0[0x6b];
+ u8 ptpcyc2realtime_modify[0x1];
+ u8 reserved_at_6c[0x2];
u8 pci_status_and_power[0x1];
u8 reserved_at_6f[0x5];
u8 mark_tx_action_cnp[0x1];
@@ -9175,7 +9265,8 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_95_to_87[0x9];
u8 mpegc[0x1];
- u8 regs_85_to_68[0x12];
+ u8 mtutc[0x1];
+ u8 regs_84_to_68[0x11];
u8 tracer_registers[0x4];
u8 regs_63_to_32[0x20];
@@ -9908,6 +9999,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mcda_reg_bits mcda_reg;
struct mlx5_ifc_mirc_reg_bits mirc_reg;
struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
+ struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
u8 reserved_at_0[0x60e0];
};
diff --git a/include/linux/mm-arch-hooks.h b/include/linux/mm-arch-hooks.h
deleted file mode 100644
index 9c4bedc95504..000000000000
--- a/include/linux/mm-arch-hooks.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Generic mm no-op hooks.
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- */
-#ifndef _LINUX_MM_ARCH_HOOKS_H
-#define _LINUX_MM_ARCH_HOOKS_H
-
-#include <asm/mm-arch-hooks.h>
-
-#ifndef arch_remap
-static inline void arch_remap(struct mm_struct *mm,
- unsigned long old_start, unsigned long old_end,
- unsigned long new_start, unsigned long new_end)
-{
-}
-#define arch_remap arch_remap
-#endif
-
-#endif /* _LINUX_MM_ARCH_HOOKS_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ecdf8a8cd6ae..77e64e3eac80 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -514,11 +514,14 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags)
* pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
- struct vm_area_struct *vma; /* Target VMA */
- unsigned int flags; /* FAULT_FLAG_xxx flags */
- gfp_t gfp_mask; /* gfp mask to be used for allocations */
- pgoff_t pgoff; /* Logical page offset based on vma */
- unsigned long address; /* Faulting virtual address */
+ const struct {
+ struct vm_area_struct *vma; /* Target VMA */
+ gfp_t gfp_mask; /* gfp mask to be used for allocations */
+ pgoff_t pgoff; /* Logical page offset based on vma */
+ unsigned long address; /* Faulting virtual address */
+ };
+ unsigned int flags; /* FAULT_FLAG_xxx flags
+ * XXX: should really be 'const' */
pmd_t *pmd; /* Pointer to pmd entry matching
* the 'address' */
pud_t *pud; /* Pointer to pud entry matching
@@ -542,8 +545,8 @@ struct vm_fault {
* is not NULL, otherwise pmd.
*/
pgtable_t prealloc_pte; /* Pre-allocated pte page table.
- * vm_ops->map_pages() calls
- * alloc_set_pte() from atomic context.
+ * vm_ops->map_pages() sets up a page
+ * table from atomic context.
* do_fault_around() pre-allocates
* page table to avoid allocation from
* atomic context.
@@ -578,7 +581,7 @@ struct vm_operations_struct {
vm_fault_t (*fault)(struct vm_fault *vmf);
vm_fault_t (*huge_fault)(struct vm_fault *vmf,
enum page_entry_size pe_size);
- void (*map_pages)(struct vm_fault *vmf,
+ vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
@@ -590,7 +593,8 @@ struct vm_operations_struct {
vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
- * for use by special VMAs that can switch between memory and hardware
+ * for use by special VMAs. See also generic_access_phys() for a generic
+ * implementation useful for any iomem mapping.
*/
int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
@@ -988,7 +992,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
+void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
@@ -1181,6 +1187,9 @@ static inline void get_page(struct page *page)
}
bool __must_check try_grab_page(struct page *page, unsigned int flags);
+__maybe_unused struct page *try_grab_compound_head(struct page *page, int refs,
+ unsigned int flags);
+
static inline __must_check bool try_get_page(struct page *page)
{
@@ -1584,7 +1593,7 @@ struct address_space *page_mapping_file(struct page *page);
* ALLOC_NO_WATERMARKS and the low watermark was not
* met implying that the system is under some pressure.
*/
-static inline bool page_is_pfmemalloc(struct page *page)
+static inline bool page_is_pfmemalloc(const struct page *page)
{
/*
* Page index cannot be this large so this must be
@@ -1658,9 +1667,11 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
+ struct mmu_notifier_range *range, pte_t **ptepp,
+ pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pte(struct mm_struct *mm, unsigned long address,
- struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
- spinlock_t **ptlp);
+ pte_t **ptepp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
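A minimal sketch of the new two-argument follow_pte() calling convention; it mirrors what follow_pfn() already provides and is purely illustrative:

static int read_one_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long *pfn)
{
	spinlock_t *ptl;
	pte_t *ptep;

	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
		return -EINVAL;
	*pfn = pte_pfn(*ptep);
	pte_unmap_unlock(ptep, ptl);	/* drop the PTE mapping and lock */
	return 0;
}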
@@ -1754,48 +1765,6 @@ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
struct task_struct *task, bool bypass_rlim);
-/* Container for pinned pfns / pages */
-struct frame_vector {
- unsigned int nr_allocated; /* Number of frames we have space for */
- unsigned int nr_frames; /* Number of frames stored in ptrs array */
- bool got_ref; /* Did we pin pages by getting page ref? */
- bool is_pfns; /* Does array contain pages or pfns? */
- void *ptrs[]; /* Array of pinned pfns / pages. Use
- * pfns_vector_pages() or pfns_vector_pfns()
- * for access */
-};
-
-struct frame_vector *frame_vector_create(unsigned int nr_frames);
-void frame_vector_destroy(struct frame_vector *vec);
-int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
- unsigned int gup_flags, struct frame_vector *vec);
-void put_vaddr_frames(struct frame_vector *vec);
-int frame_vector_to_pages(struct frame_vector *vec);
-void frame_vector_to_pfns(struct frame_vector *vec);
-
-static inline unsigned int frame_vector_count(struct frame_vector *vec)
-{
- return vec->nr_frames;
-}
-
-static inline struct page **frame_vector_pages(struct frame_vector *vec)
-{
- if (vec->is_pfns) {
- int err = frame_vector_to_pages(vec);
-
- if (err)
- return ERR_PTR(err);
- }
- return (struct page **)(vec->ptrs);
-}
-
-static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
-{
- if (!vec->is_pfns)
- frame_vector_to_pfns(vec);
- return (unsigned long *)(vec->ptrs);
-}
-
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages);
@@ -2344,32 +2313,20 @@ extern void free_initmem(void);
extern unsigned long free_reserved_area(void *start, void *end,
int poison, const char *s);
-#ifdef CONFIG_HIGHMEM
-/*
- * Free a highmem page into the buddy system, adjusting totalhigh_pages
- * and totalram_pages.
- */
-extern void free_highmem_page(struct page *page);
-#endif
-
extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);
extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
/* Free the reserved page into the buddy system, so it gets managed. */
-static inline void __free_reserved_page(struct page *page)
+static inline void free_reserved_page(struct page *page)
{
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
-}
-
-static inline void free_reserved_page(struct page *page)
-{
- __free_reserved_page(page);
adjust_managed_page_count(page, 1);
}
+#define free_highmem_page(page) free_reserved_page(page)
static inline void mark_page_reserved(struct page *page)
{
@@ -2439,9 +2396,10 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif
extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_zone(unsigned long, int, unsigned long,
+extern void memmap_init_range(unsigned long, int, unsigned long,
unsigned long, unsigned long, enum meminit_context,
struct vmem_altmap *, int migratetype);
+extern void memmap_init_zone(struct zone *zone);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
@@ -2622,7 +2580,7 @@ extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
-extern void filemap_map_pages(struct vm_fault *vmf,
+extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
@@ -3177,5 +3135,7 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
extern int sysctl_nr_trim_pages;
+void mem_dump_obj(void *object);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8fc71e9d7bb0..355ea1ee32bd 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -24,7 +24,7 @@ static inline int page_is_file_lru(struct page *page)
return !PageSwapBacked(page);
}
-static __always_inline void __update_lru_size(struct lruvec *lruvec,
+static __always_inline void update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
int nr_pages)
{
@@ -33,76 +33,27 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
-}
-
-static __always_inline void update_lru_size(struct lruvec *lruvec,
- enum lru_list lru, enum zone_type zid,
- int nr_pages)
-{
- __update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
-static __always_inline void add_page_to_lru_list(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
-{
- update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
- list_add(&page->lru, &lruvec->lists[lru]);
-}
-
-static __always_inline void add_page_to_lru_list_tail(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
-{
- update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
- list_add_tail(&page->lru, &lruvec->lists[lru]);
-}
-
-static __always_inline void del_page_from_lru_list(struct page *page,
- struct lruvec *lruvec, enum lru_list lru)
-{
- list_del(&page->lru);
- update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
-}
-
/**
- * page_lru_base_type - which LRU list type should a page be on?
- * @page: the page to test
- *
- * Used for LRU list index arithmetic.
- *
- * Returns the base LRU type - file or anon - @page should be on.
+ * __clear_page_lru_flags - clear page lru flags before releasing a page
+ * @page: the page that was on lru and now has a zero reference
*/
-static inline enum lru_list page_lru_base_type(struct page *page)
+static __always_inline void __clear_page_lru_flags(struct page *page)
{
- if (page_is_file_lru(page))
- return LRU_INACTIVE_FILE;
- return LRU_INACTIVE_ANON;
-}
+ VM_BUG_ON_PAGE(!PageLRU(page), page);
-/**
- * page_off_lru - which LRU list was page on? clearing its lru flags.
- * @page: the page to test
- *
- * Returns the LRU list a page was on, as an index into the array of LRU
- * lists; and clears its Unevictable or Active flags, ready for freeing.
- */
-static __always_inline enum lru_list page_off_lru(struct page *page)
-{
- enum lru_list lru;
+ __ClearPageLRU(page);
- if (PageUnevictable(page)) {
- __ClearPageUnevictable(page);
- lru = LRU_UNEVICTABLE;
- } else {
- lru = page_lru_base_type(page);
- if (PageActive(page)) {
- __ClearPageActive(page);
- lru += LRU_ACTIVE;
- }
- }
- return lru;
+ /* this shouldn't happen, so leave the flags to bad_page() */
+ if (PageActive(page) && PageUnevictable(page))
+ return;
+
+ __ClearPageActive(page);
+ __ClearPageUnevictable(page);
}
/**
@@ -116,13 +67,41 @@ static __always_inline enum lru_list page_lru(struct page *page)
{
enum lru_list lru;
+ VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+
if (PageUnevictable(page))
- lru = LRU_UNEVICTABLE;
- else {
- lru = page_lru_base_type(page);
- if (PageActive(page))
- lru += LRU_ACTIVE;
- }
+ return LRU_UNEVICTABLE;
+
+ lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
+ if (PageActive(page))
+ lru += LRU_ACTIVE;
+
return lru;
}
+
+static __always_inline void add_page_to_lru_list(struct page *page,
+ struct lruvec *lruvec)
+{
+ enum lru_list lru = page_lru(page);
+
+ update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
+ list_add(&page->lru, &lruvec->lists[lru]);
+}
+
+static __always_inline void add_page_to_lru_list_tail(struct page *page,
+ struct lruvec *lruvec)
+{
+ enum lru_list lru = page_lru(page);
+
+ update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
+ list_add_tail(&page->lru, &lruvec->lists[lru]);
+}
+
+static __always_inline void del_page_from_lru_list(struct page *page,
+ struct lruvec *lruvec)
+{
+ list_del(&page->lru);
+ update_lru_size(lruvec, page_lru(page), page_zonenum(page),
+ -thp_nr_pages(page));
+}
#endif
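Callers of the LRU helpers drop the explicit list argument, since the list index is now derived from the page itself; a before/after sketch (the old callers typically computed the index via page_lru()):

/* before */
enum lru_list lru = page_lru(page);
del_page_from_lru_list(page, lruvec, lru);
add_page_to_lru_list(page, lruvec, lru);

/* after: the helpers call page_lru(page) internally */
del_page_from_lru_list(page, lruvec);
add_page_to_lru_list(page, lruvec);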
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 07d9acb5b19c..0974ad501a47 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -588,10 +588,9 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
}
struct mmu_gather;
-extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end);
-extern void tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end);
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_finish_mmu(struct mmu_gather *tlb);
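A sketch of the updated mmu_gather pattern now that no start/end range is passed at gather or finish time; the zap step in the middle is illustrative:

static void zap_everything(struct mm_struct *mm)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);	/* tlb_gather_mmu_fullmm() for full-mm teardown */
	/* ... unmap/zap work that batches pages and TLB flushes ... */
	tlb_finish_mmu(&tlb);
}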
static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 42df06c6b19c..f9ad35dd6012 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -311,7 +311,6 @@ struct mmc_card {
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
unsigned int nr_parts;
- unsigned int bouncesz; /* Bounce buffer size */
struct workqueue_struct *complete_wq; /* Private workqueue */
};
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 29aa50711626..ab19245e9945 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -162,6 +162,12 @@ struct mmc_request {
bool cap_cmd_during_tfr;
int tag;
+
+#ifdef CONFIG_MMC_CRYPTO
+ bool crypto_enabled;
+ int crypto_key_slot;
+ u32 data_unit_num;
+#endif
};
struct mmc_card;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 01bba36545c5..26a3c7bc29ae 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -15,6 +15,7 @@
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
+#include <linux/keyslot-manager.h>
struct mmc_ios {
unsigned int clock; /* clock rate */
@@ -79,6 +80,17 @@ struct mmc_ios {
bool enhanced_strobe; /* hs400es selection */
};
+struct mmc_clk_phase {
+ bool valid;
+ u16 in_deg;
+ u16 out_deg;
+};
+
+#define MMC_NUM_CLK_PHASES (MMC_TIMING_MMC_HS400 + 1)
+struct mmc_clk_phase_map {
+ struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES];
+};
+
struct mmc_host;
struct mmc_host_ops {
@@ -384,6 +396,11 @@ struct mmc_host {
#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */
#define MMC_CAP2_AVOID_3_3V (1 << 25) /* Host must negotiate down from 3.3V */
#define MMC_CAP2_MERGE_CAPABLE (1 << 26) /* Host can merge a segment over the segment size */
+#ifdef CONFIG_MMC_CRYPTO
+#define MMC_CAP2_CRYPTO (1 << 27) /* Host supports inline encryption */
+#else
+#define MMC_CAP2_CRYPTO 0
+#endif
int fixed_drv_type; /* fixed driver type for non-removable media */
@@ -412,7 +429,6 @@ struct mmc_host {
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
- unsigned int use_blk_mq:1; /* use blk-mq */
unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
unsigned int can_dma_map_merge:1; /* merging can be used */
@@ -478,6 +494,11 @@ struct mmc_host {
bool cqe_enabled;
bool cqe_on;
+ /* Inline encryption support */
+#ifdef CONFIG_MMC_CRYPTO
+ struct blk_keyslot_manager ksm;
+#endif
+
/* Host Software Queue support */
bool hsq_enabled;
@@ -490,6 +511,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
void mmc_free_host(struct mmc_host *);
+void mmc_of_parse_clk_phase(struct mmc_host *host,
+ struct mmc_clk_phase_map *map);
int mmc_of_parse(struct mmc_host *host);
int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
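A sketch of a host driver consuming the new clock phase map; my_program_delays() is a hypothetical placeholder for controller-specific delay programming:

static void my_apply_hs200_phase(struct mmc_host *mmc)
{
	struct mmc_clk_phase_map map;
	struct mmc_clk_phase *p;

	mmc_of_parse_clk_phase(mmc, &map);
	p = &map.phase[MMC_TIMING_MMC_HS200];
	if (p->valid)
		my_program_delays(mmc, p->in_deg, p->out_deg);
}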
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b593316bff3d..47946cec7584 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -206,10 +206,30 @@ enum node_stat_item {
NR_KERNEL_SCS_KB, /* measured in KiB */
#endif
NR_PAGETABLE, /* used for pagetables */
+#ifdef CONFIG_SWAP
+ NR_SWAPCACHE,
+#endif
NR_VM_NODE_STAT_ITEMS
};
/*
+ * Returns true if the item should be printed in THPs (/proc/vmstat
+ * currently prints the number of anon, file and shmem THPs, but the item
+ * is charged in pages).
+ */
+static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return false;
+
+ return item == NR_ANON_THPS ||
+ item == NR_FILE_THPS ||
+ item == NR_SHMEM_THPS ||
+ item == NR_SHMEM_PMDMAPPED ||
+ item == NR_FILE_PMDMAPPED;
+}
+
+/*
* Returns true if the value is measured in bytes (most vmstat values are
* measured in pages). This defines the API part, the internal representation
* might be different.
@@ -483,6 +503,9 @@ struct zone {
* bootmem allocator):
* managed_pages = present_pages - reserved_pages;
*
+ * cma_pages are present pages that are assigned for CMA use
+ * (MIGRATE_CMA).
+ *
* So present_pages may be used by memory hotplug or memory power
* management logic to figure out unmanaged pages by checking
* (present_pages - managed_pages). And managed_pages should be used
@@ -507,6 +530,9 @@ struct zone {
atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
+#ifdef CONFIG_CMA
+ unsigned long cma_pages;
+#endif
const char *name;
@@ -604,6 +630,15 @@ static inline unsigned long zone_managed_pages(struct zone *zone)
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
+static inline unsigned long zone_cma_pages(struct zone *zone)
+{
+#ifdef CONFIG_CMA
+ return zone->cma_pages;
+#else
+ return 0;
+#endif
+}
+
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
return zone->zone_start_pfn + zone->spanned_pages;
@@ -872,8 +907,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
#endif
}
-extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
-
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
@@ -885,6 +918,18 @@ static inline int local_memory_node(int node_id) { return node_id; };
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool zone_is_zone_device(struct zone *zone)
+{
+ return zone_idx(zone) == ZONE_DEVICE;
+}
+#else
+static inline bool zone_is_zone_device(struct zone *zone)
+{
+ return false;
+}
+#endif
+
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
@@ -1273,13 +1318,14 @@ extern size_t mem_section_usage_size(void);
* which results in PFN_SECTION_SHIFT equal 6.
* To sum it up, at least 6 bits are available.
*/
-#define SECTION_MARKED_PRESENT (1UL<<0)
-#define SECTION_HAS_MEM_MAP (1UL<<1)
-#define SECTION_IS_ONLINE (1UL<<2)
-#define SECTION_IS_EARLY (1UL<<3)
-#define SECTION_MAP_LAST_BIT (1UL<<4)
-#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
-#define SECTION_NID_SHIFT 3
+#define SECTION_MARKED_PRESENT (1UL<<0)
+#define SECTION_HAS_MEM_MAP (1UL<<1)
+#define SECTION_IS_ONLINE (1UL<<2)
+#define SECTION_IS_EARLY (1UL<<3)
+#define SECTION_TAINT_ZONE_DEVICE (1UL<<4)
+#define SECTION_MAP_LAST_BIT (1UL<<5)
+#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
+#define SECTION_NID_SHIFT 3
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
@@ -1318,6 +1364,13 @@ static inline int online_section(struct mem_section *section)
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
+static inline int online_device_section(struct mem_section *section)
+{
+ unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
+
+ return section && ((section->section_mem_map & flags) == flags);
+}
+
static inline int online_section_nr(unsigned long nr)
{
return online_section(__nr_to_section(nr));
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index c425290b21e2..7d45b5f989b0 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -846,4 +846,46 @@ struct auxiliary_device_id {
kernel_ulong_t driver_data;
};
+/* Surface System Aggregator Module */
+
+#define SSAM_MATCH_TARGET 0x1
+#define SSAM_MATCH_INSTANCE 0x2
+#define SSAM_MATCH_FUNCTION 0x4
+
+struct ssam_device_id {
+ __u8 match_flags;
+
+ __u8 domain;
+ __u8 category;
+ __u8 target;
+ __u8 instance;
+ __u8 function;
+
+ kernel_ulong_t driver_data;
+};
+
+/*
+ * DFL (Device Feature List)
+ *
+ * DFL defines a linked list of feature headers within the device MMIO space to
+ * provide an extensible way of adding features. Software can walk through these
+ * predefined data structures to enumerate features. It is currently used by FPGA devices.
+ * See Documentation/fpga/dfl.rst for more information.
+ *
+ * The dfl bus type is introduced to match the individual feature devices (dfl
+ * devices) for specific dfl drivers.
+ */
+
+/**
+ * struct dfl_device_id - dfl device identifier
+ * @type: DFL FIU type of the device. See enum dfl_id_type.
+ * @feature_id: feature identifier local to its DFL FIU type.
+ * @driver_data: driver specific data.
+ */
+struct dfl_device_id {
+ __u16 type;
+ __u16 feature_id;
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
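A hedged sketch of the id table a dfl feature driver might expose; the type/feature_id values are illustrative, and MODULE_DEVICE_TABLE() support for the dfl bus is assumed here rather than shown by this diff:

static const struct dfl_device_id my_dfl_ids[] = {
	{ .type = 0 /* e.g. an FME feature */, .feature_id = 0x10 },
	{ }
};
MODULE_DEVICE_TABLE(dfl, my_dfl_ids);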
diff --git a/include/linux/module.h b/include/linux/module.h
index 7a0bcb5b1ffc..59f094fa6f74 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -392,18 +392,6 @@ struct module {
const s32 *gpl_crcs;
bool using_gplonly_symbols;
-#ifdef CONFIG_UNUSED_SYMBOLS
- /* unused exported symbols. */
- const struct kernel_symbol *unused_syms;
- const s32 *unused_crcs;
- unsigned int num_unused_syms;
-
- /* GPL-only, unused exported symbols. */
- unsigned int num_unused_gpl_syms;
- const struct kernel_symbol *unused_gpl_syms;
- const s32 *unused_gpl_crcs;
-#endif
-
#ifdef CONFIG_MODULE_SIG
/* Signature was verified. */
bool sig_ok;
@@ -411,11 +399,6 @@ struct module {
bool async_probe_requested;
- /* symbols that will be GPL-only in the near future. */
- const struct kernel_symbol *gpl_future_syms;
- const s32 *gpl_future_crcs;
- unsigned int num_gpl_future_syms;
-
/* Exception table */
unsigned int num_exentries;
struct exception_table_entry *extable;
@@ -550,8 +533,6 @@ static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
}
#endif
-extern struct mutex module_mutex;
-
/* FIXME: It'd be nice to isolate modules during init, too, so they
aren't used before they (may) fail. But presently too much code
(IDE & SCSI) require entry into the module during init.*/
@@ -586,20 +567,9 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
return within_module_init(addr, mod) || within_module_core(addr, mod);
}
-/* Search for module by name: must hold module_mutex. */
+/* Search for module by name: must be in a RCU-sched critical section. */
struct module *find_module(const char *name);
-struct symsearch {
- const struct kernel_symbol *start, *stop;
- const s32 *crcs;
- enum mod_license {
- NOT_GPL_ONLY,
- GPL_ONLY,
- WILL_BE_GPL_ONLY,
- } license;
- bool unused;
-};
-
/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
symnum out of range. */
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
@@ -608,10 +578,6 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name);
-int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *, unsigned long),
- void *data);
-
extern void __noreturn __module_put_and_exit(struct module *mod,
long code);
#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
@@ -795,14 +761,6 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name)
return 0;
}
-static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *,
- unsigned long),
- void *data)
-{
- return 0;
-}
-
static inline int register_module_notifier(struct notifier_block *nb)
{
/* no events will happen anyway, so this can always succeed */
@@ -891,4 +849,8 @@ static inline bool module_sig_ok(struct module *module)
}
#endif /* CONFIG_MODULE_SIG */
+int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *, unsigned long),
+ void *data);
+
#endif /* _LINUX_MODULE_H */
diff --git a/include/linux/mount.h b/include/linux/mount.h
index aaf343b38671..5d92a7e1a742 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -72,14 +72,20 @@ struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
+ struct user_namespace *mnt_userns;
} __randomize_layout;
+static inline struct user_namespace *mnt_user_ns(const struct vfsmount *mnt)
+{
+ /* Pairs with smp_store_release() in do_idmap_mount(). */
+ return smp_load_acquire(&mnt->mnt_userns);
+}
+
struct file; /* forward dec */
struct path;
extern int mnt_want_write(struct vfsmount *mnt);
extern int mnt_want_write_file(struct file *file);
-extern int mnt_clone_write(struct vfsmount *mnt);
extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index d13958de6d8a..a0d572855444 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -53,6 +53,7 @@
#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */
#define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */
#define SPINOR_OP_SRST 0x99 /* Software Reset */
+#define SPINOR_OP_GBULK 0x98 /* Global Block Unlock */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index dcd185cbfe79..0cd631a19727 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -199,29 +199,4 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
-/*
- * These values are chosen such that FAIL and SUCCESS match the
- * values of the regular mutex_trylock().
- */
-enum mutex_trylock_recursive_enum {
- MUTEX_TRYLOCK_FAILED = 0,
- MUTEX_TRYLOCK_SUCCESS = 1,
- MUTEX_TRYLOCK_RECURSIVE,
-};
-
-/**
- * mutex_trylock_recursive - trylock variant that allows recursive locking
- * @lock: mutex to be locked
- *
- * This function should not be used, _ever_. It is purely for hysterical GEM
- * raisins, and once those are gone this will be removed.
- *
- * Returns:
- * - MUTEX_TRYLOCK_FAILED - trylock failed,
- * - MUTEX_TRYLOCK_SUCCESS - lock acquired,
- * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
- */
-extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock);
-
#endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/namei.h b/include/linux/namei.h
index a4bb992623c4..b9605b2b46e7 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -46,6 +46,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
#define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */
#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
+#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */
/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 55c735997805..cec526c8043d 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -26,7 +26,7 @@ struct nd_device_driver {
struct device_driver drv;
unsigned long type;
int (*probe)(struct device *dev);
- int (*remove)(struct device *dev);
+ void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
void (*notify)(struct device *dev, enum nvdimm_event event);
};
diff --git a/include/linux/net.h b/include/linux/net.h
index 9e2324efc26a..ba736b457a06 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -42,8 +42,6 @@ struct net;
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
-#define PROTO_CMSG_DATA_ONLY 0x0001
-
#ifndef ARCH_HAS_SOCKET_TYPES
/**
* enum sock_type - Socket types
@@ -138,7 +136,6 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
struct proto_ops {
int family;
- unsigned int flags;
struct module *owner;
int (*release) (struct socket *sock);
int (*bind) (struct socket *sock,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 934de56644e7..3de38d6a0aea 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -84,6 +84,12 @@ enum {
NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */
NETIF_F_HW_MACSEC_BIT, /* Offload MACsec operations */
+ NETIF_F_GRO_UDP_FWD_BIT, /* Allow UDP GRO for forwarding */
+
+ NETIF_F_HW_HSR_TAG_INS_BIT, /* Offload HSR tag insertion */
+ NETIF_F_HW_HSR_TAG_RM_BIT, /* Offload HSR tag removal */
+ NETIF_F_HW_HSR_FWD_BIT, /* Offload HSR forwarding */
+ NETIF_F_HW_HSR_DUP_BIT, /* Offload HSR duplication */
/*
* Add your fresh new feature above and remember to update
@@ -157,6 +163,11 @@ enum {
#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST)
#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
#define NETIF_F_HW_MACSEC __NETIF_F(HW_MACSEC)
+#define NETIF_F_GRO_UDP_FWD __NETIF_F(GRO_UDP_FWD)
+#define NETIF_F_HW_HSR_TAG_INS __NETIF_F(HW_HSR_TAG_INS)
+#define NETIF_F_HW_HSR_TAG_RM __NETIF_F(HW_HSR_TAG_RM)
+#define NETIF_F_HW_HSR_FWD __NETIF_F(HW_HSR_FWD)
+#define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP)
/* Finds the next feature with the highest number of the range of start till 0.
*/
@@ -234,7 +245,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
/* Changeable features with no special hardware requirements that defaults to off. */
-#define NETIF_F_SOFT_FEATURES_OFF NETIF_F_GRO_FRAGLIST
+#define NETIF_F_SOFT_FEATURES_OFF (NETIF_F_GRO_FRAGLIST | NETIF_F_GRO_UDP_FWD)
#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 259be67644e3..f06fbee8638e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -347,6 +347,7 @@ struct napi_struct {
struct list_head dev_list;
struct hlist_node napi_hash_node;
unsigned int napi_id;
+ struct task_struct *thread;
};
enum {
@@ -358,6 +359,7 @@ enum {
NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/
+ NAPI_STATE_THREADED, /* The poll is performed inside its own thread */
};
enum {
@@ -369,6 +371,7 @@ enum {
NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
+ NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
};
enum gro_result {
@@ -376,7 +379,6 @@ enum gro_result {
GRO_MERGED_FREE,
GRO_HELD,
GRO_NORMAL,
- GRO_DROP,
GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;
@@ -495,6 +497,8 @@ static inline bool napi_complete(struct napi_struct *n)
return napi_complete_done(n, 0);
}
+int dev_set_threaded(struct net_device *dev, bool threaded);
+
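A hedged sketch of a driver opting in to threaded NAPI once its NAPI contexts are registered; the wrapper is illustrative:

static void my_enable_threaded_napi(struct net_device *netdev)
{
	/* call after netif_napi_add() has registered every NAPI instance */
	if (dev_set_threaded(netdev, true))
		netdev_warn(netdev, "threaded NAPI not enabled, staying on softirq\n");
}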
/**
* napi_disable - prevent NAPI from scheduling
* @n: NAPI context
@@ -504,20 +508,7 @@ static inline bool napi_complete(struct napi_struct *n)
*/
void napi_disable(struct napi_struct *n);
-/**
- * napi_enable - enable NAPI scheduling
- * @n: NAPI context
- *
- * Resume NAPI from being scheduled on this context.
- * Must be paired with napi_disable.
- */
-static inline void napi_enable(struct napi_struct *n)
-{
- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- smp_mb__before_atomic();
- clear_bit(NAPI_STATE_SCHED, &n->state);
- clear_bit(NAPI_STATE_NPSVC, &n->state);
-}
+void napi_enable(struct napi_struct *n);
/**
* napi_synchronize - wait until NAPI is not running
@@ -859,6 +850,7 @@ enum tc_setup_type {
TC_SETUP_QDISC_ETS,
TC_SETUP_QDISC_TBF,
TC_SETUP_QDISC_FIFO,
+ TC_SETUP_QDISC_HTB,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -1213,19 +1205,6 @@ struct netdev_net_notifier {
* struct netdev_phys_item_id *ppid)
* Called to get the parent ID of the physical port of this device.
*
- * void (*ndo_udp_tunnel_add)(struct net_device *dev,
- * struct udp_tunnel_info *ti);
- * Called by UDP tunnel to notify a driver about the UDP port and socket
- * address family that a UDP tunnel is listnening to. It is called only
- * when a new port starts listening. The operation is protected by the
- * RTNL.
- *
- * void (*ndo_udp_tunnel_del)(struct net_device *dev,
- * struct udp_tunnel_info *ti);
- * Called by UDP tunnel to notify the driver about a UDP port and socket
- * address family that the UDP tunnel is not listening to anymore. The
- * operation is protected by the RTNL.
- *
* void* (*ndo_dfwd_add_station)(struct net_device *pdev,
* struct net_device *dev)
* Called by upper layer devices to accelerate switching or other
@@ -1412,6 +1391,8 @@ struct net_device_ops {
struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
+ struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev,
+ struct sock *sk);
netdev_features_t (*ndo_fix_features)(struct net_device *dev,
netdev_features_t features);
int (*ndo_set_features)(struct net_device *dev,
@@ -1464,10 +1445,6 @@ struct net_device_ops {
struct netdev_phys_item_id *ppid);
int (*ndo_get_phys_port_name)(struct net_device *dev,
char *name, size_t len);
- void (*ndo_udp_tunnel_add)(struct net_device *dev,
- struct udp_tunnel_info *ti);
- void (*ndo_udp_tunnel_del)(struct net_device *dev,
- struct udp_tunnel_info *ti);
void* (*ndo_dfwd_add_station)(struct net_device *pdev,
struct net_device *dev);
void (*ndo_dfwd_del_station)(struct net_device *pdev,
@@ -1607,6 +1584,12 @@ enum netdev_priv_flags {
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
+/* Specifies the type of the struct net_device::ml_priv pointer */
+enum netdev_ml_priv_type {
+ ML_PRIV_NONE,
+ ML_PRIV_CAN,
+};
+
/**
* struct net_device - The DEVICE structure.
*
@@ -1802,6 +1785,7 @@ enum netdev_priv_flags {
* @nd_net: Network namespace this network device is inside
*
* @ml_priv: Mid-layer private
+ * @ml_priv_type: Mid-layer private type
* @lstats: Loopback statistics
* @tstats: Tunnel statistics
* @dstats: Dummy statistics
@@ -1842,6 +1826,8 @@ enum netdev_priv_flags {
*
* @wol_enabled: Wake-on-LAN is enabled
*
+ * @threaded: napi threaded mode is enabled
+ *
* @net_notifier_list: List of per-net netdev notifier block
* that follow this device when it is moved
* to another network namespace.
@@ -1873,7 +1859,6 @@ struct net_device {
unsigned long mem_end;
unsigned long mem_start;
unsigned long base_addr;
- int irq;
/*
* Some hardware also needs these fields (state,dev_list,
@@ -1895,6 +1880,23 @@ struct net_device {
struct list_head lower;
} adj_list;
+ /* Read-mostly cache-line for fast-path access */
+ unsigned int flags;
+ unsigned int priv_flags;
+ const struct net_device_ops *netdev_ops;
+ int ifindex;
+ unsigned short gflags;
+ unsigned short hard_header_len;
+
+ /* Note : dev->mtu is often read without holding a lock.
+ * Writers usually hold RTNL.
+ * It is recommended to use READ_ONCE() to annotate the reads,
+ * and to use WRITE_ONCE() to annotate the writes.
+ */
+ unsigned int mtu;
+ unsigned short needed_headroom;
+ unsigned short needed_tailroom;
+
netdev_features_t features;
netdev_features_t hw_features;
netdev_features_t wanted_features;
@@ -1903,10 +1905,15 @@ struct net_device {
netdev_features_t mpls_features;
netdev_features_t gso_partial_features;
- int ifindex;
+ unsigned int min_mtu;
+ unsigned int max_mtu;
+ unsigned short type;
+ unsigned char min_header_len;
+ unsigned char name_assign_type;
+
int group;
- struct net_device_stats stats;
+ struct net_device_stats stats; /* not used by modern drivers */
atomic_long_t rx_dropped;
atomic_long_t tx_dropped;
@@ -1920,7 +1927,6 @@ struct net_device {
const struct iw_handler_def *wireless_handlers;
struct iw_public_data *wireless_data;
#endif
- const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
const struct l3mdev_ops *l3mdev_ops;
@@ -1939,34 +1945,12 @@ struct net_device {
const struct header_ops *header_ops;
- unsigned int flags;
- unsigned int priv_flags;
-
- unsigned short gflags;
- unsigned short padded;
-
unsigned char operstate;
unsigned char link_mode;
unsigned char if_port;
unsigned char dma;
- /* Note : dev->mtu is often read without holding a lock.
- * Writers usually hold RTNL.
- * It is recommended to use READ_ONCE() to annotate the reads,
- * and to use WRITE_ONCE() to annotate the writes.
- */
- unsigned int mtu;
- unsigned int min_mtu;
- unsigned int max_mtu;
- unsigned short type;
- unsigned short hard_header_len;
- unsigned char min_header_len;
- unsigned char name_assign_type;
-
- unsigned short needed_headroom;
- unsigned short needed_tailroom;
-
/* Interface address info. */
unsigned char perm_addr[MAX_ADDR_LEN];
unsigned char addr_assign_type;
@@ -1977,7 +1961,10 @@ struct net_device {
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
+ unsigned short padded;
+
spinlock_t addr_list_lock;
+ int irq;
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
@@ -2114,8 +2101,10 @@ struct net_device {
possible_net_t nd_net;
/* mid-layer private */
+ void *ml_priv;
+ enum netdev_ml_priv_type ml_priv_type;
+
union {
- void *ml_priv;
struct pcpu_lstats __percpu *lstats;
struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats;
@@ -2159,6 +2148,7 @@ struct net_device {
struct lock_class_key *qdisc_running_key;
bool proto_down;
unsigned wol_enabled:1;
+ unsigned threaded:1;
struct list_head net_notifier_list;
@@ -2305,6 +2295,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev)
netdev_set_rx_headroom(dev, -1);
}
+static inline void *netdev_get_ml_priv(struct net_device *dev,
+ enum netdev_ml_priv_type type)
+{
+ if (dev->ml_priv_type != type)
+ return NULL;
+
+ return dev->ml_priv;
+}
+
+static inline void netdev_set_ml_priv(struct net_device *dev,
+ void *ml_priv,
+ enum netdev_ml_priv_type type)
+{
+ WARN(dev->ml_priv_type && dev->ml_priv_type != type,
+ "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
+ dev->ml_priv_type, type);
+ WARN(!dev->ml_priv_type && dev->ml_priv,
+ "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
+
+ dev->ml_priv = ml_priv;
+ dev->ml_priv_type = type;
+}
+
/*
* Net namespace inlines
*/
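A sketch of how a CAN-type user pairs the new typed accessors when attaching and looking up the mid-layer private area; the wrapper names are illustrative:

static void attach_can_ml(struct net_device *dev, struct can_ml_priv *can_ml)
{
	netdev_set_ml_priv(dev, can_ml, ML_PRIV_CAN);
}

static struct can_ml_priv *get_can_ml(struct net_device *dev)
{
	/* typed lookup: returns NULL if ml_priv is owned by another type */
	return netdev_get_ml_priv(dev, ML_PRIV_CAN);
}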
@@ -2633,6 +2646,7 @@ enum netdev_lag_hash {
NETDEV_LAG_HASH_L23,
NETDEV_LAG_HASH_E23,
NETDEV_LAG_HASH_E34,
+ NETDEV_LAG_HASH_VLAN_SRCMAC,
NETDEV_LAG_HASH_UNKNOWN,
};
@@ -2876,6 +2890,8 @@ int init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
bool all_slaves);
+struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
+ struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -3918,6 +3934,9 @@ int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
struct netlink_ext_ack *extack);
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
struct netlink_ext_ack *extack);
+int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack);
+int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
@@ -3944,14 +3963,42 @@ int xdp_umem_query(struct net_device *dev, u16 queue_id);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
+int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
bool is_skb_forwardable(const struct net_device *dev,
const struct sk_buff *skb);
+static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
+ const struct sk_buff *skb,
+ const bool check_mtu)
+{
+ const u32 vlan_hdr_len = 4; /* VLAN_HLEN */
+ unsigned int len;
+
+ if (!(dev->flags & IFF_UP))
+ return false;
+
+ if (!check_mtu)
+ return true;
+
+ len = dev->mtu + dev->hard_header_len + vlan_hdr_len;
+ if (skb->len <= len)
+ return true;
+
+ /* if TSO is enabled, we don't care about the length as the packet
+ * could be forwarded without being segmented beforehand
+ */
+ if (skb_is_gso(skb))
+ return true;
+
+ return false;
+}
+
static __always_inline int ____dev_forward_skb(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ const bool check_mtu)
{
if (skb_orphan_frags(skb, GFP_ATOMIC) ||
- unlikely(!is_skb_forwardable(dev, skb))) {
+ unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
@@ -4352,6 +4399,7 @@ static inline void netif_tx_disable(struct net_device *dev)
local_bh_disable();
cpu = smp_processor_id();
+ spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -4359,6 +4407,7 @@ static inline void netif_tx_disable(struct net_device *dev)
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
+ spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 0101747de549..f0f3a8354c3c 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -463,8 +463,6 @@ extern struct nf_ct_hook __rcu *nf_ct_hook;
struct nlattr;
struct nfnl_ct_hook {
- struct nf_conn *(*get_ct)(const struct sk_buff *skb,
- enum ip_conntrack_info *ctinfo);
size_t (*build_size)(const struct nf_conn *ct);
int (*build)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9f118771e248..0bcf98098c5a 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -11,6 +11,8 @@
struct net;
+void do_trace_netlink_extack(const char *msg);
+
static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
@@ -90,6 +92,8 @@ struct netlink_ext_ack {
static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
+ do_trace_netlink_extack(__msg); \
+ \
if (__extack) \
__extack->_msg = __msg; \
} while (0)
@@ -110,6 +114,8 @@ struct netlink_ext_ack {
static const char __msg[] = msg; \
struct netlink_ext_ack *__extack = (extack); \
\
+ do_trace_netlink_extack(__msg); \
+ \
if (__extack) { \
__extack->_msg = __msg; \
__extack->bad_attr = (attr); \
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 681ed98e4ba8..eadaabd18dc7 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -379,18 +379,20 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
-extern int nfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
+extern int nfs_getattr(struct user_namespace *, const struct path *,
+ struct kstat *, u32, unsigned int);
extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
-extern int nfs_permission(struct inode *, int);
+extern int nfs_permission(struct user_namespace *, struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
extern int nfs_attribute_cache_expired(struct inode *inode);
extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
+extern int nfs_clear_invalid_mapping(struct address_space *mapping);
extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_revalidate_mapping_rcu(struct inode *inode);
-extern int nfs_setattr(struct dentry *, struct iattr *);
+extern int nfs_setattr(struct user_namespace *, struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
struct nfs4_label *label);
@@ -570,8 +572,6 @@ nfs_have_writebacks(struct inode *inode)
extern int nfs_readpage(struct file *, struct page *);
extern int nfs_readpages(struct file *, struct address_space *,
struct list_head *, unsigned);
-extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
- struct page *);
/*
* inline functions
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 38e60ec742df..6f76b32a0238 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -142,7 +142,7 @@ struct nfs_server {
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
atomic_long_t writeback; /* number of writeback pages */
- int flags; /* various flags */
+ unsigned int flags; /* various flags */
/* The following are for internal use only. Also see uapi/linux/nfs_mount.h */
#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
@@ -153,6 +153,8 @@ struct nfs_server {
#define NFS_MOUNT_LOCAL_FCNTL 0x200000
#define NFS_MOUNT_SOFTERR 0x400000
#define NFS_MOUNT_SOFTREVAL 0x800000
+#define NFS_MOUNT_WRITE_EAGER 0x01000000
+#define NFS_MOUNT_WRITE_WAIT 0x02000000
unsigned int caps; /* server capabilities */
unsigned int rsize; /* read size */
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index 103d44695323..0ba99c513649 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -38,5 +38,8 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
extern int
nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
struct posix_acl **pacl);
+extern bool
+nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt,
+ struct posix_acl **pacl);
#endif /* __LINUX_NFSACL_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index bfed36e342cc..b08787cd0881 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -697,7 +697,11 @@ enum nvme_opcode {
nvme_opcode_name(nvme_cmd_resv_register), \
nvme_opcode_name(nvme_cmd_resv_report), \
nvme_opcode_name(nvme_cmd_resv_acquire), \
- nvme_opcode_name(nvme_cmd_resv_release))
+ nvme_opcode_name(nvme_cmd_resv_release), \
+ nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
+ nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
+ nvme_opcode_name(nvme_cmd_zone_append))
+
/*
@@ -1473,20 +1477,29 @@ enum {
NVME_SC_SGL_INVALID_DATA = 0xf,
NVME_SC_SGL_INVALID_METADATA = 0x10,
NVME_SC_SGL_INVALID_TYPE = 0x11,
-
+ NVME_SC_CMB_INVALID_USE = 0x12,
+ NVME_SC_PRP_INVALID_OFFSET = 0x13,
+ NVME_SC_ATOMIC_WU_EXCEEDED = 0x14,
+ NVME_SC_OP_DENIED = 0x15,
NVME_SC_SGL_INVALID_OFFSET = 0x16,
- NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
-
+ NVME_SC_RESERVED = 0x17,
+ NVME_SC_HOST_ID_INCONSIST = 0x18,
+ NVME_SC_KA_TIMEOUT_EXPIRED = 0x19,
+ NVME_SC_KA_TIMEOUT_INVALID = 0x1A,
+ NVME_SC_ABORTED_PREEMPT_ABORT = 0x1B,
NVME_SC_SANITIZE_FAILED = 0x1C,
NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
-
+ NVME_SC_SGL_INVALID_GRANULARITY = 0x1E,
+ NVME_SC_CMD_NOT_SUP_CMB_QUEUE = 0x1F,
NVME_SC_NS_WRITE_PROTECTED = 0x20,
NVME_SC_CMD_INTERRUPTED = 0x21,
+ NVME_SC_TRANSIENT_TR_ERR = 0x22,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
NVME_SC_RESERVATION_CONFLICT = 0x83,
+ NVME_SC_FORMAT_IN_PROGRESS = 0x84,
/*
* Command Specific Status:
@@ -1519,8 +1532,15 @@ enum {
NVME_SC_NS_NOT_ATTACHED = 0x11a,
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
NVME_SC_CTRL_LIST_INVALID = 0x11c,
+ NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d,
NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
+ NVME_SC_CTRL_ID_INVALID = 0x11f,
+ NVME_SC_SEC_CTRL_STATE_INVALID = 0x120,
+ NVME_SC_CTRL_RES_NUM_INVALID = 0x121,
+ NVME_SC_RES_ID_INVALID = 0x122,
NVME_SC_PMR_SAN_PROHIBITED = 0x123,
+ NVME_SC_ANA_GROUP_ID_INVALID = 0x124,
+ NVME_SC_ANA_ATTACH_FAILED = 0x125,
/*
* I/O Command Set Specific - NVM commands:
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
index 577f51436cf9..7e72d975cb76 100644
--- a/include/linux/objtool.h
+++ b/include/linux/objtool.h
@@ -29,11 +29,14 @@ struct unwind_hint {
*
* UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
* sp_reg+sp_offset points to the iret return frame.
+ *
+ * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function.
+ * Useful for code which doesn't have an ELF function annotation.
*/
#define UNWIND_HINT_TYPE_CALL 0
#define UNWIND_HINT_TYPE_REGS 1
#define UNWIND_HINT_TYPE_REGS_PARTIAL 2
-#define UNWIND_HINT_TYPE_RET_OFFSET 3
+#define UNWIND_HINT_TYPE_FUNC 3
#ifdef CONFIG_STACK_VALIDATION
@@ -109,6 +112,12 @@ struct unwind_hint {
.popsection
.endm
+.macro STACK_FRAME_NON_STANDARD func:req
+ .pushsection .discard.func_stack_frame_non_standard, "aw"
+ .long \func - .
+ .popsection
+.endm
+
#endif /* __ASSEMBLY__ */
#else /* !CONFIG_STACK_VALIDATION */
@@ -122,6 +131,8 @@ struct unwind_hint {
#define ANNOTATE_INTRA_FUNCTION_CALL
.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
.endm
+.macro STACK_FRAME_NON_STANDARD func:req
+.endm
#endif
#endif /* CONFIG_STACK_VALIDATION */
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 07ca187fc5e4..1d7992a02e36 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -26,9 +26,6 @@ static inline int of_driver_match_device(struct device *dev,
return of_match_device(drv->of_match_table, dev) != NULL;
}
-extern struct platform_device *of_dev_get(struct platform_device *dev);
-extern void of_dev_put(struct platform_device *dev);
-
extern int of_device_add(struct platform_device *pdev);
extern int of_device_register(struct platform_device *ofdev);
extern void of_device_unregister(struct platform_device *ofdev);
@@ -41,11 +38,6 @@ extern int of_device_request_module(struct device *dev);
extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env);
-static inline void of_device_node_put(struct device *dev)
-{
- of_node_put(dev->of_node);
-}
-
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
struct device *cpu_dev;
@@ -97,15 +89,11 @@ static inline int of_device_uevent_modalias(struct device *dev,
return -ENODEV;
}
-static inline void of_device_node_put(struct device *dev) { }
-
-static inline const struct of_device_id *__of_match_device(
+static inline const struct of_device_id *of_match_device(
const struct of_device_id *matches, const struct device *dev)
{
return NULL;
}
-#define of_match_device(matches, dev) \
- __of_match_device(of_match_ptr(matches), (dev))
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index e8b78139f78c..aaf219bd0354 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -33,8 +33,6 @@ static inline int of_irq_parse_oldworld(struct device_node *device, int index,
#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
-extern int of_irq_parse_one(struct device_node *device, int index,
- struct of_phandle_args *out_irq);
extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
@@ -42,6 +40,8 @@ extern int of_irq_to_resource(struct device_node *dev, int index,
extern void of_irq_init(const struct of_device_id *matches);
#ifdef CONFIG_OF_IRQ
+extern int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
@@ -57,6 +57,11 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
extern void of_msi_configure(struct device *dev, struct device_node *np);
u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
+static inline int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq)
+{
+ return -EINVAL;
+}
static inline int of_irq_count(struct device_node *dev)
{
return 0;
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index cfe8c607a628..2b05e7f7c238 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -26,9 +26,6 @@ of_phy_connect(struct net_device *dev, struct device_node *phy_np,
struct phy_device *
of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
void (*hndlr)(struct net_device *));
-struct phy_device *
-of_phy_attach(struct net_device *dev, struct device_node *phy_np,
- u32 flags, phy_interface_t iface);
struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
int of_phy_register_fixed_link(struct device_node *np);
@@ -100,13 +97,6 @@ of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
return NULL;
}
-static inline struct phy_device *of_phy_attach(struct net_device *dev,
- struct device_node *phy_np,
- u32 flags, phy_interface_t iface)
-{
- return NULL;
-}
-
static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
{
return NULL;
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
deleted file mode 100644
index b2a0f15f11fe..000000000000
--- a/include/linux/oprofile.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * @file oprofile.h
- *
- * API for machine-specific interrupts to interface
- * to oprofile.
- *
- * @remark Copyright 2002 OProfile authors
- * @remark Read the file COPYING
- *
- * @author John Levon <levon@movementarian.org>
- */
-
-#ifndef OPROFILE_H
-#define OPROFILE_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/printk.h>
-#include <linux/atomic.h>
-
-/* Each escaped entry is prefixed by ESCAPE_CODE
- * then one of the following codes, then the
- * relevant data.
- * These #defines live in this file so that arch-specific
- * buffer sync'ing code can access them.
- */
-#define ESCAPE_CODE ~0UL
-#define CTX_SWITCH_CODE 1
-#define CPU_SWITCH_CODE 2
-#define COOKIE_SWITCH_CODE 3
-#define KERNEL_ENTER_SWITCH_CODE 4
-#define KERNEL_EXIT_SWITCH_CODE 5
-#define MODULE_LOADED_CODE 6
-#define CTX_TGID_CODE 7
-#define TRACE_BEGIN_CODE 8
-#define TRACE_END_CODE 9
-#define XEN_ENTER_SWITCH_CODE 10
-#define SPU_PROFILING_CODE 11
-#define SPU_CTX_SWITCH_CODE 12
-#define IBS_FETCH_CODE 13
-#define IBS_OP_CODE 14
-
-struct dentry;
-struct file_operations;
-struct pt_regs;
-
-/* Operations structure to be filled in */
-struct oprofile_operations {
- /* create any necessary configuration files in the oprofile fs.
- * Optional. */
- int (*create_files)(struct dentry * root);
- /* Do any necessary interrupt setup. Optional. */
- int (*setup)(void);
- /* Do any necessary interrupt shutdown. Optional. */
- void (*shutdown)(void);
- /* Start delivering interrupts. */
- int (*start)(void);
- /* Stop delivering interrupts. */
- void (*stop)(void);
- /* Arch-specific buffer sync functions.
- * Return value = 0: Success
- * Return value = -1: Failure
- * Return value = 1: Run generic sync function
- */
- int (*sync_start)(void);
- int (*sync_stop)(void);
-
- /* Initiate a stack backtrace. Optional. */
- void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
-
- /* Multiplex between different events. Optional. */
- int (*switch_events)(void);
- /* CPU identification string. */
- char * cpu_type;
-};
-
-/**
- * One-time initialisation. *ops must be set to a filled-in
- * operations structure. This is called even in timer interrupt
- * mode so an arch can set a backtrace callback.
- *
- * If an error occurs, the fields should be left untouched.
- */
-int oprofile_arch_init(struct oprofile_operations * ops);
-
-/**
- * One-time exit/cleanup for the arch.
- */
-void oprofile_arch_exit(void);
-
-/**
- * Add a sample. This may be called from any context.
- */
-void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
-
-/**
- * Add an extended sample. Use this when the PC is not from the regs, and
- * we cannot determine if we're in kernel mode from the regs.
- *
- * This function does perform a backtrace.
- *
- */
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel);
-
-/**
- * Add an hardware sample.
- */
-void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
- unsigned long event, int is_kernel,
- struct task_struct *task);
-
-/* Use this instead when the PC value is not from the regs. Doesn't
- * backtrace. */
-void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event);
-
-/* add a backtrace entry, to be called from the ->backtrace callback */
-void oprofile_add_trace(unsigned long eip);
-
-
-/**
- * Create a file of the given name as a child of the given root, with
- * the specified file operations.
- */
-int oprofilefs_create_file(struct dentry * root,
- char const * name, const struct file_operations * fops);
-
-int oprofilefs_create_file_perm(struct dentry * root,
- char const * name, const struct file_operations * fops, int perm);
-
-/** Create a file for read/write access to an unsigned long. */
-int oprofilefs_create_ulong(struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an unsigned long. */
-int oprofilefs_create_ro_ulong(struct dentry * root,
- char const * name, ulong * val);
-
-/** Create a file for read-only access to an atomic_t. */
-int oprofilefs_create_ro_atomic(struct dentry * root,
- char const * name, atomic_t * val);
-
-/** create a directory */
-struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
-
-/**
- * Write the given asciz string to the given user buffer @buf, updating *offset
- * appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Convert an unsigned long value into ASCII and copy it to the user buffer @buf,
- * updating *offset appropriately. Returns bytes written or -EFAULT.
- */
-ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset);
-
-/**
- * Read an ASCII string for a number from a userspace buffer and fill *val on success.
- * Returns 0 on success, < 0 on error.
- */
-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
-
-/** lock for read/write safety */
-extern raw_spinlock_t oprofilefs_lock;
-
-/**
- * Add the contents of a circular buffer to the event buffer.
- */
-void oprofile_put_buff(unsigned long *buf, unsigned int start,
- unsigned int stop, unsigned int max);
-
-unsigned long oprofile_get_cpu_buffer_size(void);
-void oprofile_cpu_buffer_inc_smpl_lost(void);
-
-/* cpu buffer functions */
-
-struct op_sample;
-
-struct op_entry {
- struct ring_buffer_event *event;
- struct op_sample *sample;
- unsigned long size;
- unsigned long *data;
-};
-
-void oprofile_write_reserve(struct op_entry *entry,
- struct pt_regs * const regs,
- unsigned long pc, int code, int size);
-int oprofile_add_data(struct op_entry *entry, unsigned long val);
-int oprofile_add_data64(struct op_entry *entry, u64 val);
-int oprofile_write_commit(struct op_entry *entry);
-
-#ifdef CONFIG_HW_PERF_EVENTS
-int __init oprofile_perf_init(struct oprofile_operations *ops);
-void oprofile_perf_exit(void);
-char *op_name_from_perf_id(void);
-#else
-static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
-{
- pr_info("oprofile: hardware counters not available\n");
- return -ENODEV;
-}
-static inline void oprofile_perf_exit(void) { }
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* OPROFILE_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index ec5d0290e0ee..04a34c08e0a6 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -592,15 +592,9 @@ static inline void ClearPageCompound(struct page *page)
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
-bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)
-
-static inline bool page_huge_active(struct page *page)
-{
- return 0;
-}
#endif
@@ -816,7 +810,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
/*
* Flags checked when a page is freed. Pages being freed should not have
- * these flags set. It they are, there is a problem.
+ * these flags set. If they are, there is a problem.
*/
#define PAGE_FLAGS_CHECK_AT_FREE \
(1UL << PG_lru | 1UL << PG_locked | \
@@ -827,7 +821,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
/*
* Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have these flags set. It they are set,
+ * Pages being prepped should not have these flags set. If they are set,
* there has been a kernel bug or struct page corruption.
*
* __PG_HWPOISON is exceptional because it needs to be kept beyond page's
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 85bd413e784e..679591301994 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -12,7 +12,6 @@ struct page_counter {
unsigned long low;
unsigned long high;
unsigned long max;
- struct page_counter *parent;
/* effective memory.min and memory.min usage tracking */
unsigned long emin;
@@ -27,6 +26,14 @@ struct page_counter {
/* legacy */
unsigned long watermark;
unsigned long failcnt;
+
+ /*
+ * 'parent' is placed here to be far from 'usage' to reduce
+ * cache false sharing, as 'usage' is written mostly while
+ * parent is frequently read for cgroup's hierarchical
+ * counting nature.
+ */
+ struct page_counter *parent;
};
#if BITS_PER_LONG == 32
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index d5570deff400..20225b067583 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -315,6 +315,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
#define FGP_NOWAIT 0x00000020
#define FGP_FOR_MMAP 0x00000040
#define FGP_HEAD 0x00000080
+#define FGP_ENTRY 0x00000100
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
int fgp_flags, gfp_t cache_gfp_mask);
@@ -450,8 +451,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
}
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
- unsigned int nr_entries, struct page **entries,
- pgoff_t *indices);
+ pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
pgoff_t end, unsigned int nr_pages,
struct page **pages);
@@ -681,8 +681,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
return wait_on_page_bit_killable(compound_head(page), PG_locked);
}
-extern void put_and_wait_on_page_locked(struct page *page);
-
+int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
@@ -757,9 +756,11 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
-int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
+void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
struct pagevec *pvec);
+loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
+ int whence);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index ad4ddc17d403..7f3f19065a9f 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -25,10 +25,6 @@ struct pagevec {
void __pagevec_release(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec);
-unsigned pagevec_lookup_entries(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t start, unsigned nr_entries,
- pgoff_t *indices);
void pagevec_remove_exceptionals(struct pagevec *pvec);
unsigned pagevec_lookup_range(struct pagevec *pvec,
struct address_space *mapping,
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 1fb508c19e83..f981f794c850 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -297,6 +297,37 @@ int __must_check __parport_register_driver(struct parport_driver *,
* parport_register_driver must be a macro so that KBUILD_MODNAME can
* be expanded
*/
+
+/**
+ * parport_register_driver - register a parallel port device driver
+ * @driver: structure describing the driver
+ *
+ * This can be called by a parallel port device driver in order
+ * to receive notifications about ports being found in the
+ * system, as well as ports no longer available.
+ *
+ * If devmodel is true then the new device model is used
+ * for registration.
+ *
+ * The @driver structure is allocated by the caller and must not be
+ * deallocated until after calling parport_unregister_driver().
+ *
+ * If using the non device model:
+ * The driver's attach() function may block. The port that
+ * attach() is given will be valid for the duration of the
+ * callback, but if the driver wants to take a copy of the
+ * pointer it must call parport_get_port() to do so. Calling
+ * parport_register_device() on that port will do this for you.
+ *
+ * The driver's detach() function may block. The port that
+ * detach() is given will be valid for the duration of the
+ * callback, but if the driver wants to take a copy of the
+ * pointer it must call parport_get_port() to do so.
+ *
+ *
+ * Returns 0 on success. The non device model will always succeed,
+ * but the new device model can fail and will return the error code.
+ **/
#define parport_register_driver(driver) \
__parport_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
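
To illustrate the kernel-doc above, a device-model parport driver registration might look roughly like the sketch below; the driver name and the callback bodies are made up for illustration:

#include <linux/module.h>
#include <linux/parport.h>

static void foo_attach(struct parport *port)
{
	pr_info("foo: found %s\n", port->name);
}

static void foo_detach(struct parport *port)
{
	pr_info("foo: %s going away\n", port->name);
}

static struct parport_driver foo_driver = {
	.name		= "foo",
	.match_port	= foo_attach,	/* device-model callback */
	.detach		= foo_detach,
	.devmodel	= true,
};

static int __init foo_init(void)
{
	/* Expands to __parport_register_driver(&foo_driver, THIS_MODULE, ...) */
	return parport_register_driver(&foo_driver);
}

static void __exit foo_exit(void)
{
	parport_unregister_driver(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

With devmodel set, registration can fail and the return value must be checked, as the comment notes.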
diff --git a/include/linux/parser.h b/include/linux/parser.h
index 89e2b23fb888..dd79f45a37b8 100644
--- a/include/linux/parser.h
+++ b/include/linux/parser.h
@@ -29,6 +29,7 @@ typedef struct {
int match_token(char *, const match_table_t table, substring_t args[]);
int match_int(substring_t *, int *result);
+int match_uint(substring_t *s, unsigned int *result);
int match_u64(substring_t *, u64 *result);
int match_octal(substring_t *, int *result);
int match_hex(substring_t *, int *result);
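
The new match_uint() parses an unsigned value out of a substring_t, mirroring match_int(). A hedged sketch of option parsing with it — the token table and the "size=" option name are hypothetical:

#include <linux/errno.h>
#include <linux/parser.h>

enum { Opt_size, Opt_err };

static const match_table_t foo_tokens = {
	{ Opt_size,	"size=%u" },
	{ Opt_err,	NULL },
};

/* Parse "size=<n>" from a mount-style option string into *size. */
static int foo_parse_size(char *opt, unsigned int *size)
{
	substring_t args[MAX_OPT_ARGS];

	if (match_token(opt, foo_tokens, args) != Opt_size)
		return -EINVAL;

	return match_uint(&args[0], size);
}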
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index cc66bec8be90..b82c9b100e97 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -13,6 +13,12 @@
struct pci_epc;
+enum pci_epc_interface_type {
+ UNKNOWN_INTERFACE = -1,
+ PRIMARY_INTERFACE,
+ SECONDARY_INTERFACE,
+};
+
enum pci_epc_irq_type {
PCI_EPC_IRQ_UNKNOWN,
PCI_EPC_IRQ_LEGACY,
@@ -20,6 +26,19 @@ enum pci_epc_irq_type {
PCI_EPC_IRQ_MSIX,
};
+static inline const char *
+pci_epc_interface_string(enum pci_epc_interface_type type)
+{
+ switch (type) {
+ case PRIMARY_INTERFACE:
+ return "primary";
+ case SECONDARY_INTERFACE:
+ return "secondary";
+ default:
+ return "UNKNOWN interface";
+ }
+}
+
/**
* struct pci_epc_ops - set of function pointers for performing EPC operations
* @write_header: ops to populate configuration space header
@@ -36,6 +55,7 @@ enum pci_epc_irq_type {
* @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
* from the MSI-X capability register
* @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
+ * @map_msi_irq: ops to map physical address to MSI address and return MSI data
* @start: ops to start the PCI link
* @stop: ops to stop the PCI link
* @owner: the module owner containing the ops
@@ -58,6 +78,10 @@ struct pci_epc_ops {
int (*get_msix)(struct pci_epc *epc, u8 func_no);
int (*raise_irq)(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
+ int (*map_msi_irq)(struct pci_epc *epc, u8 func_no,
+ phys_addr_t phys_addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
@@ -175,10 +199,12 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
void pci_epc_destroy(struct pci_epc *epc);
-int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
-void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
+ enum pci_epc_interface_type type);
int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr);
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
@@ -195,14 +221,19 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
enum pci_barno, u32 offset);
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no,
+ phys_addr_t phys_addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
u8 func_no);
-unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features
- *epc_features);
+enum pci_barno
+pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
+enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
+ *epc_features, enum pci_barno bar);
struct pci_epc *pci_epc_get(const char *epc_name);
void pci_epc_put(struct pci_epc *epc);
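
With pci_epc_get_first_free_bar() now returning an enum pci_barno, endpoint function drivers can test against NO_BAR instead of an unsigned index. Roughly, under the assumption that the caller already holds the epc_features pointer (the helper name is invented for this sketch):

#include <linux/errno.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int foo_pick_test_bar(const struct pci_epc_features *epc_features,
			     enum pci_barno *test_bar)
{
	*test_bar = pci_epc_get_first_free_bar(epc_features);
	if (*test_bar == NO_BAR)
		return -EINVAL;	/* no BAR left for the function's registers */
	return 0;
}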
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 6644ff3b0702..6833e2160ef1 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -9,11 +9,13 @@
#ifndef __LINUX_PCI_EPF_H
#define __LINUX_PCI_EPF_H
+#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
struct pci_epf;
+enum pci_epc_interface_type;
enum pci_notify_event {
CORE_INIT,
@@ -21,6 +23,7 @@ enum pci_notify_event {
};
enum pci_barno {
+ NO_BAR = -1,
BAR_0,
BAR_1,
BAR_2,
@@ -60,10 +63,13 @@ struct pci_epf_header {
* @bind: ops to perform when a EPC device has been bound to EPF device
* @unbind: ops to perform when a binding has been lost between a EPC device
* and EPF device
+ * @add_cfs: ops to initialize function specific configfs attributes
*/
struct pci_epf_ops {
int (*bind)(struct pci_epf *epf);
void (*unbind)(struct pci_epf *epf);
+ struct config_group *(*add_cfs)(struct pci_epf *epf,
+ struct config_group *group);
};
/**
@@ -118,6 +124,12 @@ struct pci_epf_bar {
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
* @nb: notifier block to notify EPF of any EPC events (like linkup)
* @lock: mutex to protect pci_epf_ops
+ * @sec_epc: the secondary EPC device to which this EPF device is bound
+ * @sec_epc_list: to add pci_epf as list of PCI endpoint functions to secondary
+ * EPC device
+ * @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC
+ * @sec_epc_func_no: unique (physical) function number within the secondary EPC
+ * @group: configfs group associated with the EPF device
*/
struct pci_epf {
struct device dev;
@@ -134,6 +146,13 @@ struct pci_epf {
struct notifier_block nb;
/* mutex to protect against concurrent access of pci_epf_ops */
struct mutex lock;
+
+ /* Below members are to attach secondary EPC to an endpoint function */
+ struct pci_epc *sec_epc;
+ struct list_head sec_epc_list;
+ struct pci_epf_bar sec_epc_bar[6];
+ u8 sec_epc_func_no;
+ struct config_group *group;
};
/**
@@ -164,16 +183,17 @@ static inline void *epf_get_drvdata(struct pci_epf *epf)
return dev_get_drvdata(&epf->dev);
}
-const struct pci_epf_device_id *
-pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf);
struct pci_epf *pci_epf_create(const char *name);
void pci_epf_destroy(struct pci_epf *epf);
int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner);
void pci_epf_unregister_driver(struct pci_epf_driver *driver);
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align);
-void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
+ size_t align, enum pci_epc_interface_type type);
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
+ enum pci_epc_interface_type type);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
+struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
+ struct config_group *group);
#endif /* __LINUX_PCI_EPF_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b32126d26997..86c799c97b77 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1232,6 +1232,15 @@ void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
void pci_release_resource(struct pci_dev *dev, int resno);
+static inline int pci_rebar_bytes_to_size(u64 bytes)
+{
+ bytes = roundup_pow_of_two(bytes);
+
+ /* Return BAR size as defined in the resizable BAR specification */
+ return max(ilog2(bytes), 20) - 20;
+}
+
+u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
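
pci_rebar_bytes_to_size() maps a byte count to the encoding used by the Resizable BAR capability, where size 0 means 1 MB and each step doubles. For example, 1 MB (2^20) gives max(20, 20) - 20 = 0, 256 MB (2^28) gives 8, and a 3 MB request is first rounded up to 4 MB and encodes as 2. A small user-space replica of the same arithmetic, for illustration only:

#include <stdio.h>

/* Illustrative stand-in for roundup_pow_of_two() + ilog2() clamped at 20. */
static int rebar_bytes_to_size(unsigned long long bytes)
{
	int log2 = 0;

	while ((1ULL << log2) < bytes)
		log2++;
	return (log2 > 20 ? log2 : 20) - 20;
}

int main(void)
{
	printf("%d %d %d\n",
	       rebar_bytes_to_size(1ULL << 20),	/* 0: 1 MB   */
	       rebar_bytes_to_size(3ULL << 20),	/* 2: rounds up to 4 MB */
	       rebar_bytes_to_size(1ULL << 28));	/* 8: 256 MB */
	return 0;
}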
@@ -1917,7 +1926,7 @@ enum pci_fixup_pass {
};
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+#define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
class_shift, hook) \
__ADDRESSABLE(hook) \
asm(".section " #sec ", \"a\" \n" \
@@ -1926,10 +1935,33 @@ enum pci_fixup_pass {
".long " #class ", " #class_shift " \n" \
".long " #hook " - . \n" \
".previous \n");
+
+/*
+ * Clang's LTO may rename static functions in C, but has no way to
+ * handle such renamings when referenced from inline asm. To work
+ * around this, create global C stubs for these cases.
+ */
+#ifdef CONFIG_LTO_CLANG
+#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook, stub) \
+ void stub(struct pci_dev *dev); \
+ void stub(struct pci_dev *dev) \
+ { \
+ hook(dev); \
+ } \
+ ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, stub)
+#else
+#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook, stub) \
+ ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook)
+#endif
+
#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
class_shift, hook) \
__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
- class_shift, hook)
+ class_shift, hook, __UNIQUE_ID(hook))
#else
/* Anonymous variables would be nice... */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d8156a5dbee8..a76ccb697bef 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -51,6 +51,7 @@
#define PCI_BASE_CLASS_MEMORY 0x05
#define PCI_CLASS_MEMORY_RAM 0x0500
#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_CXL 0x0502
#define PCI_CLASS_MEMORY_OTHER 0x0580
#define PCI_BASE_CLASS_BRIDGE 0x06
@@ -881,6 +882,7 @@
#define PCI_DEVICE_ID_TI_X620 0xac8d
#define PCI_DEVICE_ID_TI_X420 0xac8e
#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
+#define PCI_DEVICE_ID_TI_J721E 0xb00d
#define PCI_DEVICE_ID_TI_DRA74x 0xb500
#define PCI_DEVICE_ID_TI_DRA72x 0xb501
@@ -2588,6 +2590,8 @@
#define PCI_VENDOR_ID_REDHAT 0x1b36
+#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c
+
#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 9a38f579bc76..fab42cfbd350 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -998,7 +998,7 @@ struct perf_sample_data {
struct perf_raw_record *raw;
struct perf_branch_stack *br_stack;
u64 period;
- u64 weight;
+ union perf_sample_weight weight;
u64 txn;
union perf_mem_data_src data_src;
@@ -1047,7 +1047,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
data->raw = NULL;
data->br_stack = NULL;
data->period = period;
- data->weight = 0;
+ data->weight.full = 0;
data->data_src.val = PERF_MEM_NA;
data->txn = 0;
}
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 8fcdfa52eb4b..cdfc4e9f253e 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -432,14 +432,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
* To be differentiate with macro pte_mkyoung, this macro is used on platforms
* where software maintains page access bit.
*/
-#ifndef pte_sw_mkyoung
-static inline pte_t pte_sw_mkyoung(pte_t pte)
-{
- return pte;
-}
-#define pte_sw_mkyoung pte_sw_mkyoung
-#endif
-
#ifndef pte_savedwrite
#define pte_savedwrite pte_write
#endif
@@ -1314,6 +1306,17 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
#endif
}
+/*
+ * the ordering of these checks is important for pmds with _PAGE_DEVMAP set.

+ * if we check pmd_trans_unstable() first we will trip the bad_pmd() check
+ * inside of pmd_none_or_trans_huge_or_clear_bad(). this will end up correctly
+ * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
+ */
+static inline int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+ return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
#ifndef CONFIG_NUMA_BALANCING
/*
* Technically a PTE can be PROTNONE even when not doing NUMA balancing but
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 9effb511acde..1a12e4436b5b 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -71,11 +71,11 @@ extern const int phy_10gbit_features_array[1];
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
- * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
- * the attached driver handles the interrupt
+ * or not desired for this PHY. Set to PHY_MAC_INTERRUPT if
+ * the attached MAC driver handles the interrupt
*/
#define PHY_POLL -1
-#define PHY_IGNORE_INTERRUPT -2
+#define PHY_MAC_INTERRUPT -2
#define PHY_IS_INTERNAL 0x00000001
#define PHY_RST_AFTER_CLK_EN 0x00000002
@@ -104,8 +104,10 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
* @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
* @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
+ * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
* @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX
* @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX
+ * @PHY_INTERFACE_MODE_5GBASER: 5G BaseR
* @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI
* @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface
* @PHY_INTERFACE_MODE_10GBASER: 10G BaseR
@@ -135,8 +137,10 @@ typedef enum {
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
+ PHY_INTERFACE_MODE_100BASEX,
PHY_INTERFACE_MODE_1000BASEX,
PHY_INTERFACE_MODE_2500BASEX,
+ PHY_INTERFACE_MODE_5GBASER,
PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_XAUI,
/* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
@@ -207,6 +211,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "1000base-x";
case PHY_INTERFACE_MODE_2500BASEX:
return "2500base-x";
+ case PHY_INTERFACE_MODE_5GBASER:
+ return "5gbase-r";
case PHY_INTERFACE_MODE_RXAUI:
return "rxaui";
case PHY_INTERFACE_MODE_XAUI:
@@ -217,6 +223,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "usxgmii";
case PHY_INTERFACE_MODE_10GKR:
return "10gbase-kr";
+ case PHY_INTERFACE_MODE_100BASEX:
+ return "100base-x";
default:
return "unknown";
}
@@ -484,6 +492,7 @@ struct macsec_ops;
* @sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
* @loopback_enabled: Set true if this PHY has been loopbacked successfully.
* @downshifted_rate: Set true if link speed has been downshifted.
+ * @is_on_sfp_module: Set true if PHY is located on an SFP module.
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
* @irq: IRQ number of the PHY's interrupt (-1 if none)
@@ -499,6 +508,7 @@ struct macsec_ops;
*
* @speed: Current link speed
* @duplex: Current duplex
+ * @port: Current port
* @pause: Current pause
* @asym_pause: Current asymmetric pause
* @supported: Combined MAC/PHY supported linkmodes
@@ -556,6 +566,7 @@ struct phy_device {
unsigned sysfs_links:1;
unsigned loopback_enabled:1;
unsigned downshifted_rate:1;
+ unsigned is_on_sfp_module:1;
unsigned autoneg:1;
/* The most recently read link state */
@@ -577,6 +588,7 @@ struct phy_device {
*/
int speed;
int duplex;
+ int port;
int pause;
int asym_pause;
u8 master_slave_get;
@@ -644,8 +656,11 @@ struct phy_device {
const struct macsec_ops *macsec_ops;
#endif
};
-#define to_phy_device(d) container_of(to_mdio_device(d), \
- struct phy_device, mdio)
+
+static inline struct phy_device *to_phy_device(const struct device *dev)
+{
+ return container_of(to_mdio_device(dev), struct phy_device, mdio);
+}
/**
* struct phy_tdr_config - Configuration of a TDR raw test
@@ -1193,11 +1208,11 @@ static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad,
* @phydev: the phy_device struct
*
* NOTE: must be kept in sync with addition/removal of PHY_POLL and
- * PHY_IGNORE_INTERRUPT
+ * PHY_MAC_INTERRUPT
*/
static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
{
- return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT;
+ return phydev->irq != PHY_POLL && phydev->irq != PHY_MAC_INTERRUPT;
}
/**
@@ -1284,6 +1299,15 @@ static inline bool phy_is_internal(struct phy_device *phydev)
}
/**
+ * phy_on_sfp - Convenience function for testing if a PHY is on an SFP module
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_on_sfp(struct phy_device *phydev)
+{
+ return phydev->is_on_sfp_module;
+}
+
+/**
* phy_interface_mode_is_rgmii - Convenience function for testing if a
* PHY interface mode is RGMII (all variants)
* @mode: the &phy_interface_t enum
diff --git a/include/linux/platform_data/clk-u300.h b/include/linux/platform_data/clk-u300.h
deleted file mode 100644
index 8429e73911a1..000000000000
--- a/include/linux/platform_data/clk-u300.h
+++ /dev/null
@@ -1 +0,0 @@
-void __init u300_clk_init(void __iomem *base);
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 86376779ab31..5ff8597ceabd 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -1286,6 +1286,16 @@ enum ec_feature_code {
EC_FEATURE_ISH = 40,
/* New TCPMv2 TYPEC_ prefaced commands supported */
EC_FEATURE_TYPEC_CMD = 41,
+ /*
+ * The EC will wait for direction from the AP to enter Type-C alternate
+ * modes or USB4.
+ */
+ EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY = 42,
+ /*
+ * The EC will wait for an acknowledge from the AP after setting the
+ * mux.
+ */
+ EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK = 43,
};
#define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32)
@@ -4600,6 +4610,7 @@ enum ec_codec_i2s_rx_subcmd {
EC_CODEC_I2S_RX_SET_SAMPLE_DEPTH = 0x2,
EC_CODEC_I2S_RX_SET_DAIFMT = 0x3,
EC_CODEC_I2S_RX_SET_BCLK = 0x4,
+ EC_CODEC_I2S_RX_RESET = 0x5,
EC_CODEC_I2S_RX_SUBCMD_COUNT,
};
@@ -4731,6 +4742,7 @@ enum ec_reboot_cmd {
EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */
EC_REBOOT_HIBERNATE = 6, /* Hibernate EC */
EC_REBOOT_HIBERNATE_CLEAR_AP_OFF = 7, /* and clears AP_OFF flag */
+ EC_REBOOT_COLD_AP_OFF = 8, /* Cold-reboot and don't boot AP */
};
/* Flags for ec_params_reboot_ec.reboot_flags */
@@ -5567,6 +5579,32 @@ struct ec_response_typec_discovery {
struct svid_mode_info svids[0];
} __ec_align1;
+/* USB Type-C commands for AP-controlled device policy. */
+#define EC_CMD_TYPEC_CONTROL 0x0132
+
+enum typec_control_command {
+ TYPEC_CONTROL_COMMAND_EXIT_MODES,
+ TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
+ TYPEC_CONTROL_COMMAND_ENTER_MODE,
+};
+
+struct ec_params_typec_control {
+ uint8_t port;
+ uint8_t command; /* enum typec_control_command */
+ uint16_t reserved;
+
+ /*
+ * This section will be interpreted based on |command|. Define a
+ * placeholder structure to avoid having to increase the size and bump
+ * the command version when adding new sub-commands.
+ */
+ union {
+ uint32_t clear_events_mask;
+ uint8_t mode_to_enter; /* enum typec_mode */
+ uint8_t placeholder[128];
+ };
+} __ec_align1;
+
/*
* Gather all status information for a port.
*
@@ -6054,6 +6092,13 @@ struct ec_params_charger_control {
uint8_t allow_charging;
} __ec_align_size1;
+/* Get ACK from the USB-C SS muxes */
+#define EC_CMD_USB_PD_MUX_ACK 0x0603
+
+struct ec_params_usb_pd_mux_ack {
+ uint8_t port; /* USB-C port number */
+} __ec_align1;
+
/*****************************************************************************/
/*
* Reserve a range of host commands for board-specific, experimental, or
diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h
deleted file mode 100644
index 069637e6004f..000000000000
--- a/include/linux/platform_data/dma-atmel.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Header file for the Atmel AHB DMA Controller driver
- *
- * Copyright (C) 2008 Atmel Corporation
- */
-#ifndef AT_HDMAC_H
-#define AT_HDMAC_H
-
-#include <linux/dmaengine.h>
-
-/**
- * struct at_dma_platform_data - Controller configuration parameters
- * @nr_channels: Number of channels supported by hardware (max 8)
- * @cap_mask: dma_capability flags supported by the platform
- */
-struct at_dma_platform_data {
- unsigned int nr_channels;
- dma_cap_mask_t cap_mask;
-};
-
-/**
- * struct at_dma_slave - Controller-specific information about a slave
- * @dma_dev: required DMA master device
- * @cfg: Platform-specific initializer for the CFG register
- */
-struct at_dma_slave {
- struct device *dma_dev;
- u32 cfg;
-};
-
-
-/* Platform-configurable bits in CFG */
-#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */
-
-#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */
-#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */
-#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */
-#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */
-#define ATC_SRC_H2SEL_SW (0x0 << 9)
-#define ATC_SRC_H2SEL_HW (0x1 << 9)
-#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */
-#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */
-#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */
-#define ATC_DST_H2SEL_SW (0x0 << 13)
-#define ATC_DST_H2SEL_HW (0x1 << 13)
-#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */
-#define ATC_SOD (0x1 << 16) /* Stop On Done */
-#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */
-#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */
-#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */
-#define ATC_LOCK_IF_L_CHUNK (0x0 << 22)
-#define ATC_LOCK_IF_L_BUFFER (0x1 << 22)
-#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */
-#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */
-#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28)
-#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
-#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
-
-
-#endif /* AT_HDMAC_H */
diff --git a/include/linux/platform_data/dma-coh901318.h b/include/linux/platform_data/dma-coh901318.h
deleted file mode 100644
index 4cca529f8d56..000000000000
--- a/include/linux/platform_data/dma-coh901318.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform data for the COH901318 DMA controller
- * Copyright (C) 2007-2013 ST-Ericsson
- */
-
-#ifndef PLAT_COH901318_H
-#define PLAT_COH901318_H
-
-#ifdef CONFIG_COH901318
-
-/* We only support the U300 DMA channels */
-#define U300_DMA_MSL_TX_0 0
-#define U300_DMA_MSL_TX_1 1
-#define U300_DMA_MSL_TX_2 2
-#define U300_DMA_MSL_TX_3 3
-#define U300_DMA_MSL_TX_4 4
-#define U300_DMA_MSL_TX_5 5
-#define U300_DMA_MSL_TX_6 6
-#define U300_DMA_MSL_RX_0 7
-#define U300_DMA_MSL_RX_1 8
-#define U300_DMA_MSL_RX_2 9
-#define U300_DMA_MSL_RX_3 10
-#define U300_DMA_MSL_RX_4 11
-#define U300_DMA_MSL_RX_5 12
-#define U300_DMA_MSL_RX_6 13
-#define U300_DMA_MMCSD_RX_TX 14
-#define U300_DMA_MSPRO_TX 15
-#define U300_DMA_MSPRO_RX 16
-#define U300_DMA_UART0_TX 17
-#define U300_DMA_UART0_RX 18
-#define U300_DMA_APEX_TX 19
-#define U300_DMA_APEX_RX 20
-#define U300_DMA_PCM_I2S0_TX 21
-#define U300_DMA_PCM_I2S0_RX 22
-#define U300_DMA_PCM_I2S1_TX 23
-#define U300_DMA_PCM_I2S1_RX 24
-#define U300_DMA_XGAM_CDI 25
-#define U300_DMA_XGAM_PDI 26
-#define U300_DMA_SPI_TX 27
-#define U300_DMA_SPI_RX 28
-#define U300_DMA_GENERAL_PURPOSE_0 29
-#define U300_DMA_GENERAL_PURPOSE_1 30
-#define U300_DMA_GENERAL_PURPOSE_2 31
-#define U300_DMA_GENERAL_PURPOSE_3 32
-#define U300_DMA_GENERAL_PURPOSE_4 33
-#define U300_DMA_GENERAL_PURPOSE_5 34
-#define U300_DMA_GENERAL_PURPOSE_6 35
-#define U300_DMA_GENERAL_PURPOSE_7 36
-#define U300_DMA_GENERAL_PURPOSE_8 37
-#define U300_DMA_UART1_TX 38
-#define U300_DMA_UART1_RX 39
-
-#define U300_DMA_DEVICE_CHANNELS 32
-#define U300_DMA_CHANNELS 40
-
-/**
- * coh901318_filter_id() - DMA channel filter function
- * @chan: dma channel handle
- * @chan_id: id of dma channel to be filter out
- *
- * In dma_request_channel() it specifies what channel id to be requested
- */
-bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
-#else
-static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
-{
- return false;
-}
-#endif
-
-#endif /* PLAT_COH901318_H */
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
index 30e676b36b24..725602d9df91 100644
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ b/include/linux/platform_data/dma-imx-sdma.h
@@ -57,15 +57,4 @@ struct sdma_script_start_addrs {
/* End of v4 array */
};
-/**
- * struct sdma_platform_data - platform specific data for SDMA engine
- *
- * @fw_name The firmware name
- * @script_addrs SDMA scripts addresses in SDMA ROM
- */
-struct sdma_platform_data {
- char *fw_name;
- struct sdma_script_start_addrs *script_addrs;
-};
-
#endif /* __MACH_MXC_SDMA_H__ */
diff --git a/include/linux/platform_data/efm32-spi.h b/include/linux/platform_data/efm32-spi.h
deleted file mode 100644
index a2c56fcd0534..000000000000
--- a/include/linux/platform_data/efm32-spi.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__
-#define __LINUX_PLATFORM_DATA_EFM32_SPI_H__
-
-#include <linux/types.h>
-
-/**
- * struct efm32_spi_pdata
- * @location: pinmux location for the I/O pins (to be written to the ROUTE
- * register)
- */
-struct efm32_spi_pdata {
- u8 location;
-};
-#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */
diff --git a/include/linux/platform_data/efm32-uart.h b/include/linux/platform_data/efm32-uart.h
deleted file mode 100644
index ccbb8f11db75..000000000000
--- a/include/linux/platform_data/efm32-uart.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- *
- *
- */
-#ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__
-#define __LINUX_PLATFORM_DATA_EFM32_UART_H__
-
-#include <linux/types.h>
-
-/**
- * struct efm32_uart_pdata
- * @location: pinmux location for the I/O pins (to be written to the ROUTE
- * register)
- */
-struct efm32_uart_pdata {
- u8 location;
-};
-#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ */
diff --git a/include/linux/platform_data/i2c-hid.h b/include/linux/platform_data/i2c-hid.h
deleted file mode 100644
index c628bb5e1061..000000000000
--- a/include/linux/platform_data/i2c-hid.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * HID over I2C protocol implementation
- *
- * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
- * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- */
-
-#ifndef __LINUX_I2C_HID_H
-#define __LINUX_I2C_HID_H
-
-#include <linux/regulator/consumer.h>
-#include <linux/types.h>
-
-/**
- * struct i2chid_platform_data - used by hid over i2c implementation.
- * @hid_descriptor_address: i2c register where the HID descriptor is stored.
- * @supplies: regulators for powering on the device.
- * @post_power_delay_ms: delay after powering on before device is usable.
- *
- * Note that it is the responsibility of the platform driver (or the acpi 5.0
- * driver, or the flattened device tree) to setup the irq related to the gpio in
- * the struct i2c_board_info.
- * The platform driver should also setup the gpio according to the device:
- *
- * A typical example is the following:
- * irq = gpio_to_irq(intr_gpio);
- * hkdk4412_i2c_devs5[0].irq = irq; // store the irq in i2c_board_info
- * gpio_request(intr_gpio, "elan-irq");
- * s3c_gpio_setpull(intr_gpio, S3C_GPIO_PULL_UP);
- */
-struct i2c_hid_platform_data {
- u16 hid_descriptor_address;
- struct regulator_bulk_data supplies[2];
- int post_power_delay_ms;
-};
-
-#endif /* __LINUX_I2C_HID_H */
diff --git a/include/linux/platform_data/mlxcpld.h b/include/linux/platform_data/mlxcpld.h
new file mode 100644
index 000000000000..d7610b528856
--- /dev/null
+++ b/include/linux/platform_data/mlxcpld.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/*
+ * Mellanox I2C multiplexer support in CPLD
+ *
+ * Copyright (C) 2016-2020 Mellanox Technologies
+ */
+
+#ifndef _LINUX_I2C_MLXCPLD_H
+#define _LINUX_I2C_MLXCPLD_H
+
+/* Platform data for the CPLD I2C multiplexers */
+
+/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
+ * @chan_ids - channels array
+ * @num_adaps - number of adapters
+ * @sel_reg_addr - mux select register offset in CPLD space
+ * @reg_size: register size in bytes
+ * @handle: handle to be passed by callback
+ * @completion_notify: callback to notify when all the adapters are created
+ */
+struct mlxcpld_mux_plat_data {
+ int *chan_ids;
+ int num_adaps;
+ int sel_reg_addr;
+ u8 reg_size;
+ void *handle;
+ int (*completion_notify)(void *handle, struct i2c_adapter *parent,
+ struct i2c_adapter *adapters[]);
+};
+
+#endif /* _LINUX_I2C_MLXCPLD_H */
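
A rough sketch of how a platform driver could populate the relocated mlxcpld_mux_plat_data; the channel numbers, select-register offset and notification callback are invented for illustration:

#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/platform_data/mlxcpld.h>

static int foo_mux_created(void *handle, struct i2c_adapter *parent,
			   struct i2c_adapter *adapters[])
{
	return 0;	/* all child adapters are now available */
}

static int foo_chan_ids[] = { 2, 3, 4, 5 };

static struct mlxcpld_mux_plat_data foo_mux_pdata = {
	.chan_ids		= foo_chan_ids,
	.num_adaps		= ARRAY_SIZE(foo_chan_ids),
	.sel_reg_addr		= 0xdb,	/* hypothetical CPLD mux register */
	.reg_size		= 1,
	.completion_notify	= foo_mux_created,
};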
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index f0b8947e6b07..91051e9907f3 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -108,8 +108,7 @@ struct omap_mmc_platform_data {
const char *name;
u32 ocr_mask;
- /* Card detection IRQs */
- int card_detect_irq;
+ /* Card detection */
int (*card_detect)(struct device *dev, int slot);
unsigned int ban_openended:1;
diff --git a/include/linux/platform_data/x86/mlxcpld.h b/include/linux/platform_data/x86/mlxcpld.h
deleted file mode 100644
index b08dcb183fca..000000000000
--- a/include/linux/platform_data/x86/mlxcpld.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * mlxcpld.h - Mellanox I2C multiplexer support in CPLD
- *
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _LINUX_I2C_MLXCPLD_H
-#define _LINUX_I2C_MLXCPLD_H
-
-/* Platform data for the CPLD I2C multiplexers */
-
-/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
- * @adap_ids - adapter array
- * @num_adaps - number of adapters
- * @sel_reg_addr - mux select register offset in CPLD space
- */
-struct mlxcpld_mux_plat_data {
- int *adap_ids;
- int num_adaps;
- int sel_reg_addr;
-};
-
-#endif /* _LINUX_I2C_MLXCPLD_H */
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
new file mode 100644
index 000000000000..a6329003aee7
--- /dev/null
+++ b/include/linux/platform_profile.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Platform profile sysfs interface
+ *
+ * See Documentation/ABI/testing/sysfs-platform_profile.rst for more
+ * information.
+ */
+
+#ifndef _PLATFORM_PROFILE_H_
+#define _PLATFORM_PROFILE_H_
+
+#include <linux/bitops.h>
+
+/*
+ * If more options are added please update profile_names array in
+ * platform_profile.c and sysfs-platform_profile documentation.
+ */
+
+enum platform_profile_option {
+ PLATFORM_PROFILE_LOW_POWER,
+ PLATFORM_PROFILE_COOL,
+ PLATFORM_PROFILE_QUIET,
+ PLATFORM_PROFILE_BALANCED,
+ PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ PLATFORM_PROFILE_PERFORMANCE,
+ PLATFORM_PROFILE_LAST, /* must always be last */
+};
+
+struct platform_profile_handler {
+ unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int (*profile_get)(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile);
+ int (*profile_set)(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile);
+};
+
+int platform_profile_register(struct platform_profile_handler *pprof);
+int platform_profile_remove(void);
+void platform_profile_notify(void);
+
+#endif /*_PLATFORM_PROFILE_H_*/
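Note: the header above only declares the handler hooks. As a rough orientation, a minimal sketch of how a hypothetical platform driver might wire them up follows; the demo_* names and the particular set of profiles are illustrative assumptions, not part of this patch.

#include <linux/bitops.h>
#include <linux/platform_profile.h>

static enum platform_profile_option demo_cur = PLATFORM_PROFILE_BALANCED;

static int demo_profile_get(struct platform_profile_handler *pprof,
			    enum platform_profile_option *profile)
{
	*profile = demo_cur;
	return 0;
}

static int demo_profile_set(struct platform_profile_handler *pprof,
			    enum platform_profile_option profile)
{
	demo_cur = profile;	/* a real driver would program EC/firmware here */
	return 0;
}

static struct platform_profile_handler demo_handler = {
	.profile_get = demo_profile_get,
	.profile_set = demo_profile_set,
};

static int demo_register_profiles(void)
{
	/* Advertise only the profiles this platform can actually provide. */
	set_bit(PLATFORM_PROFILE_LOW_POWER, demo_handler.choices);
	set_bit(PLATFORM_PROFILE_BALANCED, demo_handler.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, demo_handler.choices);

	return platform_profile_register(&demo_handler);
}

On teardown the driver would call platform_profile_remove(); platform_profile_notify() is meant for the case where the platform changes the active profile on its own.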
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 47aca6bac1d6..482313a8ccfc 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -537,6 +537,8 @@ struct pm_subsys_data {
spinlock_t lock;
unsigned int refcount;
#ifdef CONFIG_PM_CLK
+ unsigned int clock_op_might_sleep;
+ struct mutex clock_mutex;
struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 2ca919ae8d36..dfcfbcecc34b 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -9,6 +9,7 @@
#define _LINUX_PM_DOMAIN_H
#include <linux/device.h>
+#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/err.h>
@@ -55,6 +56,10 @@
*
* GENPD_FLAG_RPM_ALWAYS_ON: Instructs genpd to always keep the PM domain
* powered on except for system suspend.
+ *
+ * GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its
+ * components' next wakeup when determining the
+ * optimal idle state.
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
@@ -62,6 +67,7 @@
#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
+#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
enum gpd_status {
GENPD_STATE_ON = 0, /* PM domain is on */
@@ -129,6 +135,7 @@ struct generic_pm_domain {
unsigned int state);
struct gpd_dev_ops dev_ops;
s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
+ ktime_t next_wakeup; /* Maintained by the domain governor */
bool max_off_time_changed;
bool cached_power_down_ok;
bool cached_power_down_state_idx;
@@ -191,6 +198,7 @@ struct generic_pm_domain_data {
struct notifier_block *power_nb;
int cpu;
unsigned int performance_state;
+ ktime_t next_wakeup;
void *data;
};
@@ -217,6 +225,7 @@ int pm_genpd_remove(struct generic_pm_domain *genpd);
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
int dev_pm_genpd_remove_notifier(struct device *dev);
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -275,6 +284,9 @@ static inline int dev_pm_genpd_remove_notifier(struct device *dev)
return -EOPNOTSUPP;
}
+static inline void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
+{ }
+
#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
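As a rough illustration of the new dev_pm_genpd_set_next_wakeup() hook (assuming a device attached to a genpd that sets GENPD_FLAG_MIN_RESIDENCY; demo_prepare_sleep() is hypothetical), a driver could hint its next expected wakeup like this:

#include <linux/ktime.h>
#include <linux/pm_domain.h>

static void demo_prepare_sleep(struct device *dev, unsigned int sleep_ms)
{
	/* Tell the domain governor when this device expects to run again. */
	ktime_t next = ktime_add_ms(ktime_get(), sleep_ms);

	dev_pm_genpd_set_next_wakeup(dev, next);
}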
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 1435c054016a..c0371efa4a0f 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -98,6 +98,9 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp);
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index);
+
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
int dev_pm_opp_get_opp_count(struct device *dev);
@@ -111,6 +114,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
bool available);
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
unsigned int level);
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
@@ -143,28 +148,32 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
void dev_pm_opp_put_prop_name(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
void dev_pm_opp_put_regulators(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
+struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name);
void dev_pm_opp_put_clkname(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
+struct opp_table *devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
void dev_pm_opp_detach_genpd(struct opp_table *opp_table);
+struct opp_table *devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
+struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp);
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
-int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp);
+int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
void dev_pm_opp_remove_table(struct device *dev);
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
+int dev_pm_opp_sync_regulators(struct device *dev);
#else
static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *dev, int index)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
@@ -184,6 +193,13 @@ static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
return 0;
}
+static inline
+unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
+ unsigned int index)
+{
+ return 0;
+}
+
static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
return false;
@@ -217,31 +233,37 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq, bool available)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
unsigned int level)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
unsigned long u_volt)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
@@ -249,7 +271,7 @@ static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
@@ -280,19 +302,19 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
const u32 *versions,
unsigned int count)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
@@ -300,57 +322,76 @@ static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
int (*set_opp)(struct dev_pm_set_opp_data *data))
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {}
+static inline struct opp_table *
+devm_pm_opp_register_set_opp_helper(struct device *dev,
+ int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {}
static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {}
-static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name)
+static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs)
{
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {}
+static inline struct opp_table *devm_pm_opp_attach_genpd(struct device *dev,
+ const char **names, struct device ***virt_devs)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table,
+ struct opp_table *dst_table, struct dev_pm_opp *src_opp)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
-static inline int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp)
+static inline int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
{
return -EOPNOTSUPP;
}
static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
@@ -366,11 +407,17 @@ static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask
{
}
+static inline int dev_pm_opp_sync_regulators(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_PM_OPP */
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
+int dev_pm_opp_of_add_table_noclk(struct device *dev, int index);
void dev_pm_opp_of_remove_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
@@ -387,12 +434,17 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev)
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
+{
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_remove_table(struct device *dev)
@@ -401,7 +453,7 @@ static inline void dev_pm_opp_of_remove_table(struct device *dev)
static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
@@ -410,7 +462,7 @@ static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpum
static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
@@ -426,7 +478,7 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
static inline int dev_pm_opp_of_register_em(struct device *dev,
struct cpumask *cpus)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_unregister_em(struct device *dev)
@@ -435,12 +487,12 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev)
static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
#endif
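A brief sketch of the added/renamed OPP calls, assuming a consumer device whose OPP table uses levels (demo_set_level() is illustrative): look up an OPP at or above a requested level with dev_pm_opp_find_level_ceil() and program it with dev_pm_opp_set_opp(), which supersedes dev_pm_opp_set_bw().

#include <linux/err.h>
#include <linux/pm_opp.h>

static int demo_set_level(struct device *dev, unsigned int level)
{
	struct dev_pm_opp *opp;
	int ret;

	opp = dev_pm_opp_find_level_ceil(dev, &level);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}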
diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h
index 1ea5bae708a1..12cbbf305969 100644
--- a/include/linux/pmbus.h
+++ b/include/linux/pmbus.h
@@ -34,6 +34,15 @@
*/
#define PMBUS_WRITE_PROTECTED BIT(1)
+/*
+ * PMBUS_NO_CAPABILITY
+ *
+ * Some PMBus chips don't respond with valid data when reading the CAPABILITY
+ * register. For such chips, this flag should be set so that the PMBus core
+ * driver doesn't use CAPABILITY to determine its behavior.
+ */
+#define PMBUS_NO_CAPABILITY BIT(2)
+
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 90797f1b421d..307094ebb88c 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -15,6 +15,8 @@
#include <linux/refcount.h>
#include <uapi/linux/posix_acl.h>
+struct user_namespace;
+
struct posix_acl_entry {
short e_tag;
unsigned short e_perm;
@@ -61,23 +63,24 @@ posix_acl_release(struct posix_acl *acl)
extern void posix_acl_init(struct posix_acl *, int);
extern struct posix_acl *posix_acl_alloc(int, gfp_t);
-extern int posix_acl_valid(struct user_namespace *, const struct posix_acl *);
-extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
extern struct posix_acl *get_posix_acl(struct inode *, int);
-extern int set_posix_acl(struct inode *, int, struct posix_acl *);
+extern int set_posix_acl(struct user_namespace *, struct inode *, int,
+ struct posix_acl *);
#ifdef CONFIG_FS_POSIX_ACL
-extern int posix_acl_chmod(struct inode *, umode_t);
+int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t);
extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
struct posix_acl **);
-extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
+int posix_acl_update_mode(struct user_namespace *, struct inode *, umode_t *,
+ struct posix_acl **);
-extern int simple_set_acl(struct inode *, struct posix_acl *, int);
+extern int simple_set_acl(struct user_namespace *, struct inode *,
+ struct posix_acl *, int);
extern int simple_acl_create(struct inode *, struct inode *);
struct posix_acl *get_cached_acl(struct inode *inode, int type);
@@ -85,6 +88,9 @@ struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl);
void forget_cached_acl(struct inode *inode, int type);
void forget_all_cached_acls(struct inode *inode);
+int posix_acl_valid(struct user_namespace *, const struct posix_acl *);
+int posix_acl_permission(struct user_namespace *, struct inode *,
+ const struct posix_acl *, int);
static inline void cache_no_acl(struct inode *inode)
{
@@ -92,7 +98,8 @@ static inline void cache_no_acl(struct inode *inode)
inode->i_default_acl = NULL;
}
#else
-static inline int posix_acl_chmod(struct inode *inode, umode_t mode)
+static inline int posix_acl_chmod(struct user_namespace *mnt_userns,
+ struct inode *inode, umode_t mode)
{
return 0;
}
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index 2387709991b5..060e8d203181 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -33,13 +33,17 @@ posix_acl_xattr_count(size_t size)
}
#ifdef CONFIG_FS_POSIX_ACL
-void posix_acl_fix_xattr_from_user(void *value, size_t size);
-void posix_acl_fix_xattr_to_user(void *value, size_t size);
+void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+ void *value, size_t size);
+void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+ void *value, size_t size);
#else
-static inline void posix_acl_fix_xattr_from_user(void *value, size_t size)
+static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+ void *value, size_t size)
{
}
-static inline void posix_acl_fix_xattr_to_user(void *value, size_t size)
+static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+ void *value, size_t size)
{
}
#endif
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h
deleted file mode 100644
index 02f94a1b323b..000000000000
--- a/include/linux/power/max8903_charger.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- */
-
-#ifndef __MAX8903_CHARGER_H__
-#define __MAX8903_CHARGER_H__
-
-struct max8903_pdata {
- /*
- * GPIOs
- * cen, chg, flt, dcm and usus are optional.
- * dok and uok are not optional depending on the status of
- * dc_valid and usb_valid.
- */
- int cen; /* Charger Enable input */
- int dok; /* DC(Adapter) Power OK output */
- int uok; /* USB Power OK output */
- int chg; /* Charger status output */
- int flt; /* Fault output */
- int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */
- int usus; /* USB Suspend Input (1: suspended) */
-
- /*
- * DC(Adapter/TA) is wired
- * When dc_valid is true,
- * dok should be valid.
- *
- * At least one of dc_valid or usb_valid should be true.
- */
- bool dc_valid;
- /*
- * USB is wired
- * When usb_valid is true,
- * uok should be valid.
- */
- bool usb_valid;
-};
-
-#endif /* __MAX8903_CHARGER_H__ */
diff --git a/include/linux/property.h b/include/linux/property.h
index 0a9001fe7aea..dafccfce0262 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -488,4 +488,11 @@ fwnode_create_software_node(const struct property_entry *properties,
const struct fwnode_handle *parent);
void fwnode_remove_software_node(struct fwnode_handle *fwnode);
+int device_add_software_node(struct device *dev, const struct software_node *swnode);
+void device_remove_software_node(struct device *dev);
+
+int device_create_managed_software_node(struct device *dev,
+ const struct property_entry *properties,
+ const struct software_node *parent);
+
#endif /* _LINUX_PROPERTY_H_ */
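For context, a sketch of the new managed helper (the property names are made-up examples): a node registered this way is duplicated from the passed entries and torn down automatically when the device is removed.

#include <linux/property.h>

static int demo_add_props(struct device *dev)
{
	const struct property_entry props[] = {
		PROPERTY_ENTRY_U32("demo,poll-interval-ms", 250),
		PROPERTY_ENTRY_BOOL("demo,wakeup-source"),
		{ }
	};

	/* Freed automatically when the device goes away. */
	return device_create_managed_software_node(dev, props, NULL);
}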
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 49d155cd2dfe..b801ead1e2bb 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -66,6 +66,7 @@ enum sev_cmd {
SEV_CMD_LAUNCH_MEASURE = 0x033,
SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034,
SEV_CMD_LAUNCH_FINISH = 0x035,
+ SEV_CMD_ATTESTATION_REPORT = 0x036,
/* Guest migration commands (outgoing) */
SEV_CMD_SEND_START = 0x040,
@@ -483,6 +484,22 @@ struct sev_data_dbg {
u32 len; /* In */
} __packed;
+/**
+ * struct sev_data_attestation_report - SEV_ATTESTATION_REPORT command parameters
+ *
+ * @handle: handle of the VM
+ * @mnonce: a random nonce that will be included in the report.
+ * @address: physical address where the report will be copied.
+ * @len: length of the physical buffer.
+ */
+struct sev_data_attestation_report {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u8 mnonce[16]; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
/**
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 2a9df80ea887..b5ebf6c01292 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -171,7 +171,7 @@ static inline void ptrace_event(int event, unsigned long message)
*
* Check whether @event is enabled and, if so, report @event and @pid
* to the ptrace parent. @pid is reported as the pid_t seen from the
- * the ptrace parent's pid namespace.
+ * ptrace parent's pid namespace.
*
* Called without locks.
*/
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 4d58dc8943f0..e339b48de32d 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -470,7 +470,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
/**
* @brief qed_chain_reset - Resets the chain to its start state
*
- * @param p_chain pointer to a previously allocted chain
+ * @param p_chain pointer to a previously allocated chain
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index d7db17996322..d31ecaf4fdd3 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -141,12 +141,18 @@ static inline void rb_insert_color_cached(struct rb_node *node,
rb_insert_color(node, &root->rb_root);
}
-static inline void rb_erase_cached(struct rb_node *node,
- struct rb_root_cached *root)
+
+static inline struct rb_node *
+rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
{
+ struct rb_node *leftmost = NULL;
+
if (root->rb_leftmost == node)
- root->rb_leftmost = rb_next(node);
+ leftmost = root->rb_leftmost = rb_next(node);
+
rb_erase(node, &root->rb_root);
+
+ return leftmost;
}
static inline void rb_replace_node_cached(struct rb_node *victim,
@@ -158,4 +164,198 @@ static inline void rb_replace_node_cached(struct rb_node *victim,
rb_replace_node(victim, new, &root->rb_root);
}
+/*
+ * The below helper functions use 2 operators with 3 different
+ * calling conventions. The operators are related like:
+ *
+ * comp(a->key,b) < 0 := less(a,b)
+ * comp(a->key,b) > 0 := less(b,a)
+ * comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
+ *
+ * If these operators define a partial order on the elements we make no
+ * guarantee on which of the elements matching the key is found. See
+ * rb_find().
+ *
+ * The reason for this is to allow the find() interface without requiring an
+ * on-stack dummy object, which might not be feasible due to object size.
+ */
+
+/**
+ * rb_add_cached() - insert @node into the leftmost cached tree @tree
+ * @node: node to insert
+ * @tree: leftmost cached tree to insert @node into
+ * @less: operator defining the (partial) node order
+ *
+ * Returns @node when it is the new leftmost, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color_cached(node, tree, leftmost);
+
+ return leftmost ? node : NULL;
+}
+
+/**
+ * rb_add() - insert @node into @tree
+ * @node: node to insert
+ * @tree: tree to insert @node into
+ * @less: operator defining the (partial) node order
+ */
+static __always_inline void
+rb_add(struct rb_node *node, struct rb_root *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent))
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color(node, tree);
+}
+
+/**
+ * rb_find_add() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add(struct rb_node *node, struct rb_root *tree,
+ int (*cmp)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0)
+ link = &parent->rb_left;
+ else if (c > 0)
+ link = &parent->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color(node, tree);
+ return NULL;
+}
+
+/**
+ * rb_find() - find @key in tree @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @key or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c < 0)
+ node = node->rb_left;
+ else if (c > 0)
+ node = node->rb_right;
+ else
+ return node;
+ }
+
+ return NULL;
+}
+
+/**
+ * rb_find_first() - find the first @key in @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ *
+ * Returns the leftmost node matching @key, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find_first(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+ struct rb_node *match = NULL;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c <= 0) {
+ if (!c)
+ match = node;
+ node = node->rb_left;
+ } else if (c > 0) {
+ node = node->rb_right;
+ }
+ }
+
+ return match;
+}
+
+/**
+ * rb_next_match() - find the next node matching @key
+ * @key: key to match
+ * @node: node from a previous match of @key
+ * @cmp: operator defining node order
+ *
+ * Returns the next node matching @key, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_next_match(const void *key, struct rb_node *node,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ node = rb_next(node);
+ if (node && cmp(key, node))
+ node = NULL;
+ return node;
+}
+
+/**
+ * rb_for_each() - iterates a subtree matching @key
+ * @node: iterator
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ */
+#define rb_for_each(node, key, tree, cmp) \
+ for ((node) = rb_find_first((key), (tree), (cmp)); \
+ (node); (node) = rb_next_match((key), (node), (cmp)))
+
#endif /* _LINUX_RBTREE_H */
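To make the comparator conventions described above concrete, here is a small sketch of keyed insert and lookup with the new helpers; struct demo_node and the demo_* functions are hypothetical, not part of the patch.

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_node {
	struct rb_node rb;
	u64 key;
};

/* less(a, b): strict "a sorts before b" ordering, used by rb_add(). */
static bool demo_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct demo_node, rb)->key <
	       rb_entry(b, struct demo_node, rb)->key;
}

/* comp(key, node): three-way compare against a bare key, used by rb_find(). */
static int demo_cmp(const void *key, const struct rb_node *node)
{
	u64 k = *(const u64 *)key;
	const struct demo_node *dn = rb_entry(node, struct demo_node, rb);

	if (k < dn->key)
		return -1;
	if (k > dn->key)
		return 1;
	return 0;
}

static void demo_insert(struct rb_root *tree, struct demo_node *dn)
{
	rb_add(&dn->rb, tree, demo_less);
}

static struct demo_node *demo_lookup(struct rb_root *tree, u64 key)
{
	struct rb_node *node = rb_find(&key, tree, demo_cmp);

	return node ? rb_entry(node, struct demo_node, rb) : NULL;
}

The point of the two calling conventions is that rb_find() only needs a key, so callers avoid building an on-stack dummy node just to search.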
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index b36afe7b22c9..8afe886e85f1 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -63,6 +63,122 @@ struct rcu_cblist {
#define RCU_NEXT_TAIL 3
#define RCU_CBLIST_NSEGS 4
+
+/*
+ * ==NOCB Offloading state machine==
+ *
+ *
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_SOFTIRQ_ONLY |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, without holding nocb_lock. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_OFFLOADED |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
+ * | allowing nocb_timer to be armed. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * -----------------------------------
+ * | |
+ * v v
+ * --------------------------------------- ----------------------------------|
+ * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
+ * | | | |
+ * | | | |
+ * | CB kthread woke up and | | GP kthread woke up and |
+ * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED|
+ * | Processes callbacks concurrently | | |
+ * | with rcu_core(), holding | | |
+ * | nocb_lock. | | |
+ * --------------------------------------- -----------------------------------
+ * | |
+ * -----------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
+ * | handling callbacks. |
+ * ----------------------------------------------------------------------------
+ */
+
+
+
+/*
+ * ==NOCB De-Offloading state machine==
+ *
+ *
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
+ * | ignores callbacks. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | |
+ * | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
+ * | holding nocb_lock. Wake up CB and GP kthreads if necessary. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * -----------------------------------
+ * | |
+ * v v
+ * ---------------------------------------------------------------------------|
+ * | |
+ * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
+ * | | |
+ * | GP kthread woke up and | CB kthread woke up and |
+ * | acknowledged the fact that | acknowledged the fact that |
+ * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. |
+ * | | The CB kthread goes to sleep |
+ * | The callbacks from the target CPU | until it ever gets re-offloaded. |
+ * | will be ignored from the GP kthread | |
+ * | loop. | |
+ * ----------------------------------------------------------------------------
+ * | |
+ * -----------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | 0 |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
+ * | Flush pending nocb_timer. Flush nocb bypass callbacks. |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_SOFTIRQ_ONLY |
+ * | |
+ * | Callbacks processed by rcu_core() from softirqs or local |
+ * | rcuc kthread, without holding nocb_lock. |
+ * ----------------------------------------------------------------------------
+ */
+#define SEGCBLIST_ENABLED BIT(0)
+#define SEGCBLIST_SOFTIRQ_ONLY BIT(1)
+#define SEGCBLIST_KTHREAD_CB BIT(2)
+#define SEGCBLIST_KTHREAD_GP BIT(3)
+#define SEGCBLIST_OFFLOADED BIT(4)
+
struct rcu_segcblist {
struct rcu_head *head;
struct rcu_head **tails[RCU_CBLIST_NSEGS];
@@ -72,8 +188,8 @@ struct rcu_segcblist {
#else
long len;
#endif
- u8 enabled;
- u8 offloaded;
+ long seglen[RCU_CBLIST_NSEGS];
+ u8 flags;
};
#define RCU_SEGCBLIST_INITIALIZER(n) \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index fd02c5fa60cb..bd04f722714f 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -33,6 +33,8 @@
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a) (*(long *)(&(a)))
+#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
+#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
@@ -110,8 +112,14 @@ static inline void rcu_user_exit(void) { }
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
+int rcu_nocb_cpu_offload(int cpu);
+int rcu_nocb_cpu_deoffload(int cpu);
+void rcu_nocb_flush_deferred_wakeup(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void) { }
+static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
+static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
+static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/**
@@ -846,19 +854,11 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
*/
#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
-/*
- * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
- */
-#define __kvfree_rcu(head, offset) \
- do { \
- BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \
- kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
- } while (0)
-
/**
* kfree_rcu() - kfree an object after a grace period.
- * @ptr: pointer to kfree
- * @rhf: the name of the struct rcu_head within the type of @ptr.
+ * @ptr: pointer to kfree for both single- and double-argument invocations.
+ * @rhf: the name of the struct rcu_head within the type of @ptr,
+ * but only for double-argument invocations.
*
* Many rcu callbacks functions just call kfree() on the base structure.
* These functions are trivial, but their size adds up, and furthermore
@@ -871,7 +871,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* Because the functions are not allowed in the low-order 4096 bytes of
* kernel virtual memory, offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
- * be generated in __kvfree_rcu(). If this error is triggered, you can
+ * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
@@ -881,13 +881,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* The BUILD_BUG_ON check must not involve any function calls, hence the
* checks are done in macros here.
*/
-#define kfree_rcu(ptr, rhf) \
-do { \
- typeof (ptr) ___p = (ptr); \
- \
- if (___p) \
- __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
-} while (0)
+#define kfree_rcu kvfree_rcu
/**
* kvfree_rcu() - kvfree an object after a grace period.
@@ -919,7 +913,17 @@ do { \
kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
-#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf)
+#define kvfree_rcu_arg_2(ptr, rhf) \
+do { \
+ typeof (ptr) ___p = (ptr); \
+ \
+ if (___p) { \
+ BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
+ kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long) \
+ (offsetof(typeof(*(ptr)), rhf))); \
+ } \
+} while (0)
+
#define kvfree_rcu_arg_1(ptr) \
do { \
typeof(ptr) ___p = (ptr); \
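To illustrate the consolidated macros above (struct demo and demo_free() are illustrative, not from the patch): the double-argument form names the embedded rcu_head and stays usable from atomic context, while the single-argument kvfree_rcu(ptr) form exists as well but may sleep.

#include <linux/rcupdate.h>

struct demo {
	int value;
	struct rcu_head rcu;
};

static void demo_free(struct demo *p)
{
	/* Frees p after a grace period; rcu_head must sit within 4095 bytes. */
	kfree_rcu(p, rcu);
}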
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
deleted file mode 100644
index 3ab1ddf151a2..000000000000
--- a/include/linux/regulator/ab8500.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
- * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
- * Daniel Willerud <daniel.willerud@stericsson.com> for ST-Ericsson
- */
-
-#ifndef __LINUX_MFD_AB8500_REGULATOR_H
-#define __LINUX_MFD_AB8500_REGULATOR_H
-
-#include <linux/platform_device.h>
-
-/* AB8500 regulators */
-enum ab8500_regulator_id {
- AB8500_LDO_AUX1,
- AB8500_LDO_AUX2,
- AB8500_LDO_AUX3,
- AB8500_LDO_INTCORE,
- AB8500_LDO_TVOUT,
- AB8500_LDO_AUDIO,
- AB8500_LDO_ANAMIC1,
- AB8500_LDO_ANAMIC2,
- AB8500_LDO_DMIC,
- AB8500_LDO_ANA,
- AB8500_NUM_REGULATORS,
-};
-
-/* AB8505 regulators */
-enum ab8505_regulator_id {
- AB8505_LDO_AUX1,
- AB8505_LDO_AUX2,
- AB8505_LDO_AUX3,
- AB8505_LDO_AUX4,
- AB8505_LDO_AUX5,
- AB8505_LDO_AUX6,
- AB8505_LDO_INTCORE,
- AB8505_LDO_ADC,
- AB8505_LDO_AUDIO,
- AB8505_LDO_ANAMIC1,
- AB8505_LDO_ANAMIC2,
- AB8505_LDO_AUX8,
- AB8505_LDO_ANA,
- AB8505_NUM_REGULATORS,
-};
-
-/* AB8500 and AB8505 register initialization */
-struct ab8500_regulator_reg_init {
- int id;
- u8 mask;
- u8 value;
-};
-
-#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \
- { \
- .id = _id, \
- .mask = _mask, \
- .value = _value, \
- }
-
-/* AB8500 registers */
-enum ab8500_regulator_reg {
- AB8500_REGUREQUESTCTRL2,
- AB8500_REGUREQUESTCTRL3,
- AB8500_REGUREQUESTCTRL4,
- AB8500_REGUSYSCLKREQ1HPVALID1,
- AB8500_REGUSYSCLKREQ1HPVALID2,
- AB8500_REGUHWHPREQ1VALID1,
- AB8500_REGUHWHPREQ1VALID2,
- AB8500_REGUHWHPREQ2VALID1,
- AB8500_REGUHWHPREQ2VALID2,
- AB8500_REGUSWHPREQVALID1,
- AB8500_REGUSWHPREQVALID2,
- AB8500_REGUSYSCLKREQVALID1,
- AB8500_REGUSYSCLKREQVALID2,
- AB8500_REGUMISC1,
- AB8500_VAUDIOSUPPLY,
- AB8500_REGUCTRL1VAMIC,
- AB8500_VPLLVANAREGU,
- AB8500_VREFDDR,
- AB8500_EXTSUPPLYREGU,
- AB8500_VAUX12REGU,
- AB8500_VRF1VAUX3REGU,
- AB8500_VAUX1SEL,
- AB8500_VAUX2SEL,
- AB8500_VRF1VAUX3SEL,
- AB8500_REGUCTRL2SPARE,
- AB8500_REGUCTRLDISCH,
- AB8500_REGUCTRLDISCH2,
- AB8500_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8505 registers */
-enum ab8505_regulator_reg {
- AB8505_REGUREQUESTCTRL1,
- AB8505_REGUREQUESTCTRL2,
- AB8505_REGUREQUESTCTRL3,
- AB8505_REGUREQUESTCTRL4,
- AB8505_REGUSYSCLKREQ1HPVALID1,
- AB8505_REGUSYSCLKREQ1HPVALID2,
- AB8505_REGUHWHPREQ1VALID1,
- AB8505_REGUHWHPREQ1VALID2,
- AB8505_REGUHWHPREQ2VALID1,
- AB8505_REGUHWHPREQ2VALID2,
- AB8505_REGUSWHPREQVALID1,
- AB8505_REGUSWHPREQVALID2,
- AB8505_REGUSYSCLKREQVALID1,
- AB8505_REGUSYSCLKREQVALID2,
- AB8505_REGUVAUX4REQVALID,
- AB8505_REGUMISC1,
- AB8505_VAUDIOSUPPLY,
- AB8505_REGUCTRL1VAMIC,
- AB8505_VSMPSAREGU,
- AB8505_VSMPSBREGU,
- AB8505_VSAFEREGU, /* NOTE! PRCMU register */
- AB8505_VPLLVANAREGU,
- AB8505_EXTSUPPLYREGU,
- AB8505_VAUX12REGU,
- AB8505_VRF1VAUX3REGU,
- AB8505_VSMPSASEL1,
- AB8505_VSMPSASEL2,
- AB8505_VSMPSASEL3,
- AB8505_VSMPSBSEL1,
- AB8505_VSMPSBSEL2,
- AB8505_VSMPSBSEL3,
- AB8505_VSAFESEL1, /* NOTE! PRCMU register */
- AB8505_VSAFESEL2, /* NOTE! PRCMU register */
- AB8505_VSAFESEL3, /* NOTE! PRCMU register */
- AB8505_VAUX1SEL,
- AB8505_VAUX2SEL,
- AB8505_VRF1VAUX3SEL,
- AB8505_VAUX4REQCTRL,
- AB8505_VAUX4REGU,
- AB8505_VAUX4SEL,
- AB8505_REGUCTRLDISCH,
- AB8505_REGUCTRLDISCH2,
- AB8505_REGUCTRLDISCH3,
- AB8505_CTRLVAUX5,
- AB8505_CTRLVAUX6,
- AB8505_NUM_REGULATOR_REGISTERS,
-};
-
-/* AB8500 external regulators */
-struct ab8500_ext_regulator_cfg {
- bool hwreq; /* requires hw mode or high power mode */
-};
-
-enum ab8500_ext_regulator_id {
- AB8500_EXT_SUPPLY1,
- AB8500_EXT_SUPPLY2,
- AB8500_EXT_SUPPLY3,
- AB8500_NUM_EXT_REGULATORS,
-};
-
-/* AB8500 regulator platform data */
-struct ab8500_regulator_platform_data {
- int num_reg_init;
- struct ab8500_regulator_reg_init *reg_init;
- int num_regulator;
- struct regulator_init_data *regulator;
- int num_ext_regulator;
- struct regulator_init_data *ext_regulator;
-};
-
-#endif
diff --git a/include/linux/regulator/mt6315-regulator.h b/include/linux/regulator/mt6315-regulator.h
new file mode 100644
index 000000000000..3b80d3f3910c
--- /dev/null
+++ b/include/linux/regulator/mt6315-regulator.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6315_H
+#define __LINUX_REGULATOR_MT6315_H
+
+#define MT6315_RP 3
+#define MT6315_PP 6
+#define MT6315_SP 7
+
+enum {
+ MT6315_VBUCK1 = 0,
+ MT6315_VBUCK2,
+ MT6315_VBUCK3,
+ MT6315_VBUCK4,
+ MT6315_VBUCK_MAX,
+};
+
+/* Register */
+#define MT6315_TOP2_ELR7 0x139
+#define MT6315_TOP_TMA_KEY 0x39F
+#define MT6315_TOP_TMA_KEY_H 0x3A0
+#define MT6315_BUCK_TOP_CON0 0x1440
+#define MT6315_BUCK_TOP_CON1 0x1443
+#define MT6315_BUCK_TOP_ELR0 0x1449
+#define MT6315_BUCK_TOP_ELR2 0x144B
+#define MT6315_BUCK_TOP_ELR4 0x144D
+#define MT6315_BUCK_TOP_ELR6 0x144F
+#define MT6315_VBUCK1_DBG0 0x1499
+#define MT6315_VBUCK1_DBG4 0x149D
+#define MT6315_VBUCK2_DBG0 0x1519
+#define MT6315_VBUCK2_DBG4 0x151D
+#define MT6315_VBUCK3_DBG0 0x1599
+#define MT6315_VBUCK3_DBG4 0x159D
+#define MT6315_VBUCK4_DBG0 0x1619
+#define MT6315_VBUCK4_DBG4 0x161D
+#define MT6315_BUCK_TOP_4PHASE_ANA_CON42 0x16B1
+
+#define PROTECTION_KEY_H 0x9C
+#define PROTECTION_KEY 0xEA
+
+#endif /* __LINUX_REGULATOR_MT6315_H */
diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
index 1bbd3014f906..ccdb5320a240 100644
--- a/include/linux/regulator/pca9450.h
+++ b/include/linux/regulator/pca9450.h
@@ -216,4 +216,11 @@ enum {
#define IRQ_THERM_105 0x02
#define IRQ_THERM_125 0x01
+/* PCA9450_REG_RESET_CTRL bits */
+#define WDOG_B_CFG_MASK 0xC0
+#define WDOG_B_CFG_NONE 0x00
+#define WDOG_B_CFG_WARM 0x40
+#define WDOG_B_CFG_COLD_LDO12 0x80
+#define WDOG_B_CFG_COLD 0xC0
+
#endif /* __LINUX_REG_PCA9450_H__ */
diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h
index 647051662174..82b211518136 100644
--- a/include/linux/remoteproc/qcom_rproc.h
+++ b/include/linux/remoteproc/qcom_rproc.h
@@ -3,8 +3,6 @@
struct notifier_block;
-#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
-
/**
* enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc
* processor.
@@ -26,6 +24,8 @@ struct qcom_ssr_notify_data {
bool crashed;
};
+#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
+
void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb);
int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb);
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 439fec7112a9..b9109efa2a5c 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -363,6 +363,25 @@ __must_check devm_reset_control_get_exclusive_released(struct device *dev,
}
/**
+ * devm_reset_control_get_optional_exclusive_released - resource managed
+ * reset_control_get_optional_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed-and-optional variant of reset_control_get_exclusive_released(). For
+ * reset controllers returned from this function, reset_control_put() is called
+ * automatically on driver detach.
+ *
+ * See reset_control_get_exclusive_released() for more information.
+ */
+static inline struct reset_control *
+__must_check devm_reset_control_get_optional_exclusive_released(struct device *dev,
+ const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, false, true, false);
+}
+
+/**
* devm_reset_control_get_shared - resource managed reset_control_get_shared()
* @dev: device to be reset by the controller
* @id: reset line name
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 70085ca1a3fc..def5c62c93b3 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -213,7 +213,8 @@ struct page_vma_mapped_walk {
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
- if (pvmw->pte)
+ /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
+ if (pvmw->pte && !PageHuge(pvmw->page))
pte_unmap(pvmw->pte);
if (pvmw->ptl)
spin_unlock(pvmw->ptl);
diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h
index daded9fddf36..22fc3a69b683 100644
--- a/include/linux/rpmsg/qcom_glink.h
+++ b/include/linux/rpmsg/qcom_glink.h
@@ -7,12 +7,17 @@
struct qcom_glink;
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK)
+void qcom_glink_ssr_notify(const char *ssr_name);
+#else
+static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
+#endif
+
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM)
struct qcom_glink *qcom_glink_smem_register(struct device *parent,
struct device_node *node);
void qcom_glink_smem_unregister(struct qcom_glink *glink);
-void qcom_glink_ssr_notify(const char *ssr_name);
#else
@@ -24,7 +29,6 @@ qcom_glink_smem_register(struct device *parent,
}
static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {}
-static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
#endif
#endif
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 568909449c13..bd611e26291d 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -141,6 +141,8 @@ struct rtc_device {
*/
unsigned long set_offset_nsec;
+ unsigned long features[BITS_TO_LONGS(RTC_FEATURE_CNT)];
+
time64_t range_min;
timeu64_t range_max;
time64_t start_secs;
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h
deleted file mode 100644
index b31f2856733d..000000000000
--- a/include/linux/rtc/sirfsoc_rtciobrg.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * RTC I/O Bridge interfaces for CSR SiRFprimaII
- * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-#ifndef _SIRFSOC_RTC_IOBRG_H_
-#define _SIRFSOC_RTC_IOBRG_H_
-
-struct regmap_config;
-
-extern void sirfsoc_rtc_iobrg_besyncing(void);
-
-extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
-
-extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
-struct regmap *devm_regmap_init_iobg(struct device *dev,
- const struct regmap_config *config);
-
-#endif
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 3dcd617e65ae..7ce9a51ae5c0 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -128,4 +128,11 @@ do { \
1 : ({ local_irq_restore(flags); 0; }); \
})
+#ifdef arch_rwlock_is_contended
+#define rwlock_is_contended(lock) \
+ arch_rwlock_is_contended(&(lock)->raw_lock)
+#else
+#define rwlock_is_contended(lock) ((void)(lock), 0)
+#endif /* arch_rwlock_is_contended */
+
#endif /* __LINUX_RWLOCK_H */
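As a rough usage sketch (demo_lock, demo_scan() and the list are hypothetical), a long read-side section can use the new macro to back off when a writer is waiting; on architectures without arch_rwlock_is_contended() the check simply compiles to 0.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);

static void demo_scan(struct list_head *items)
{
	struct list_head *pos;

	read_lock(&demo_lock);
	list_for_each(pos, items) {
		/* ... examine the entry ... */

		if (rwlock_is_contended(&demo_lock)) {
			/* Let the waiting writer in; the caller restarts the scan. */
			read_unlock(&demo_lock);
			cond_resched();
			return;
		}
	}
	read_unlock(&demo_lock);
}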
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6e3a5eeec509..ef00bb22164c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -47,6 +47,7 @@ struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
+struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
@@ -65,7 +66,6 @@ struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
-struct io_uring_task;
/*
* Task state bitmask. NOTE! These bits are also
@@ -895,6 +895,9 @@ struct task_struct {
/* CLONE_CHILD_CLEARTID: */
int __user *clear_child_tid;
+ /* PF_IO_WORKER */
+ void *pf_io_worker;
+
u64 utime;
u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
@@ -1871,11 +1874,32 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling,
*/
-#ifndef CONFIG_PREEMPTION
-extern int _cond_resched(void);
+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
+extern int __cond_resched(void);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+DECLARE_STATIC_CALL(cond_resched, __cond_resched);
+
+static __always_inline int _cond_resched(void)
+{
+ return static_call_mod(cond_resched)();
+}
+
#else
+
+static inline int _cond_resched(void)
+{
+ return __cond_resched();
+}
+
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+#else
+
static inline int _cond_resched(void) { return 0; }
-#endif
+
+#endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
#define cond_resched() ({ \
___might_sleep(__FILE__, __LINE__, 0); \
@@ -1883,12 +1907,24 @@ static inline int _cond_resched(void) { return 0; }
})
extern int __cond_resched_lock(spinlock_t *lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock);
#define cond_resched_lock(lock) ({ \
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
})
+#define cond_resched_rwlock_read(lock) ({ \
+ __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+ __cond_resched_rwlock_read(lock); \
+})
+
+#define cond_resched_rwlock_write(lock) ({ \
+ __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+ __cond_resched_rwlock_write(lock); \
+})
+
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
@@ -1912,6 +1948,23 @@ static inline int spin_needbreak(spinlock_t *lock)
#endif
}
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically this does not depend on CONFIG_PREEMPTION, but it only
+ * matters where there is a general need for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return rwlock_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
@@ -1968,6 +2021,11 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
+#ifdef CONFIG_SMP
+/* Returns effective CPU energy utilization, as seen by the scheduler */
+unsigned long sched_cpu_util(int cpu, unsigned long max);
+#endif /* CONFIG_SMP */
+
#ifdef CONFIG_RSEQ
/*
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index 7d64feafc408..ab83d85e1183 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -11,16 +11,9 @@
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
* tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
* values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
*/
-#define MAX_USER_RT_PRIO 100
-#define MAX_RT_PRIO MAX_USER_RT_PRIO
+#define MAX_RT_PRIO 100
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
@@ -34,15 +27,6 @@
#define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO)
/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
-
-/*
* Convert nice value [19,-20] to rlimit style value [1,40].
*/
static inline long nice_to_rlimit(long nice)
diff --git a/include/linux/security.h b/include/linux/security.h
index c35ea0ffccd9..8aeebd6646dc 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -145,13 +145,16 @@ extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
-extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
-extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
-extern int cap_inode_need_killpriv(struct dentry *dentry);
-extern int cap_inode_killpriv(struct dentry *dentry);
-extern int cap_inode_getsecurity(struct inode *inode, const char *name,
- void **buffer, bool alloc);
+int cap_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+int cap_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name);
+int cap_inode_need_killpriv(struct dentry *dentry);
+int cap_inode_killpriv(struct user_namespace *mnt_userns,
+ struct dentry *dentry);
+int cap_inode_getsecurity(struct user_namespace *mnt_userns,
+ struct inode *inode, const char *name, void **buffer,
+ bool alloc);
extern int cap_mmap_addr(unsigned long addr);
extern int cap_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags);
@@ -324,6 +327,9 @@ void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
initxattrs initxattrs, void *fs_data);
+int security_inode_init_security_anon(struct inode *inode,
+ const struct qstr *name,
+ const struct inode *context_inode);
int security_old_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, const char **name,
void **value, size_t *len);
@@ -345,16 +351,21 @@ int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
int security_inode_permission(struct inode *inode, int mask);
int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
int security_inode_getattr(const struct path *path);
-int security_inode_setxattr(struct dentry *dentry, const char *name,
+int security_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
int security_inode_getxattr(struct dentry *dentry, const char *name);
int security_inode_listxattr(struct dentry *dentry);
-int security_inode_removexattr(struct dentry *dentry, const char *name);
+int security_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name);
int security_inode_need_killpriv(struct dentry *dentry);
-int security_inode_killpriv(struct dentry *dentry);
-int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc);
+int security_inode_killpriv(struct user_namespace *mnt_userns,
+ struct dentry *dentry);
+int security_inode_getsecurity(struct user_namespace *mnt_userns,
+ struct inode *inode, const char *name,
+ void **buffer, bool alloc);
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
void security_inode_getsecid(struct inode *inode, u32 *secid);
@@ -738,6 +749,13 @@ static inline int security_inode_init_security(struct inode *inode,
return 0;
}
+static inline int security_inode_init_security_anon(struct inode *inode,
+ const struct qstr *name,
+ const struct inode *context_inode)
+{
+ return 0;
+}
+
static inline int security_old_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
@@ -831,8 +849,9 @@ static inline int security_inode_getattr(const struct path *path)
return 0;
}
-static inline int security_inode_setxattr(struct dentry *dentry,
- const char *name, const void *value, size_t size, int flags)
+static inline int security_inode_setxattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
{
return cap_inode_setxattr(dentry, name, value, size, flags);
}
@@ -852,10 +871,11 @@ static inline int security_inode_listxattr(struct dentry *dentry)
return 0;
}
-static inline int security_inode_removexattr(struct dentry *dentry,
- const char *name)
+static inline int security_inode_removexattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry,
+ const char *name)
{
- return cap_inode_removexattr(dentry, name);
+ return cap_inode_removexattr(mnt_userns, dentry, name);
}
static inline int security_inode_need_killpriv(struct dentry *dentry)
@@ -863,14 +883,18 @@ static inline int security_inode_need_killpriv(struct dentry *dentry)
return cap_inode_need_killpriv(dentry);
}
-static inline int security_inode_killpriv(struct dentry *dentry)
+static inline int security_inode_killpriv(struct user_namespace *mnt_userns,
+ struct dentry *dentry)
{
- return cap_inode_killpriv(dentry);
+ return cap_inode_killpriv(mnt_userns, dentry);
}
-static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
+static inline int security_inode_getsecurity(struct user_namespace *mnt_userns,
+ struct inode *inode,
+ const char *name, void **buffer,
+ bool alloc)
{
- return cap_inode_getsecurity(inode, name, buffer, alloc);
+ return cap_inode_getsecurity(mnt_userns, inode, name, buffer, alloc);
}
static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
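The hunks above thread the mount's user namespace (struct user_namespace *mnt_userns, used for idmapped mounts) through the xattr, killpriv and getsecurity hooks. A hedged kernel-context sketch of the new calling convention follows; only the security_inode_removexattr() prototype comes from this header, the surrounding helper is illustrative and not taken from the patch.

/*
 * Sketch only: a caller on the removexattr path now passes the mount's
 * user namespace so capability/ownership checks can map ids correctly.
 */
static int example_vfs_removexattr(struct user_namespace *mnt_userns,
				   struct dentry *dentry, const char *name)
{
	int error;

	/* With CONFIG_SECURITY=n this resolves to cap_inode_removexattr(). */
	error = security_inode_removexattr(mnt_userns, dentry, name);
	if (error)
		return error;

	/* ... the actual xattr removal would follow here ... */
	return 0;
}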
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
deleted file mode 100644
index e0e1597ef9e6..000000000000
--- a/include/linux/sfi.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/* sfi.h Simple Firmware Interface */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#ifndef _LINUX_SFI_H
-#define _LINUX_SFI_H
-
-#include <linux/init.h>
-#include <linux/types.h>
-
-/* Table signatures reserved by the SFI specification */
-#define SFI_SIG_SYST "SYST"
-#define SFI_SIG_FREQ "FREQ"
-#define SFI_SIG_IDLE "IDLE"
-#define SFI_SIG_CPUS "CPUS"
-#define SFI_SIG_MTMR "MTMR"
-#define SFI_SIG_MRTC "MRTC"
-#define SFI_SIG_MMAP "MMAP"
-#define SFI_SIG_APIC "APIC"
-#define SFI_SIG_XSDT "XSDT"
-#define SFI_SIG_WAKE "WAKE"
-#define SFI_SIG_DEVS "DEVS"
-#define SFI_SIG_GPIO "GPIO"
-
-#define SFI_SIGNATURE_SIZE 4
-#define SFI_OEM_ID_SIZE 6
-#define SFI_OEM_TABLE_ID_SIZE 8
-
-#define SFI_NAME_LEN 16
-
-#define SFI_SYST_SEARCH_BEGIN 0x000E0000
-#define SFI_SYST_SEARCH_END 0x000FFFFF
-
-#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
- ((ptable->header.len - sizeof(struct sfi_table_header)) / \
- (sizeof(entry_type)))
-/*
- * Table structures must be byte-packed to match the SFI specification,
- * as they are provided by the BIOS.
- */
-struct sfi_table_header {
- char sig[SFI_SIGNATURE_SIZE];
- u32 len;
- u8 rev;
- u8 csum;
- char oem_id[SFI_OEM_ID_SIZE];
- char oem_table_id[SFI_OEM_TABLE_ID_SIZE];
-} __packed;
-
-struct sfi_table_simple {
- struct sfi_table_header header;
- u64 pentry[1];
-} __packed;
-
-/* Comply with UEFI spec 2.1 */
-struct sfi_mem_entry {
- u32 type;
- u64 phys_start;
- u64 virt_start;
- u64 pages;
- u64 attrib;
-} __packed;
-
-struct sfi_cpu_table_entry {
- u32 apic_id;
-} __packed;
-
-struct sfi_cstate_table_entry {
- u32 hint; /* MWAIT hint */
- u32 latency; /* latency in ms */
-} __packed;
-
-struct sfi_apic_table_entry {
- u64 phys_addr; /* phy base addr for APIC reg */
-} __packed;
-
-struct sfi_freq_table_entry {
- u32 freq_mhz; /* in MHZ */
- u32 latency; /* transition latency in ms */
- u32 ctrl_val; /* value to write to PERF_CTL */
-} __packed;
-
-struct sfi_wake_table_entry {
- u64 phys_addr; /* pointer to where the wake vector locates */
-} __packed;
-
-struct sfi_timer_table_entry {
- u64 phys_addr; /* phy base addr for the timer */
- u32 freq_hz; /* in HZ */
- u32 irq;
-} __packed;
-
-struct sfi_rtc_table_entry {
- u64 phys_addr; /* phy base addr for the RTC */
- u32 irq;
-} __packed;
-
-struct sfi_device_table_entry {
- u8 type; /* bus type, I2C, SPI or ...*/
-#define SFI_DEV_TYPE_SPI 0
-#define SFI_DEV_TYPE_I2C 1
-#define SFI_DEV_TYPE_UART 2
-#define SFI_DEV_TYPE_HSI 3
-#define SFI_DEV_TYPE_IPC 4
-#define SFI_DEV_TYPE_SD 5
-
- u8 host_num; /* attached to host 0, 1...*/
- u16 addr;
- u8 irq;
- u32 max_freq;
- char name[SFI_NAME_LEN];
-} __packed;
-
-struct sfi_gpio_table_entry {
- char controller_name[SFI_NAME_LEN];
- u16 pin_no;
- char pin_name[SFI_NAME_LEN];
-} __packed;
-
-typedef int (*sfi_table_handler) (struct sfi_table_header *table);
-
-#ifdef CONFIG_SFI
-extern void __init sfi_init(void);
-extern int __init sfi_platform_init(void);
-extern void __init sfi_init_late(void);
-extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
- sfi_table_handler handler);
-
-extern int sfi_disabled;
-static inline void disable_sfi(void)
-{
- sfi_disabled = 1;
-}
-
-#else /* !CONFIG_SFI */
-
-static inline void sfi_init(void)
-{
-}
-
-static inline void sfi_init_late(void)
-{
-}
-
-#define sfi_disabled 0
-
-static inline int sfi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- sfi_table_handler handler)
-{
- return -1;
-}
-
-#endif /* !CONFIG_SFI */
-
-#endif /*_LINUX_SFI_H*/
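Although the header is being deleted, its SFI_GET_NUM_ENTRIES() macro is a compact example of sizing a variable-length firmware table: entry count = (table length - header size) / entry size. A small userspace re-creation of that arithmetic, using the packed layouts quoted above; the table contents are made-up stand-ins, not data from real firmware.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the byte-packed layout from the removed header. */
struct sfi_table_header {
	char     sig[4];
	uint32_t len;		/* total table length in bytes */
	uint8_t  rev;
	uint8_t  csum;
	char     oem_id[6];
	char     oem_table_id[8];
} __attribute__((packed));

struct sfi_cpu_table_entry {
	uint32_t apic_id;
} __attribute__((packed));

/* Same computation as SFI_GET_NUM_ENTRIES(ptable, entry_type). */
#define NUM_ENTRIES(hdr, entry_type) \
	(((hdr)->len - sizeof(struct sfi_table_header)) / sizeof(entry_type))

int main(void)
{
	/* Pretend the firmware reported a 4-CPU table. */
	struct sfi_table_header hdr = {
		.len = sizeof(hdr) + 4 * sizeof(struct sfi_cpu_table_entry),
	};

	printf("entries = %zu\n",
	       NUM_ENTRIES(&hdr, struct sfi_cpu_table_entry));	/* prints 4 */
	return 0;
}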
diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h
deleted file mode 100644
index a6e555cbe05c..000000000000
--- a/include/linux/sfi_acpi.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* sfi.h Simple Firmware Interface */
-
-/*
-
- This file is provided under a dual BSD/GPLv2 license. When using or
- redistributing this file, you may do so under either license.
-
- GPL LICENSE SUMMARY
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of version 2 of the GNU General Public License as
- published by the Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- The full GNU General Public License is included in this distribution
- in the file called LICENSE.GPL.
-
- BSD LICENSE
-
- Copyright(c) 2009 Intel Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#ifndef _LINUX_SFI_ACPI_H
-#define _LINUX_SFI_ACPI_H
-
-#include <linux/acpi.h>
-#include <linux/sfi.h>
-
-#ifdef CONFIG_SFI
-extern int sfi_acpi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- int (*handler)(struct acpi_table_header *));
-
-static inline int __init acpi_sfi_table_parse(char *signature,
- int (*handler)(struct acpi_table_header *))
-{
- if (!acpi_table_parse(signature, handler))
- return 0;
-
- return sfi_acpi_table_parse(signature, NULL, NULL, handler);
-}
-#else /* !CONFIG_SFI */
-static inline int sfi_acpi_table_parse(char *signature, char *oem_id,
- char *oem_table_id,
- int (*handler)(struct acpi_table_header *))
-{
- return -1;
-}
-
-static inline int __init acpi_sfi_table_parse(char *signature,
- int (*handler)(struct acpi_table_header *))
-{
- return acpi_table_parse(signature, handler);
-}
-#endif /* !CONFIG_SFI */
-
-#endif /*_LINUX_SFI_ACPI_H*/
diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
deleted file mode 100644
index 50161b6afb61..000000000000
--- a/include/linux/sirfsoc_dma.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SIRFSOC_DMA_H_
-#define _SIRFSOC_DMA_H_
-
-bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
-
-#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5f60c9e907c9..6d0a33d1c0db 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -430,28 +430,32 @@ enum {
/* device driver is going to provide hardware time stamp */
SKBTX_IN_PROGRESS = 1 << 2,
- /* device driver supports TX zero-copy buffers */
- SKBTX_DEV_ZEROCOPY = 1 << 3,
-
/* generate wifi status information (where possible) */
SKBTX_WIFI_STATUS = 1 << 4,
- /* This indicates at least one fragment might be overwritten
- * (as in vmsplice(), sendfile() ...)
- * If we need to compute a TX checksum, we'll need to copy
- * all frags to avoid possible bad checksum
- */
- SKBTX_SHARED_FRAG = 1 << 5,
-
/* generate software time stamp when entering packet scheduling */
SKBTX_SCHED_TSTAMP = 1 << 6,
};
-#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
+/* Definitions for flags in struct skb_shared_info */
+enum {
+ /* use zcopy routines */
+ SKBFL_ZEROCOPY_ENABLE = BIT(0),
+
+ /* This indicates at least one fragment might be overwritten
+ * (as in vmsplice(), sendfile() ...)
+ * If we need to compute a TX checksum, we'll need to copy
+ * all frags to avoid possible bad checksum
+ */
+ SKBFL_SHARED_FRAG = BIT(1),
+};
+
+#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+
/*
* The callback notifies userspace to release buffers when skb DMA is done in
* lower device, the skb last reference should be 0 when calling this.
@@ -461,7 +465,8 @@ enum {
* The desc field is used to track userspace buffer index.
*/
struct ubuf_info {
- void (*callback)(struct ubuf_info *, bool zerocopy_success);
+ void (*callback)(struct sk_buff *, struct ubuf_info *,
+ bool zerocopy_success);
union {
struct {
unsigned long desc;
@@ -475,6 +480,7 @@ struct ubuf_info {
};
};
refcount_t refcnt;
+ u8 flags;
struct mmpin {
struct user_struct *user;
@@ -487,19 +493,14 @@ struct ubuf_info {
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);
-struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
-struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
- struct ubuf_info *uarg);
-
-static inline void sock_zerocopy_get(struct ubuf_info *uarg)
-{
- refcount_inc(&uarg->refcnt);
-}
+struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
+struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
+ struct ubuf_info *uarg);
-void sock_zerocopy_put(struct ubuf_info *uarg);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
+void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
-void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
+void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool success);
int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
@@ -510,7 +511,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
* the end of the header data, ie. at skb->end.
*/
struct skb_shared_info {
- __u8 __unused;
+ __u8 flags;
__u8 meta_len;
__u8 nr_frags;
__u8 tx_flags;
@@ -1086,6 +1087,8 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size);
+struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
+
/**
* alloc_skb - allocate a network buffer
* @size: size to allocate
@@ -1352,8 +1355,8 @@ void
skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
- u16 *ctinfo_map,
- size_t mapsize);
+ u16 *ctinfo_map, size_t mapsize,
+ bool post_ct);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
@@ -1438,11 +1441,22 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
- bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
+ bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
return is_zcopy ? skb_uarg(skb) : NULL;
}
+static inline void net_zcopy_get(struct ubuf_info *uarg)
+{
+ refcount_inc(&uarg->refcnt);
+}
+
+static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
+{
+ skb_shinfo(skb)->destructor_arg = uarg;
+ skb_shinfo(skb)->flags |= uarg->flags;
+}
+
static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
bool *have_ref)
{
@@ -1450,16 +1464,15 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
if (unlikely(have_ref && *have_ref))
*have_ref = false;
else
- sock_zerocopy_get(uarg);
- skb_shinfo(skb)->destructor_arg = uarg;
- skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+ net_zcopy_get(uarg);
+ skb_zcopy_init(skb, uarg);
}
}
static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
- skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+ skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
}
static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
@@ -1472,33 +1485,32 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}
-/* Release a reference on a zerocopy structure */
-static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
+static inline void net_zcopy_put(struct ubuf_info *uarg)
{
- struct ubuf_info *uarg = skb_zcopy(skb);
+ if (uarg)
+ uarg->callback(NULL, uarg, true);
+}
+static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
+{
if (uarg) {
- if (skb_zcopy_is_nouarg(skb)) {
- /* no notification callback */
- } else if (uarg->callback == sock_zerocopy_callback) {
- uarg->zerocopy = uarg->zerocopy && zerocopy;
- sock_zerocopy_put(uarg);
- } else {
- uarg->callback(uarg, zerocopy);
- }
-
- skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ if (uarg->callback == msg_zerocopy_callback)
+ msg_zerocopy_put_abort(uarg, have_uref);
+ else if (have_uref)
+ net_zcopy_put(uarg);
}
}
-/* Abort a zerocopy operation and revert zckey on error in send syscall */
-static inline void skb_zcopy_abort(struct sk_buff *skb)
+/* Release a reference on a zerocopy structure */
+static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
{
struct ubuf_info *uarg = skb_zcopy(skb);
if (uarg) {
- sock_zerocopy_put_abort(uarg, false);
- skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ if (!skb_zcopy_is_nouarg(skb))
+ uarg->callback(skb, uarg, zerocopy_success);
+
+ skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
}
}
@@ -2777,7 +2789,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
if (likely(!skb_zcopy(skb)))
return 0;
if (!skb_zcopy_is_nouarg(skb) &&
- skb_uarg(skb)->callback == sock_zerocopy_callback)
+ skb_uarg(skb)->callback == msg_zerocopy_callback)
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
@@ -2808,7 +2820,26 @@ void skb_queue_purge(struct sk_buff_head *list);
unsigned int skb_rbtree_purge(struct rb_root *root);
-void *netdev_alloc_frag(unsigned int fragsz);
+void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+static inline void *netdev_alloc_frag(unsigned int fragsz)
+{
+ return __netdev_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *netdev_alloc_frag_align(unsigned int fragsz,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __netdev_alloc_frag_align(fragsz, -align);
+}
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
gfp_t gfp_mask);
@@ -2867,7 +2898,20 @@ static inline void skb_free_frag(void *addr)
page_frag_free(addr);
}
-void *napi_alloc_frag(unsigned int fragsz);
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
+
+static inline void *napi_alloc_frag(unsigned int fragsz)
+{
+ return __napi_alloc_frag_align(fragsz, ~0u);
+}
+
+static inline void *napi_alloc_frag_align(unsigned int fragsz,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __napi_alloc_frag_align(fragsz, -align);
+}
+
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
@@ -2877,7 +2921,7 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
}
void napi_consume_skb(struct sk_buff *skb, int budget);
-void __kfree_skb_flush(void);
+void napi_skb_free_stolen_head(struct sk_buff *skb);
void __kfree_skb_defer(struct sk_buff *skb);
/**
@@ -2929,12 +2973,28 @@ static inline struct page *dev_alloc_page(void)
}
/**
+ * dev_page_is_reusable - check whether a page can be reused for network Rx
+ * @page: the page to test
+ *
+ * A page shouldn't be considered for reusing/recycling if it was allocated
+ * under memory pressure or at a distant memory node.
+ *
+ * Returns false if this page should be returned to page allocator, true
+ * otherwise.
+ */
+static inline bool dev_page_is_reusable(const struct page *page)
+{
+ return likely(page_to_nid(page) == numa_mem_id() &&
+ !page_is_pfmemalloc(page));
+}
+
+/**
* skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
* @page: The page that was allocated from skb_alloc_page
* @skb: The skb that may need pfmemalloc set
*/
-static inline void skb_propagate_pfmemalloc(struct page *page,
- struct sk_buff *skb)
+static inline void skb_propagate_pfmemalloc(const struct page *page,
+ struct sk_buff *skb)
{
if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
@@ -3324,7 +3384,7 @@ static inline int skb_linearize(struct sk_buff *skb)
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+ skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
}
/**
@@ -3849,7 +3909,7 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
void skb_complete_tx_timestamp(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps);
-void __skb_tstamp_tx(struct sk_buff *orig_skb,
+void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
struct skb_shared_hwtstamps *hwtstamps,
struct sock *sk, int tstype);
@@ -4611,6 +4671,11 @@ static inline void skb_reset_redirect(struct sk_buff *skb)
#endif
}
+static inline bool skb_csum_is_sctp(struct sk_buff *skb)
+{
+ return skb->csum_not_inet;
+}
+
static inline void skb_set_kcov_handle(struct sk_buff *skb,
const u64 kcov_handle)
{
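The new netdev_alloc_frag_align()/napi_alloc_frag_align() helpers above encode the alignment as a mask: ~0u means "no constraint", and -align equals ~(align - 1) when align is a power of two, which is why the header checks is_power_of_2(align). A self-contained userspace demonstration of that mask arithmetic; align_down() is illustrative, in the kernel the mask is consumed by the page_frag allocator.

#include <assert.h>
#include <stdio.h>

/* align must be a power of two, as WARN_ON_ONCE() checks in the header. */
static unsigned int align_down(unsigned int addr, unsigned int align_mask)
{
	return addr & align_mask;
}

int main(void)
{
	unsigned int addr = 0x12345;

	/* ~0u: no alignment constraint, the value is unchanged. */
	assert(align_down(addr, ~0u) == addr);

	/* -64u == ~(64 - 1): clears the low 6 bits, i.e. aligns down to 64. */
	assert(align_down(addr, -64u) == (addr & ~63u));
	printf("0x%x -> 0x%x\n", addr, align_down(addr, -64u));
	return 0;
}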
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index fec0c5ac1c4f..8edbbf5f2f93 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -390,7 +390,6 @@ static inline struct sk_psock *sk_psock_get(struct sock *sk)
}
void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
-void sk_psock_destroy(struct rcu_head *rcu);
void sk_psock_drop(struct sock *sk, struct sk_psock *psock);
static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index be4ba5867ac5..7ae604076767 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -186,6 +186,8 @@ void kfree(const void *);
void kfree_sensitive(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);
+bool kmem_valid_obj(void *object);
+void kmem_dump_obj(void *object);
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
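kmem_valid_obj() and kmem_dump_obj() added above are debugging aids for suspect pointers. A hedged kernel-context sketch of the apparent usage pattern; only those two declarations come from slab.h, the surrounding function and pr_warn() message are illustrative.

/*
 * Sketch only: report provenance of a questionable pointer in an
 * error path.
 */
static void example_report_bad_pointer(void *ptr)
{
	if (kmem_valid_obj(ptr))
		kmem_dump_obj(ptr);	/* prints slab / allocation info */
	else
		pr_warn("pointer %px is not a valid slab object\n", ptr);
}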
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 9eb430c163c2..3aa5e1e73ab6 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H
+#include <linux/kfence.h>
#include <linux/reciprocal_div.h>
/*
@@ -114,6 +115,8 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
static inline int objs_per_slab_page(const struct kmem_cache *cache,
const struct page *page)
{
+ if (is_kfence_address(page_address(page)))
+ return 1;
return cache->num;
}
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 1be0ed5befa1..dcde82a4434c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -7,6 +7,7 @@
*
* (C) 2007 SGI, Christoph Lameter
*/
+#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
@@ -185,6 +186,8 @@ static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
const struct page *page, void *obj)
{
+ if (is_kfence_address(obj))
+ return 0;
return __obj_to_index(cache, page_address(page), obj);
}
diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h
index 8e884e0dda0a..f2b768852777 100644
--- a/include/linux/soc/brcmstb/brcmstb.h
+++ b/include/linux/soc/brcmstb/brcmstb.h
@@ -2,6 +2,8 @@
#ifndef __BRCMSTB_SOC_H
#define __BRCMSTB_SOC_H
+#include <linux/kconfig.h>
+
static inline u32 BRCM_ID(u32 reg)
{
return reg >> 28 ? reg >> 16 : reg >> 8;
@@ -12,6 +14,8 @@ static inline u32 BRCM_REV(u32 reg)
return reg & 0xff;
}
+#if IS_ENABLED(CONFIG_SOC_BRCMSTB)
+
/*
* Helper functions for getting family or product id from the
* SoC driver.
@@ -19,4 +23,16 @@ static inline u32 BRCM_REV(u32 reg)
u32 brcmstb_get_family_id(void);
u32 brcmstb_get_product_id(void);
+#else
+static inline u32 brcmstb_get_family_id(void)
+{
+ return 0;
+}
+
+static inline u32 brcmstb_get_product_id(void)
+{
+ return 0;
+}
+#endif
+
#endif /* __BRCMSTB_SOC_H */
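The brcmstb change is the usual config-gated stub pattern: real prototypes when CONFIG_SOC_BRCMSTB is enabled, static inline fallbacks returning 0 otherwise, so callers never need their own #ifdef. A self-contained userspace illustration of the same pattern; FEATURE_X and feature_get_id() are made-up stand-ins, not kernel API.

#include <stdio.h>

/* Toggle this to mimic the Kconfig option being enabled or not. */
/* #define FEATURE_X 1 */

#ifdef FEATURE_X
unsigned int feature_get_id(void);		/* provided by the driver */
#else
static inline unsigned int feature_get_id(void)	/* stub: feature absent */
{
	return 0;
}
#endif

int main(void)
{
	/* The caller compiles either way and just tests the return value. */
	if (feature_get_id() == 0)
		printf("feature not available, using defaults\n");
	return 0;
}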
diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h
index ae2279fe830a..28c04d918f0f 100644
--- a/include/linux/soc/marvell/octeontx2/asm.h
+++ b/include/linux/soc/marvell/octeontx2/asm.h
@@ -22,8 +22,16 @@
: [rs]"r" (ioaddr)); \
(result); \
})
+#define cn10k_lmt_flush(val, addr) \
+({ \
+ __asm__ volatile(".cpu generic+lse\n" \
+ "steor %x[rf],[%[rs]]" \
+ : [rf]"+r"(val) \
+ : [rs]"r"(addr)); \
+})
#else
#define otx2_lmt_flush(ioaddr) ({ 0; })
+#define cn10k_lmt_flush(val, addr) ({ addr = val; })
#endif
#endif /* __SOC_OTX2_ASM_H */
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index e7842debc05d..4615a228da51 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -123,6 +123,14 @@
#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
+#define MT8167_TOP_AXI_PROT_EN_MM_EMI BIT(1)
+#define MT8167_TOP_AXI_PROT_EN_MCU_MFG BIT(2)
+#define MT8167_TOP_AXI_PROT_EN_CONN_EMI BIT(4)
+#define MT8167_TOP_AXI_PROT_EN_MFG_EMI BIT(5)
+#define MT8167_TOP_AXI_PROT_EN_CONN_MCU BIT(8)
+#define MT8167_TOP_AXI_PROT_EN_MCU_CONN BIT(9)
+#define MT8167_TOP_AXI_PROT_EN_MCU_MM BIT(11)
+
#define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1)
#define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2)
#define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8)
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 8e9996610978..ac6b5f3cba95 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -280,16 +280,4 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
void *data);
-/**
- * cmdq_pkt_flush() - trigger CMDQ to execute the CMDQ packet
- * @pkt: the CMDQ packet
- *
- * Return: 0 for success; else the error code is returned
- *
- * Trigger CMDQ to execute the CMDQ packet. Note that this is a
- * synchronous flush function. When the function returned, the recorded
- * commands have been done.
- */
-int cmdq_pkt_flush(struct cmdq_pkt *pkt);
-
#endif /* __MTK_CMDQ_H__ */
diff --git a/include/linux/soc/mediatek/mtk-mutex.h b/include/linux/soc/mediatek/mtk-mutex.h
new file mode 100644
index 000000000000..6fe4ffbde290
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk-mutex.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ */
+
+#ifndef MTK_MUTEX_H
+#define MTK_MUTEX_H
+
+struct regmap;
+struct device;
+struct mtk_mutex;
+
+struct mtk_mutex *mtk_mutex_get(struct device *dev);
+int mtk_mutex_prepare(struct mtk_mutex *mutex);
+void mtk_mutex_add_comp(struct mtk_mutex *mutex,
+ enum mtk_ddp_comp_id id);
+void mtk_mutex_enable(struct mtk_mutex *mutex);
+void mtk_mutex_disable(struct mtk_mutex *mutex);
+void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
+ enum mtk_ddp_comp_id id);
+void mtk_mutex_unprepare(struct mtk_mutex *mutex);
+void mtk_mutex_put(struct mtk_mutex *mutex);
+void mtk_mutex_acquire(struct mtk_mutex *mutex);
+void mtk_mutex_release(struct mtk_mutex *mutex);
+
+#endif /* MTK_MUTEX_H */
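The new mtk-mutex.h exposes a get/prepare/add_comp/enable lifecycle. A hedged kernel-context sketch of the call order suggested by those declarations; the error-return conventions are assumed, enum mtk_ddp_comp_id lives in the MediaTek display headers rather than this file, and DDP_COMPONENT_OVL0 is a hypothetical component id used only for illustration.

/* Sketch of the apparent lifecycle; not taken from the patch. */
static int example_mtk_mutex_use(struct device *dev)
{
	struct mtk_mutex *mutex;
	int ret;

	mutex = mtk_mutex_get(dev);
	if (IS_ERR(mutex))
		return PTR_ERR(mutex);

	ret = mtk_mutex_prepare(mutex);
	if (ret)
		goto out_put;

	mtk_mutex_add_comp(mutex, DDP_COMPONENT_OVL0);
	mtk_mutex_enable(mutex);

	/* ... display pipeline runs ... */

	mtk_mutex_disable(mutex);
	mtk_mutex_remove_comp(mutex, DDP_COMPONENT_OVL0);
	mtk_mutex_unprepare(mutex);
out_put:
	mtk_mutex_put(mutex);
	return ret;
}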
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 3db6797ba6ff..64fc582ae415 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -29,6 +29,7 @@
#define LLCC_AUDHW 22
#define LLCC_NPU 23
#define LLCC_WLHW 24
+#define LLCC_CVP 28
#define LLCC_MODPE 29
#define LLCC_APTCM 30
#define LLCC_WRCACHE 31
@@ -79,6 +80,7 @@ struct llcc_edac_reg_data {
* @bitmap: Bit map to track the active slice ids
* @offsets: Pointer to the bank offsets array
* @ecc_irq: interrupt for llcc cache error detection and reporting
+ * @major_version: Indicates the LLCC major version
*/
struct llcc_drv_data {
struct regmap *regmap;
@@ -91,6 +93,7 @@ struct llcc_drv_data {
unsigned long *bitmap;
u32 *offsets;
int ecc_irq;
+ u32 major_version;
};
#if IS_ENABLED(CONFIG_QCOM_LLCC)
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index e600baec6825..afd47217996b 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -11,6 +11,8 @@
struct device;
struct firmware;
+#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
+
ssize_t qcom_mdt_get_size(const struct firmware *fw);
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
@@ -23,4 +25,37 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
phys_addr_t *reloc_base);
void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len);
+#else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
+
+static inline ssize_t qcom_mdt_get_size(const struct firmware *fw)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size, phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_mdt_load_no_init(struct device *dev,
+ const struct firmware *fw,
+ const char *fw_name, int pas_id,
+ void *mem_region, phys_addr_t mem_phys,
+ size_t mem_size,
+ phys_addr_t *reloc_base)
+{
+ return -ENODEV;
+}
+
+static inline void *qcom_mdt_read_metadata(const struct firmware *fw,
+ size_t *data_len)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#endif /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
+
#endif
diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h
index 374d0fdb0743..1e3c92feea6e 100644
--- a/include/linux/sony-laptop.h
+++ b/include/linux/sony-laptop.h
@@ -31,7 +31,7 @@
#if IS_ENABLED(CONFIG_SONY_LAPTOP)
int sony_pic_camera_command(int command, u8 value);
#else
-static inline int sony_pic_camera_command(int command, u8 value) { return 0; };
+static inline int sony_pic_camera_command(int command, u8 value) { return 0; }
#endif
#endif /* __KERNEL__ */
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index f0b01b728640..d08039d65825 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -1005,6 +1005,8 @@ int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
int sdw_read(struct sdw_slave *slave, u32 addr);
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
+int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value);
+int sdw_read_no_pm(struct sdw_slave *slave, u32 addr);
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
diff --git a/include/linux/spi/ifx_modem.h b/include/linux/spi/ifx_modem.h
deleted file mode 100644
index 6d19b09139d0..000000000000
--- a/include/linux/spi/ifx_modem.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_IFX_MODEM_H
-#define LINUX_IFX_MODEM_H
-
-struct ifx_modem_platform_data {
- unsigned short tx_pwr; /* modem power threshold */
- unsigned char modem_type; /* Modem type */
- unsigned long max_hz; /* max SPI frequency */
- unsigned short use_dma:1; /* spi protocol driver supplies
- dma-able addrs */
-};
-#define IFX_MODEM_6160 1
-#define IFX_MODEM_6260 2
-
-#endif
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
deleted file mode 100644
index f237b2d062e9..000000000000
--- a/include/linux/spi/lms283gf05.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD
- *
- * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
-*/
-
-#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
-#define _INCLUDE_LINUX_SPI_LMS283GF05_H_
-
-struct lms283gf05_pdata {
- unsigned long reset_gpio;
- bool reset_inverted;
-};
-
-#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 159463cc659c..2b65c9edc34e 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -311,6 +311,9 @@ void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
bool spi_mem_default_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op);
+bool spi_mem_dtr_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op);
+
#else
static inline int
spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
@@ -334,6 +337,12 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
return false;
}
+static inline
+bool spi_mem_dtr_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ return false;
+}
#endif /* CONFIG_SPI_MEM */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index aa09fdc8042d..592897fa4f03 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -6,6 +6,7 @@
#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H
+#include <linux/bits.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
@@ -15,6 +16,8 @@
#include <linux/gpio/consumer.h>
#include <linux/ptp_clock_kernel.h>
+#include <uapi/linux/spi/spi.h>
+
struct dma_chan;
struct property_entry;
struct spi_controller;
@@ -164,28 +167,19 @@ struct spi_device {
u8 chip_select;
u8 bits_per_word;
bool rt;
+#define SPI_NO_TX BIT(31) /* no transmit wire */
+#define SPI_NO_RX BIT(30) /* no receive wire */
+ /*
+ * All bits defined above should be covered by SPI_MODE_KERNEL_MASK.
+ * The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart,
+ * which is defined in 'include/uapi/linux/spi/spi.h'.
+ * The bits defined here are from bit 31 downwards, while in
+ * SPI_MODE_USER_MASK are from 0 upwards.
+ * These bits must not overlap. A static assert check should make sure of that.
+ * If adding extra bits, make sure to decrease the bit index below as well.
+ */
+#define SPI_MODE_KERNEL_MASK (~(BIT(30) - 1))
u32 mode;
-#define SPI_CPHA 0x01 /* clock phase */
-#define SPI_CPOL 0x02 /* clock polarity */
-#define SPI_MODE_0 (0|0) /* (original MicroWire) */
-#define SPI_MODE_1 (0|SPI_CPHA)
-#define SPI_MODE_2 (SPI_CPOL|0)
-#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
-#define SPI_MODE_X_MASK (SPI_CPOL|SPI_CPHA)
-#define SPI_CS_HIGH 0x04 /* chipselect active high? */
-#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
-#define SPI_3WIRE 0x10 /* SI/SO signals shared */
-#define SPI_LOOP 0x20 /* loopback mode */
-#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */
-#define SPI_READY 0x80 /* slave pulls low to pause */
-#define SPI_TX_DUAL 0x100 /* transmit with 2 wires */
-#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
-#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
-#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
-#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
-#define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */
-#define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */
-#define SPI_3WIRE_HIZ 0x8000 /* high impedance turnaround */
int irq;
void *controller_state;
void *controller_data;
@@ -208,6 +202,10 @@ struct spi_device {
*/
};
+/* Make sure that SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK don't overlap */
+static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
+ "SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
+
static inline struct spi_device *to_spi_device(struct device *dev)
{
return dev ? container_of(dev, struct spi_device, dev) : NULL;
@@ -624,7 +622,7 @@ struct spi_controller {
/*
* These hooks are for drivers that use a generic implementation
- * of transfer_one_message() provied by the core.
+ * of transfer_one_message() provided by the core.
*/
void (*set_cs)(struct spi_device *spi, bool enable);
int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi,
@@ -827,6 +825,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
* transfer. If 0 the default (from @spi_device) is used.
* @bits_per_word: select a bits_per_word other than the device default
* for this transfer. If 0 the default (from @spi_device) is used.
+ * @dummy_data: indicates transfer is dummy bytes transfer.
* @cs_change: affects chipselect after this transfer completes
* @cs_change_delay: delay between cs deassert and assert when
* @cs_change is set and @spi_transfer is not the last in @spi_message
@@ -939,6 +938,7 @@ struct spi_transfer {
struct sg_table tx_sg;
struct sg_table rx_sg;
+ unsigned dummy_data:1;
unsigned cs_change:1;
unsigned tx_nbits:3;
unsigned rx_nbits:3;
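The reworked spi.h reserves kernel-internal mode bits from the top (bit 31 downwards) while the uapi bits grow from bit 0, and a static_assert guards against overlap. A self-contained userspace check of the same arithmetic; SPI_MODE_USER_MASK is approximated here as the bits 0..15 occupied by the user-visible flags listed in the removed block, the real definition lives in include/uapi/linux/spi/spi.h.

#include <assert.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))

/* Kernel-internal bits, counted down from bit 31 (as in the hunk above). */
#define SPI_NO_TX		BIT(31)
#define SPI_NO_RX		BIT(30)
#define SPI_MODE_KERNEL_MASK	(~(BIT(30) - 1))	/* bits 30..31 */

/* Stand-in for the uapi mask: user-visible mode bits occupy bits 0..15. */
#define SPI_MODE_USER_MASK	(BIT(16) - 1)

/* Same non-overlap guarantee the header enforces with static_assert(). */
static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
	      "kernel and user SPI mode bits must not overlap");

int main(void)
{
	/* If a new kernel-only bit were added at, say, bit 15, widening
	 * SPI_MODE_KERNEL_MASK to cover it would trip the assert above. */
	return 0;
}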
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index e432cc92c73d..a0895bbf71ce 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -60,6 +60,9 @@ void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);
+unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
+unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
+bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
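srcu.h gains a polled grace-period API mirroring the RCU one: record a cookie now, test it later without sleeping. A hedged kernel-context sketch of the intended usage; only the two prototypes come from the hunk above, "my_srcu" and the callers are illustrative.

/* Sketch only: non-blocking SRCU grace-period polling. */
DEFINE_STATIC_SRCU(my_srcu);

static unsigned long gp_cookie;

static void example_start(void)
{
	/* Record "a grace period that starts now" without blocking. */
	gp_cookie = start_poll_synchronize_srcu(&my_srcu);
}

static bool example_done(void)
{
	/* True once all readers that predate example_start() have finished. */
	return poll_state_synchronize_srcu(&my_srcu, gp_cookie);
}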
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 5a5a1941ca15..0e0cf4d6a72a 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -15,7 +15,8 @@
struct srcu_struct {
short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
- short srcu_idx; /* Current reader array element. */
+ unsigned short srcu_idx; /* Current reader array element in bit 0x2. */
+ unsigned short srcu_idx_max; /* Furthest future srcu_idx request. */
u8 srcu_gp_running; /* GP workqueue running? */
u8 srcu_gp_waiting; /* GP waiting for readers? */
struct swait_queue_head srcu_wq;
@@ -59,7 +60,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- idx = READ_ONCE(ssp->srcu_idx);
+ idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
return idx;
}
@@ -80,7 +81,7 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
{
int idx;
- idx = READ_ONCE(ssp->srcu_idx) & 0x1;
+ idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
tt, tf, idx,
READ_ONCE(ssp->srcu_lock_nesting[!idx]),
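Tiny SRCU now keeps srcu_idx as an unsigned counter, and both hunks select the reader array element with ((srcu_idx + 1) & 0x2) >> 1, so only bit 0x2 of the incremented counter matters. A runnable userspace check of that expression; the loop is purely illustrative.

#include <stdio.h>

/* Same expression Tiny SRCU now uses to pick the reader array element. */
static int reader_slot(unsigned short srcu_idx)
{
	return ((srcu_idx + 1) & 0x2) >> 1;
}

int main(void)
{
	/*
	 * Consecutive counter values alternate between the two
	 * srcu_lock_nesting[] slots in pairs: 0, 1, 1, 0, 0, 1, 1, 0, ...
	 */
	for (unsigned short ctr = 0; ctr < 8; ctr++)
		printf("srcu_idx=%u -> slot %d\n", ctr, reader_slot(ctr));
	return 0;
}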
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 31593b34608e..15ba0df1ee0d 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -76,7 +76,7 @@ static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
if (dev)
return !!(dev->dev->bus->sprom.boardflags_lo &
SSB_GIGE_BFL_ROBOSWITCH);
- return 0;
+ return false;
}
/* Returns whether we can only do one DMA at once. */
@@ -86,7 +86,7 @@ static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
if (dev)
return ((dev->dev->bus->chip_id == 0x4785) &&
(dev->dev->bus->chip_rev < 2));
- return 0;
+ return false;
}
/* Returns whether we must flush posted writes. */
@@ -159,7 +159,7 @@ static inline void ssb_gige_exit(void)
static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
{
@@ -167,19 +167,19 @@ static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
}
static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
{
- return 0;
+ return false;
}
static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
{
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 24d49c732341..6bb4bc1a5f54 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -21,4 +21,13 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
+#ifdef CONFIG_STACKDEPOT
+int stack_depot_init(void);
+#else
+static inline int stack_depot_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_STACKDEPOT */
+
#endif
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index 695da4c9b338..85ecc789f4ff 100644
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -107,26 +107,10 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)
-/*
- * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
- * the symbol table so that objtool can reference it when it generates the
- * .static_call_sites section.
- */
-#define __static_call(name) \
-({ \
- __ADDRESSABLE(STATIC_CALL_KEY(name)); \
- &STATIC_CALL_TRAMP(name); \
-})
-
#else
#define STATIC_CALL_TRAMP_ADDR(name) NULL
#endif
-
-#define DECLARE_STATIC_CALL(name, func) \
- extern struct static_call_key STATIC_CALL_KEY(name); \
- extern typeof(func) STATIC_CALL_TRAMP(name);
-
#define static_call_update(name, func) \
({ \
BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name))); \
@@ -154,17 +138,25 @@ struct static_call_key {
};
};
+/* For finding the key associated with a trampoline */
+struct static_call_tramp_key {
+ s32 tramp;
+ s32 key;
+};
+
extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
extern int static_call_mod_init(struct module *mod);
extern int static_call_text_reserved(void *start, void *end);
-#define DEFINE_STATIC_CALL(name, _func) \
+extern long __static_call_return0(void);
+
+#define __DEFINE_STATIC_CALL(name, _func, _func_init) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
- .func = _func, \
+ .func = _func_init, \
.type = 1, \
}; \
- ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
@@ -174,17 +166,23 @@ extern int static_call_text_reserved(void *start, void *end);
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
-#define static_call(name) __static_call(name)
#define static_call_cond(name) (void)__static_call(name)
#define EXPORT_STATIC_CALL(name) \
EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
-
#define EXPORT_STATIC_CALL_GPL(name) \
EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+/* Leave the key unexported, so modules can't change static call targets: */
+#define EXPORT_STATIC_CALL_TRAMP(name) \
+ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name)); \
+ ARCH_ADD_TRAMP_KEY(name)
+#define EXPORT_STATIC_CALL_TRAMP_GPL(name) \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name)); \
+ ARCH_ADD_TRAMP_KEY(name)
+
#elif defined(CONFIG_HAVE_STATIC_CALL)
static inline int static_call_init(void) { return 0; }
@@ -193,12 +191,12 @@ struct static_call_key {
void *func;
};
-#define DEFINE_STATIC_CALL(name, _func) \
+#define __DEFINE_STATIC_CALL(name, _func, _func_init) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
- .func = _func, \
+ .func = _func_init, \
}; \
- ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
@@ -207,7 +205,6 @@ struct static_call_key {
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
-#define static_call(name) __static_call(name)
#define static_call_cond(name) (void)__static_call(name)
static inline
@@ -224,14 +221,24 @@ static inline int static_call_text_reserved(void *start, void *end)
return 0;
}
+static inline long __static_call_return0(void)
+{
+ return 0;
+}
+
#define EXPORT_STATIC_CALL(name) \
EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
-
#define EXPORT_STATIC_CALL_GPL(name) \
EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+/* Leave the key unexported, so modules can't change static call targets: */
+#define EXPORT_STATIC_CALL_TRAMP(name) \
+ EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
+#define EXPORT_STATIC_CALL_TRAMP_GPL(name) \
+ EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
+
#else /* Generic implementation */
static inline int static_call_init(void) { return 0; }
@@ -240,10 +247,15 @@ struct static_call_key {
void *func;
};
-#define DEFINE_STATIC_CALL(name, _func) \
+static inline long __static_call_return0(void)
+{
+ return 0;
+}
+
+#define __DEFINE_STATIC_CALL(name, _func, _func_init) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
- .func = _func, \
+ .func = _func_init, \
}
#define DEFINE_STATIC_CALL_NULL(name, _func) \
@@ -252,9 +264,6 @@ struct static_call_key {
.func = NULL, \
}
-#define static_call(name) \
- ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
-
static inline void __static_call_nop(void) { }
/*
@@ -295,4 +304,10 @@ static inline int static_call_text_reserved(void *start, void *end)
#endif /* CONFIG_HAVE_STATIC_CALL */
+#define DEFINE_STATIC_CALL(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, _func)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
+
#endif /* _LINUX_STATIC_CALL_H */
diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
index 89135bb35bf7..ae5662d368b9 100644
--- a/include/linux/static_call_types.h
+++ b/include/linux/static_call_types.h
@@ -4,11 +4,13 @@
#include <linux/types.h>
#include <linux/stringify.h>
+#include <linux/compiler.h>
#define STATIC_CALL_KEY_PREFIX __SCK__
#define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX)
#define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
#define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name)
+#define STATIC_CALL_KEY_STR(name) __stringify(STATIC_CALL_KEY(name))
#define STATIC_CALL_TRAMP_PREFIX __SCT__
#define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX)
@@ -32,4 +34,52 @@ struct static_call_site {
s32 key;
};
+#define DECLARE_STATIC_CALL(name, func) \
+ extern struct static_call_key STATIC_CALL_KEY(name); \
+ extern typeof(func) STATIC_CALL_TRAMP(name);
+
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __raw_static_call(name) (&STATIC_CALL_TRAMP(name))
+
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+
+/*
+ * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
+ * the symbol table so that objtool can reference it when it generates the
+ * .static_call_sites section.
+ */
+#define __STATIC_CALL_ADDRESSABLE(name) \
+ __ADDRESSABLE(STATIC_CALL_KEY(name))
+
+#define __static_call(name) \
+({ \
+ __STATIC_CALL_ADDRESSABLE(name); \
+ __raw_static_call(name); \
+})
+
+#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#define __STATIC_CALL_ADDRESSABLE(name)
+#define __static_call(name) __raw_static_call(name)
+
+#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#ifdef MODULE
+#define __STATIC_CALL_MOD_ADDRESSABLE(name)
+#define static_call_mod(name) __raw_static_call(name)
+#else
+#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
+#define static_call_mod(name) __static_call(name)
+#endif
+
+#define static_call(name) __static_call(name)
+
+#else
+
+#define static_call(name) \
+ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
+
+#endif /* CONFIG_HAVE_STATIC_CALL */
+
#endif /* _STATIC_CALL_TYPES_H */
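When CONFIG_HAVE_STATIC_CALL is off, static_call(name) in the hunk above degenerates to casting the key's void *func back to the right function-pointer type and calling through it. A self-contained userspace miniature of that generic fallback; the struct and macro names here are simplified stand-ins, and the void * round-trip of a function pointer mirrors the kernel's usage (a GCC/Clang-supported extension rather than strict ISO C).

#include <stdio.h>

/* Miniature of the generic fallback: a key just holds a function pointer. */
struct demo_static_call_key {
	void *func;
};

static int add_one(int x) { return x + 1; }
static int add_two(int x) { return x + 2; }

static struct demo_static_call_key demo_key = { .func = (void *)add_one };

/* Cast .func back to the trampoline's prototype, like
 * ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func)). */
#define demo_static_call()	((int (*)(int))demo_key.func)

int main(void)
{
	printf("%d\n", demo_static_call()(41));	/* 42 */

	/* "static_call_update": retarget by swapping the stored pointer. */
	demo_key.func = (void *)add_two;
	printf("%d\n", demo_static_call()(41));	/* 43 */
	return 0;
}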
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 15ca6b4167cc..a302982de2d7 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -202,5 +202,6 @@ struct plat_stmmacenet_data {
bool vlan_fail_q_en;
u8 vlan_fail_q;
unsigned int eee_usecs_rate;
+ struct pci_dev *pdev;
};
#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index 4fcfb56abcf5..9521d8cab18e 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -266,287 +266,7 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob
void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
-
-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
-extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
-extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
-extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
-extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
-extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
-extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
-extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
-extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
-extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
-extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
-#else
-#define __underlying_memchr __builtin_memchr
-#define __underlying_memcmp __builtin_memcmp
-#define __underlying_memcpy __builtin_memcpy
-#define __underlying_memmove __builtin_memmove
-#define __underlying_memset __builtin_memset
-#define __underlying_strcat __builtin_strcat
-#define __underlying_strcpy __builtin_strcpy
-#define __underlying_strlen __builtin_strlen
-#define __underlying_strncat __builtin_strncat
-#define __underlying_strncpy __builtin_strncpy
-#endif
-
-__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 1);
- if (__builtin_constant_p(size) && p_size < size)
- __write_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __underlying_strncpy(p, q, size);
-}
-
-__FORTIFY_INLINE char *strcat(char *p, const char *q)
-{
- size_t p_size = __builtin_object_size(p, 1);
- if (p_size == (size_t)-1)
- return __underlying_strcat(p, q);
- if (strlcat(p, q, p_size) >= p_size)
- fortify_panic(__func__);
- return p;
-}
-
-__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
-{
- __kernel_size_t ret;
- size_t p_size = __builtin_object_size(p, 1);
-
- /* Work around gcc excess stack consumption issue */
- if (p_size == (size_t)-1 ||
- (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
- return __underlying_strlen(p);
- ret = strnlen(p, p_size);
- if (p_size <= ret)
- fortify_panic(__func__);
- return ret;
-}
-
-extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
-__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
-{
- size_t p_size = __builtin_object_size(p, 1);
- __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
- if (p_size <= ret && maxlen != ret)
- fortify_panic(__func__);
- return ret;
-}
-
-/* defined after fortified strlen to reuse it */
-extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
-__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
-{
- size_t ret;
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __real_strlcpy(p, q, size);
- ret = strlen(q);
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
- if (__builtin_constant_p(len) && len >= p_size)
- __write_overflow();
- if (len >= p_size)
- fortify_panic(__func__);
- __underlying_memcpy(p, q, len);
- p[len] = '\0';
- }
- return ret;
-}
-
-/* defined after fortified strnlen to reuse it */
-extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
-__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
-{
- size_t len;
- /* Use string size rather than possible enclosing struct size. */
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
-
- /* If we cannot get size of p and q default to call strscpy. */
- if (p_size == (size_t) -1 && q_size == (size_t) -1)
- return __real_strscpy(p, q, size);
-
- /*
- * If size can be known at compile time and is greater than
- * p_size, generate a compile time write overflow error.
- */
- if (__builtin_constant_p(size) && size > p_size)
- __write_overflow();
-
- /*
- * This call protects from read overflow, because len will default to q
- * length if it smaller than size.
- */
- len = strnlen(q, size);
- /*
- * If len equals size, we will copy only size bytes which leads to
- * -E2BIG being returned.
- * Otherwise we will copy len + 1 because of the final '\O'.
- */
- len = len == size ? size : len + 1;
-
- /*
- * Generate a runtime write overflow error if len is greater than
- * p_size.
- */
- if (len > p_size)
- fortify_panic(__func__);
-
- /*
- * We can now safely call vanilla strscpy because we are protected from:
- * 1. Read overflow thanks to call to strnlen().
- * 2. Write overflow thanks to above ifs.
- */
- return __real_strscpy(p, q, len);
-}
-
-/* defined after fortified strlen and strnlen to reuse them */
-__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
-{
- size_t p_len, copy_len;
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __underlying_strncat(p, q, count);
- p_len = strlen(p);
- copy_len = strnlen(q, count);
- if (p_size < p_len + copy_len + 1)
- fortify_panic(__func__);
- __underlying_memcpy(p + p_len, q, copy_len);
- p[p_len + copy_len] = '\0';
- return p;
-}
-
-__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __write_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __underlying_memset(p, c, size);
-}
-
-__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (__builtin_constant_p(size)) {
- if (p_size < size)
- __write_overflow();
- if (q_size < size)
- __read_overflow2();
- }
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __underlying_memcpy(p, q, size);
-}
-
-__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (__builtin_constant_p(size)) {
- if (p_size < size)
- __write_overflow();
- if (q_size < size)
- __read_overflow2();
- }
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __underlying_memmove(p, q, size);
-}
-
-extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
-__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __real_memscan(p, c, size);
-}
-
-__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
- if (__builtin_constant_p(size)) {
- if (p_size < size)
- __read_overflow();
- if (q_size < size)
- __read_overflow2();
- }
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __underlying_memcmp(p, q, size);
-}
-
-__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __underlying_memchr(p, c, size);
-}
-
-void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
-__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __real_memchr_inv(p, c, size);
-}
-
-extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
-__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
-{
- size_t p_size = __builtin_object_size(p, 0);
- if (__builtin_constant_p(size) && p_size < size)
- __read_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __real_kmemdup(p, size, gfp);
-}
-
-/* defined after fortified strlen and memcpy to reuse them */
-__FORTIFY_INLINE char *strcpy(char *p, const char *q)
-{
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
- size_t size;
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __underlying_strcpy(p, q);
- size = strlen(q) + 1;
- /* test here to use the more stringent object size */
- if (p_size < size)
- fortify_panic(__func__);
- memcpy(p, q, size);
- return p;
-}
-
-/* Don't use these outside the FORITFY_SOURCE implementation */
-#undef __underlying_memchr
-#undef __underlying_memcmp
-#undef __underlying_memcpy
-#undef __underlying_memmove
-#undef __underlying_memset
-#undef __underlying_strcat
-#undef __underlying_strcpy
-#undef __underlying_strlen
-#undef __underlying_strncat
-#undef __underlying_strncpy
+#include <linux/fortify-string.h>
#endif
/**
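For illustration, a minimal sketch of the behavior the fortified wrappers above (now provided by fortify-string.h) implement, assuming CONFIG_FORTIFY_SOURCE is enabled and a hypothetical local buffer:

	char buf[4];

	/* A constant size known to exceed the destination object triggers
	 * __write_overflow() at compile time; a size only known at run time
	 * is checked against __builtin_object_size() and ends up in
	 * fortify_panic() instead of silently corrupting memory. */
	memset(buf, 0, 16);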
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 43f854487539..938c2bf29db8 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -10,9 +10,6 @@
#define RPC_VERSION 2
-/* size of an XDR encoding unit in bytes, i.e. 32bit */
-#define XDR_UNIT (4)
-
/* spec defines authentication flavor as an unsigned 32 bit integer */
typedef u32 rpc_authflavor_t;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 34c2a69820e9..31ee3b6047c3 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -463,6 +463,7 @@ struct svc_procedure {
unsigned int pc_ressize; /* result struct size */
unsigned int pc_cachetype; /* cache info (NFS) */
unsigned int pc_xdrressize; /* maximum size of XDR reply */
+ const char * pc_name; /* for display */
};
/*
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 294b56e61522..7c693b31965e 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -49,6 +49,7 @@
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/sunrpc/svc_rdma_pcl.h>
+#include <linux/percpu_counter.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
@@ -65,15 +66,10 @@ extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;
-extern atomic_t rdma_stat_recv;
-extern atomic_t rdma_stat_read;
-extern atomic_t rdma_stat_write;
-extern atomic_t rdma_stat_sq_starve;
-extern atomic_t rdma_stat_rq_starve;
-extern atomic_t rdma_stat_rq_poll;
-extern atomic_t rdma_stat_rq_prod;
-extern atomic_t rdma_stat_sq_poll;
-extern atomic_t rdma_stat_sq_prod;
+extern struct percpu_counter svcrdma_stat_read;
+extern struct percpu_counter svcrdma_stat_recv;
+extern struct percpu_counter svcrdma_stat_sq_starve;
+extern struct percpu_counter svcrdma_stat_write;
struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
@@ -108,6 +104,7 @@ struct svcxprt_rdma {
wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */
unsigned long sc_flags;
+ u32 sc_pending_recvs;
struct list_head sc_read_complete_q;
struct work_struct sc_work;
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index b7ac7fe68306..bcc555c7ae9c 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -35,6 +35,8 @@ struct svc_sock {
/* Total length of the data (not including fragment headers)
* received so far in the fragments making up this rpc: */
u32 sk_datalen;
+ /* Number of queued send requests */
+ atomic_t sk_sendqlen;
struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
};
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index b26213ae8c1a..2bc75c167f00 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -20,6 +20,13 @@ struct bio_vec;
struct rpc_rqst;
/*
+ * Size of an XDR encoding unit in bytes, i.e. 32 bits,
+ * as defined in Section 3 of RFC 4506. All encoded
+ * XDR data items are aligned on a boundary of 32 bits.
+ */
+#define XDR_UNIT sizeof(__be32)
+
+/*
* Buffer adjustment
*/
#define XDR_QUADLEN(l) (((l) + 3) >> 2)
@@ -329,7 +336,7 @@ ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
static inline size_t
xdr_align_size(size_t n)
{
- const size_t mask = sizeof(__u32) - 1;
+ const size_t mask = XDR_UNIT - 1;
return (n + mask) & ~mask;
}
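For illustration, with XDR_UNIT equal to four bytes this helper rounds any length up to the next 32-bit boundary, e.g.:

	xdr_align_size(1);	/* -> 4: one data byte plus three pad bytes */
	xdr_align_size(4);	/* -> 4: already aligned */
	xdr_align_size(5);	/* -> 8 */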
@@ -359,7 +366,7 @@ static inline size_t xdr_pad_size(size_t n)
*/
static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr)
{
- const size_t len = sizeof(__be32);
+ const size_t len = XDR_UNIT;
__be32 *p = xdr_reserve_space(xdr, len);
if (unlikely(!p))
@@ -378,7 +385,7 @@ static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr)
*/
static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr)
{
- const size_t len = sizeof(__be32);
+ const size_t len = XDR_UNIT;
__be32 *p = xdr_reserve_space(xdr, len);
if (unlikely(!p))
diff --git a/include/linux/sunxi-rsb.h b/include/linux/sunxi-rsb.h
index 7e75bb0346d0..bf0d365f471c 100644
--- a/include/linux/sunxi-rsb.h
+++ b/include/linux/sunxi-rsb.h
@@ -59,7 +59,7 @@ static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev,
struct sunxi_rsb_driver {
struct device_driver driver;
int (*probe)(struct sunxi_rsb_device *rdev);
- int (*remove)(struct sunxi_rsb_device *rdev);
+ void (*remove)(struct sunxi_rsb_device *rdev);
};
static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d)
diff --git a/include/linux/surface_acpi_notify.h b/include/linux/surface_acpi_notify.h
new file mode 100644
index 000000000000..8e3e86c7d78c
--- /dev/null
+++ b/include/linux/surface_acpi_notify.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Interface for Surface ACPI Notify (SAN) driver.
+ *
+ * Provides access to discrete GPU notifications sent from ACPI via the SAN
+ * driver, which are not handled by this driver directly.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_ACPI_NOTIFY_H
+#define _LINUX_SURFACE_ACPI_NOTIFY_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+/**
+ * struct san_dgpu_event - Discrete GPU ACPI event.
+ * @category: Category of the event.
+ * @target: Target ID of the event source.
+ * @command: Command ID of the event.
+ * @instance: Instance ID of the event source.
+ * @length: Length of the event's payload data (in bytes).
+ * @payload: Pointer to the event's payload data.
+ */
+struct san_dgpu_event {
+ u8 category;
+ u8 target;
+ u8 command;
+ u8 instance;
+ u16 length;
+ u8 *payload;
+};
+
+int san_client_link(struct device *client);
+int san_dgpu_notifier_register(struct notifier_block *nb);
+int san_dgpu_notifier_unregister(struct notifier_block *nb);
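A minimal usage sketch for a hypothetical consumer: the callback follows the standard notifier_block convention, and the dGPU event is assumed to be passed as the data pointer. All names below are placeholders.

	static int my_dgpu_notify(struct notifier_block *nb, unsigned long action,
				  void *data)
	{
		struct san_dgpu_event *evt = data;

		/* Hypothetical handler: inspect evt->category / evt->command here. */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_dgpu_nb = { .notifier_call = my_dgpu_notify };

	static int my_client_probe(struct device *dev)
	{
		int status;

		/* Link against the SAN driver before registering for events. */
		status = san_client_link(dev);
		if (status)
			return status;

		return san_dgpu_notifier_register(&my_dgpu_nb);
	}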
+
+#endif /* _LINUX_SURFACE_ACPI_NOTIFY_H */
diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
new file mode 100644
index 000000000000..f4b1ba887384
--- /dev/null
+++ b/include/linux/surface_aggregator/controller.h
@@ -0,0 +1,824 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface System Aggregator Module (SSAM) controller interface.
+ *
+ * Main communication interface for the SSAM EC. Provides a controller
+ * managing access and communication to and from the SSAM EC, as well as main
+ * communication structures and definitions.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
+#define _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/serial_hub.h>
+
+
+/* -- Main data types and definitions --------------------------------------- */
+
+/**
+ * enum ssam_event_flags - Flags for enabling/disabling SAM events
+ * @SSAM_EVENT_SEQUENCED: The event will be sent via a sequenced data frame.
+ */
+enum ssam_event_flags {
+ SSAM_EVENT_SEQUENCED = BIT(0),
+};
+
+/**
+ * struct ssam_event - SAM event sent from the EC to the host.
+ * @target_category: Target category of the event source. See &enum ssam_ssh_tc.
+ * @target_id: Target ID of the event source.
+ * @command_id: Command ID of the event.
+ * @instance_id: Instance ID of the event source.
+ * @length: Length of the event payload in bytes.
+ * @data: Event payload data.
+ */
+struct ssam_event {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u16 length;
+ u8 data[];
+};
+
+/**
+ * enum ssam_request_flags - Flags for SAM requests.
+ *
+ * @SSAM_REQUEST_HAS_RESPONSE:
+ * Specifies that the request expects a response. If not set, the request
+ * will be directly completed after its underlying packet has been
+ * transmitted. If set, the request transport system waits for a response
+ * of the request.
+ *
+ * @SSAM_REQUEST_UNSEQUENCED:
+ * Specifies that the request should be transmitted via an unsequenced
+ * packet. If set, the request must not have a response, meaning that this
+ * flag and the %SSAM_REQUEST_HAS_RESPONSE flag are mutually exclusive.
+ */
+enum ssam_request_flags {
+ SSAM_REQUEST_HAS_RESPONSE = BIT(0),
+ SSAM_REQUEST_UNSEQUENCED = BIT(1),
+};
+
+/**
+ * struct ssam_request - SAM request description.
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
+ * @target_id: ID of the request's target.
+ * @command_id: Command ID of the request.
+ * @instance_id: Instance ID of the request's target.
+ * @flags: Flags for the request. See &enum ssam_request_flags.
+ * @length: Length of the request payload in bytes.
+ * @payload: Request payload data.
+ *
+ * This struct fully describes a SAM request with payload. It is intended to
+ * help set up the actual transport struct, e.g. &struct ssam_request_sync,
+ * and specifically its raw message data via ssam_request_write_data().
+ */
+struct ssam_request {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u16 flags;
+ u16 length;
+ const u8 *payload;
+};
+
+/**
+ * struct ssam_response - Response buffer for SAM request.
+ * @capacity: Capacity of the buffer, in bytes.
+ * @length: Length of the actual data stored in the memory pointed to by
+ * @pointer, in bytes. Set by the transport system.
+ * @pointer: Pointer to the buffer's memory, storing the response payload data.
+ */
+struct ssam_response {
+ size_t capacity;
+ size_t length;
+ u8 *pointer;
+};
+
+struct ssam_controller;
+
+struct ssam_controller *ssam_get_controller(void);
+struct ssam_controller *ssam_client_bind(struct device *client);
+int ssam_client_link(struct ssam_controller *ctrl, struct device *client);
+
+struct device *ssam_controller_device(struct ssam_controller *c);
+
+struct ssam_controller *ssam_controller_get(struct ssam_controller *c);
+void ssam_controller_put(struct ssam_controller *c);
+
+void ssam_controller_statelock(struct ssam_controller *c);
+void ssam_controller_stateunlock(struct ssam_controller *c);
+
+ssize_t ssam_request_write_data(struct ssam_span *buf,
+ struct ssam_controller *ctrl,
+ const struct ssam_request *spec);
+
+
+/* -- Synchronous request interface. ---------------------------------------- */
+
+/**
+ * struct ssam_request_sync - Synchronous SAM request struct.
+ * @base: Underlying SSH request.
+ * @comp: Completion used to signal full completion of the request. After the
+ * request has been submitted, this struct may only be modified or
+ * deallocated after the completion has been signaled.

+ * @resp: Buffer to store the response.
+ * @status: Status of the request, set after the base request has been
+ * completed or has failed.
+ */
+struct ssam_request_sync {
+ struct ssh_request base;
+ struct completion comp;
+ struct ssam_response *resp;
+ int status;
+};
+
+int ssam_request_sync_alloc(size_t payload_len, gfp_t flags,
+ struct ssam_request_sync **rqst,
+ struct ssam_span *buffer);
+
+void ssam_request_sync_free(struct ssam_request_sync *rqst);
+
+int ssam_request_sync_init(struct ssam_request_sync *rqst,
+ enum ssam_request_flags flags);
+
+/**
+ * ssam_request_sync_set_data - Set message data of a synchronous request.
+ * @rqst: The request.
+ * @ptr: Pointer to the request message data.
+ * @len: Length of the request message data.
+ *
+ * Set the request message data of a synchronous request. The provided buffer
+ * needs to live until the request has been completed.
+ */
+static inline void ssam_request_sync_set_data(struct ssam_request_sync *rqst,
+ u8 *ptr, size_t len)
+{
+ ssh_request_set_data(&rqst->base, ptr, len);
+}
+
+/**
+ * ssam_request_sync_set_resp - Set response buffer of a synchronous request.
+ * @rqst: The request.
+ * @resp: The response buffer.
+ *
+ * Sets the response buffer of a synchronous request. This buffer will store
+ * the response of the request after it has been completed. May be %NULL if no
+ * response is expected.
+ */
+static inline void ssam_request_sync_set_resp(struct ssam_request_sync *rqst,
+ struct ssam_response *resp)
+{
+ rqst->resp = resp;
+}
+
+int ssam_request_sync_submit(struct ssam_controller *ctrl,
+ struct ssam_request_sync *rqst);
+
+/**
+ * ssam_request_sync_wait - Wait for completion of a synchronous request.
+ * @rqst: The request to wait for.
+ *
+ * Wait for completion and release of a synchronous request. After this
+ * function terminates, the request is guaranteed to have left the transport
+ * system. After successful submission of a request, this function must be
+ * called before accessing the response of the request, freeing the request,
+ * or freeing any of the buffers associated with the request.
+ *
+ * This function must not be called if the request has not been submitted yet
+ * and may lead to a deadlock/infinite wait if a subsequent request submission
+ * fails in that case, due to the completion never triggering.
+ *
+ * Return: Returns the status of the given request, which is set on completion
+ * of the packet. This value is zero on success and negative on failure.
+ */
+static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst)
+{
+ wait_for_completion(&rqst->comp);
+ return rqst->status;
+}
+
+int ssam_request_sync(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp);
+
+int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp,
+ struct ssam_span *buf);
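A minimal sketch of issuing a raw synchronous request via the simple helper above; ``ctrl`` is a previously obtained controller, and all category/command IDs are hypothetical placeholders. The alloc/init/set_data/submit/wait helpers above provide the same functionality in separate steps for callers that need more control.

	u8 data[4];
	struct ssam_response rsp = {
		.capacity = sizeof(data),
		.length   = 0,
		.pointer  = data,
	};
	struct ssam_request rqst = {
		.target_category = 0x01,	/* hypothetical */
		.target_id       = 0x01,	/* hypothetical */
		.command_id      = 0x02,	/* hypothetical */
		.instance_id     = 0x00,
		.flags           = SSAM_REQUEST_HAS_RESPONSE,
		.length          = 0,
		.payload         = NULL,
	};
	int status = ssam_request_sync(ctrl, &rqst, &rsp);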
+
+/**
+ * ssam_request_sync_onstack - Execute a synchronous request on the stack.
+ * @ctrl: The controller via which the request is submitted.
+ * @rqst: The request specification.
+ * @rsp: The response buffer.
+ * @payload_len: The (maximum) request payload length.
+ *
+ * Allocates a synchronous request with specified payload length on the stack,
+ * fully initializes it via the provided request specification, submits it,
+ * and finally waits for its completion before returning its status. This
+ * helper macro essentially allocates the request message buffer on the stack
+ * and then calls ssam_request_sync_with_buffer().
+ *
+ * Note: The @payload_len parameter specifies the maximum payload length, used
+ * for buffer allocation. The actual payload length may be smaller.
+ *
+ * Return: Returns the status of the request or any failure during setup, i.e.
+ * zero on success and a negative value on failure.
+ */
+#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len) \
+ ({ \
+ u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)]; \
+ struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) }; \
+ \
+ ssam_request_sync_with_buffer(ctrl, rqst, rsp, &__buf); \
+ })
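A brief sketch for a request that carries a small payload and needs no response (IDs again hypothetical); the final argument sizes the on-stack message buffer:

	__le32 arg = cpu_to_le32(1);
	struct ssam_request rqst = {
		.target_category = 0x01,	/* hypothetical */
		.target_id       = 0x01,
		.command_id      = 0x03,	/* hypothetical */
		.instance_id     = 0x00,
		.flags           = 0,
		.length          = sizeof(arg),
		.payload         = (u8 *)&arg,
	};
	int status = ssam_request_sync_onstack(ctrl, &rqst, NULL, sizeof(arg));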
+
+/**
+ * __ssam_retry - Retry request in case of I/O errors or timeouts.
+ * @request: The request function to execute. Must return an integer.
+ * @n: Number of tries.
+ * @args: Arguments for the request function.
+ *
+ * Executes the given request function, i.e. calls @request. In case the
+ * request returns %-EREMOTEIO (indicates I/O error) or %-ETIMEDOUT (request
+ * or underlying packet timed out), @request will be re-executed again, up to
+ * @n times in total.
+ *
+ * Return: Returns the return value of the last execution of @request.
+ */
+#define __ssam_retry(request, n, args...) \
+ ({ \
+ int __i, __s = 0; \
+ \
+ for (__i = (n); __i > 0; __i--) { \
+ __s = request(args); \
+ if (__s != -ETIMEDOUT && __s != -EREMOTEIO) \
+ break; \
+ } \
+ __s; \
+ })
+
+/**
+ * ssam_retry - Retry request in case of I/O errors or timeouts up to three
+ * times in total.
+ * @request: The request function to execute. Must return an integer.
+ * @args: Arguments for the request function.
+ *
+ * Executes the given request function, i.e. calls @request. In case the
+ * request returns %-EREMOTEIO (indicates I/O error) or %-ETIMEDOUT (request
+ * or underlying packet timed out), @request will be re-executed again, up to
+ * three times in total.
+ *
+ * See __ssam_retry() for a more generic macro for this purpose.
+ *
+ * Return: Returns the return value of the last execution of @request.
+ */
+#define ssam_retry(request, args...) \
+ __ssam_retry(request, 3, args)
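For example, the synchronous helpers above can be wrapped so that transient failures are retried, as in this sketch (reusing the hypothetical request/response from the earlier sketches):

	status = ssam_retry(ssam_request_sync, ctrl, &rqst, &rsp);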
+
+/**
+ * struct ssam_request_spec - Blue-print specification of SAM request.
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
+ * @target_id: ID of the request's target.
+ * @command_id: Command ID of the request.
+ * @instance_id: Instance ID of the request's target.
+ * @flags: Flags for the request. See &enum ssam_request_flags.
+ *
+ * Blue-print specification for a SAM request. This struct describes the
+ * unique static parameters of a request (i.e. type) without specifying any of
+ * its instance-specific data (e.g. payload). It is intended to be used as base
+ * for defining simple request functions via the
+ * ``SSAM_DEFINE_SYNC_REQUEST_x()`` family of macros.
+ */
+struct ssam_request_spec {
+ u8 target_category;
+ u8 target_id;
+ u8 command_id;
+ u8 instance_id;
+ u8 flags;
+};
+
+/**
+ * struct ssam_request_spec_md - Blue-print specification for multi-device SAM
+ * request.
+ * @target_category: Category of the request's target. See &enum ssam_ssh_tc.
+ * @command_id: Command ID of the request.
+ * @flags: Flags for the request. See &enum ssam_request_flags.
+ *
+ * Blue-print specification for a multi-device SAM request, i.e. a request
+ * that is applicable to multiple device instances, described by their
+ * individual target and instance IDs. This struct describes the unique static
+ * parameters of a request (i.e. type) without specifying any of its
+ * instance-specific data (e.g. payload) and without specifying any of its
+ * device specific IDs (i.e. target and instance ID). It is intended to be
+ * used as base for defining simple multi-device request functions via the
+ * ``SSAM_DEFINE_SYNC_REQUEST_MD_x()`` and ``SSAM_DEFINE_SYNC_REQUEST_CL_x()``
+ * families of macros.
+ */
+struct ssam_request_spec_md {
+ u8 target_category;
+ u8 command_id;
+ u8 flags;
+};
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_N() - Define synchronous SAM request function
+ * with neither argument nor return value.
+ * @name: Name of the generated function.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request having neither argument nor return value. The
+ * generated function takes care of setting up the request struct and buffer
+ * allocation, as well as execution of the request itself, returning once the
+ * request has been fully completed. The required transport buffer will be
+ * allocated on the stack.
+ *
+ * The generated function is defined as ``int name(struct ssam_controller
+ * *ctrl)``, returning the status of the request, which is zero on success and
+ * negative on failure. The ``ctrl`` parameter is the controller via which the
+ * request is being sent.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \
+ int name(struct ssam_controller *ctrl) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
+ }
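A usage sketch for this macro; category and command IDs are hypothetical, and the generated function is not declared static here:

	SSAM_DEFINE_SYNC_REQUEST_N(__foo_dev_reset, {
		.target_category = 0x01,	/* hypothetical */
		.target_id       = 0x01,
		.command_id      = 0x13,	/* hypothetical */
		.instance_id     = 0x00,
	});

	/* ... later, e.g. in probe(): status = __foo_dev_reset(ctrl); */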
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_W() - Define synchronous SAM request function with
+ * argument.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking an argument of type @atype and having no
+ * return value. The generated function takes care of setting up the request
+ * struct, buffer allocation, as well as execution of the request itself,
+ * returning once the request has been fully completed. The required transport
+ * buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``int name(struct ssam_controller
+ * *ctrl, const atype *arg)``, returning the status of the request, which is
+ * zero on success and negative on failure. The ``ctrl`` parameter is the
+ * controller via which the request is sent. The request argument is specified
+ * via the ``arg`` pointer.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...) \
+ int name(struct ssam_controller *ctrl, const atype *arg) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
+ sizeof(atype)); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_R() - Define synchronous SAM request function with
+ * return value.
+ * @name: Name of the generated function.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking no argument but having a return value of
+ * type @rtype. The generated function takes care of setting up the request
+ * and response structs, buffer allocation, as well as execution of the
+ * request itself, returning once the request has been fully completed. The
+ * required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``int name(struct ssam_controller
+ * *ctrl, rtype *ret)``, returning the status of the request, which is zero on
+ * success and negative on failure. The ``ctrl`` parameter is the controller
+ * via which the request is sent. The request's return value is written to the
+ * memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \
+ int name(struct ssam_controller *ctrl, rtype *ret) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
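A usage sketch, analogous to the one for SSAM_DEFINE_SYNC_REQUEST_N() above; IDs are hypothetical and the return value is read back as a little-endian u32:

	SSAM_DEFINE_SYNC_REQUEST_R(__foo_dev_get_mode, __le32, {
		.target_category = 0x01,	/* hypothetical */
		.target_id       = 0x01,
		.command_id      = 0x22,	/* hypothetical */
		.instance_id     = 0x00,
	});

	/* ...
	 *	__le32 mode;
	 *	status = __foo_dev_get_mode(ctrl, &mode);
	 */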
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_N() - Define synchronous multi-device SAM
+ * request function with neither argument nor return value.
+ * @name: Name of the generated function.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request having neither argument nor return value. Device
+ * specifying parameters are not hard-coded, but instead must be provided to
+ * the function. The generated function takes care of setting up the request
+ * struct, buffer allocation, as well as execution of the request itself,
+ * returning once the request has been fully completed. The required transport
+ * buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``int name(struct ssam_controller
+ * *ctrl, u8 tid, u8 iid)``, returning the status of the request, which is
+ * zero on success and negative on failure. The ``ctrl`` parameter is the
+ * controller via which the request is sent, ``tid`` the target ID for the
+ * request, and ``iid`` the instance ID.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...) \
+ int name(struct ssam_controller *ctrl, u8 tid, u8 iid) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_W() - Define synchronous multi-device SAM
+ * request function with argument.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking an argument of type @atype and having no
+ * return value. Device specifying parameters are not hard-coded, but instead
+ * must be provided to the function. The generated function takes care of
+ * setting up the request struct, buffer allocation, as well as execution of
+ * the request itself, returning once the request has been fully completed.
+ * The required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``int name(struct ssam_controller
+ * *ctrl, u8 tid, u8 iid, const atype *arg)``, returning the status of the
+ * request, which is zero on success and negative on failure. The ``ctrl``
+ * parameter is the controller via which the request is sent, ``tid`` the
+ * target ID for the request, and ``iid`` the instance ID. The request argument
+ * is specified via the ``arg`` pointer.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...) \
+ int name(struct ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg)\
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
+ sizeof(atype)); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_R() - Define synchronous multi-device SAM
+ * request function with return value.
+ * @name: Name of the generated function.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking no argument but having a return value of
+ * type @rtype. Device specifying parameters are not hard-coded, but instead
+ * must be provided to the function. The generated function takes care of
+ * setting up the request and response structs, buffer allocation, as well as
+ * execution of the request itself, returning once the request has been fully
+ * completed. The required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``int name(struct ssam_controller
+ * *ctrl, u8 tid, u8 iid, rtype *ret)``, returning the status of the request,
+ * which is zero on success and negative on failure. The ``ctrl`` parameter is
+ * the controller via which the request is sent, ``tid`` the target ID for the
+ * request, and ``iid`` the instance ID. The request's return value is written
+ * to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) \
+ int name(struct ssam_controller *ctrl, u8 tid, u8 iid, rtype *ret) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = 0; \
+ rqst.payload = NULL; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+
+/* -- Event notifier/callbacks. --------------------------------------------- */
+
+#define SSAM_NOTIF_STATE_SHIFT 2
+#define SSAM_NOTIF_STATE_MASK ((1 << SSAM_NOTIF_STATE_SHIFT) - 1)
+
+/**
+ * enum ssam_notif_flags - Flags used in return values from SSAM notifier
+ * callback functions.
+ *
+ * @SSAM_NOTIF_HANDLED:
+ * Indicates that the notification has been handled. This flag should be
+ * set by the handler if the handler can act/has acted upon the event
+ * provided to it. This flag should not be set if the handler is not a
+ * primary handler intended for the provided event.
+ *
+ * If this flag has not been set by any handler after the notifier chain
+ * has been traversed, a warning will be emitted, stating that the event
+ * has not been handled.
+ *
+ * @SSAM_NOTIF_STOP:
+ * Indicates that the notifier traversal should stop. If this flag is
+ * returned from a notifier callback, notifier chain traversal will
+ * immediately stop and any remaining notifiers will not be called. This
+ * flag is automatically set when ssam_notifier_from_errno() is called
+ * with a negative error value.
+ */
+enum ssam_notif_flags {
+ SSAM_NOTIF_HANDLED = BIT(0),
+ SSAM_NOTIF_STOP = BIT(1),
+};
+
+struct ssam_event_notifier;
+
+typedef u32 (*ssam_notifier_fn_t)(struct ssam_event_notifier *nf,
+ const struct ssam_event *event);
+
+/**
+ * struct ssam_notifier_block - Base notifier block for SSAM event
+ * notifications.
+ * @node: The node for the list of notifiers.
+ * @fn: The callback function of this notifier. This function takes the
+ * respective notifier block and event as input and should return
+ * a notifier value, which can either be obtained from the flags
+ * provided in &enum ssam_notif_flags, converted from a standard
+ * error value via ssam_notifier_from_errno(), or a combination of
+ * both (e.g. ``ssam_notifier_from_errno(e) | SSAM_NOTIF_HANDLED``).
+ * @priority: Priority value determining the order in which notifier callbacks
+ * will be called. A higher value means higher priority, i.e. the
+ * associated callback will be executed earlier than other (lower
+ * priority) callbacks.
+ */
+struct ssam_notifier_block {
+ struct list_head node;
+ ssam_notifier_fn_t fn;
+ int priority;
+};
+
+/**
+ * ssam_notifier_from_errno() - Convert standard error value to notifier
+ * return code.
+ * @err: The error code to convert, must be negative (in case of failure) or
+ * zero (in case of success).
+ *
+ * Return: Returns the notifier return value obtained by converting the
+ * specified @err value. In case @err is negative, the %SSAM_NOTIF_STOP flag
+ * will be set, causing notifier call chain traversal to abort.
+ */
+static inline u32 ssam_notifier_from_errno(int err)
+{
+ if (WARN_ON(err > 0) || err == 0)
+ return 0;
+ else
+ return ((-err) << SSAM_NOTIF_STATE_SHIFT) | SSAM_NOTIF_STOP;
+}
+
+/**
+ * ssam_notifier_to_errno() - Convert notifier return code to standard error
+ * value.
+ * @ret: The notifier return value to convert.
+ *
+ * Return: Returns the negative error value encoded in @ret or zero if @ret
+ * indicates success.
+ */
+static inline int ssam_notifier_to_errno(u32 ret)
+{
+ return -(ret >> SSAM_NOTIF_STATE_SHIFT);
+}
+
+
+/* -- Event/notification registry. ------------------------------------------ */
+
+/**
+ * struct ssam_event_registry - Registry specification used for enabling events.
+ * @target_category: Target category for the event registry requests.
+ * @target_id: Target ID for the event registry requests.
+ * @cid_enable: Command ID for the event-enable request.
+ * @cid_disable: Command ID for the event-disable request.
+ *
+ * This struct describes a SAM event registry via the minimal collection of
+ * SAM IDs specifying the requests to use for enabling and disabling an event.
+ * The individual event to be enabled/disabled itself is specified via &struct
+ * ssam_event_id.
+ */
+struct ssam_event_registry {
+ u8 target_category;
+ u8 target_id;
+ u8 cid_enable;
+ u8 cid_disable;
+};
+
+/**
+ * struct ssam_event_id - Unique event ID used for enabling events.
+ * @target_category: Target category of the event source.
+ * @instance: Instance ID of the event source.
+ *
+ * This struct specifies the event to be enabled/disabled via an externally
+ * provided registry. It does not specify the registry to be used itself, this
+ * is done via &struct ssam_event_registry.
+ */
+struct ssam_event_id {
+ u8 target_category;
+ u8 instance;
+};
+
+/**
+ * enum ssam_event_mask - Flags specifying how events are matched to notifiers.
+ *
+ * @SSAM_EVENT_MASK_NONE:
+ * Run the callback for any event with matching target category. Do not
+ * do any additional filtering.
+ *
+ * @SSAM_EVENT_MASK_TARGET:
+ * In addition to filtering by target category, only execute the notifier
+ *	callback for events with a target ID matching that of the
+ * registry used for enabling/disabling the event.
+ *
+ * @SSAM_EVENT_MASK_INSTANCE:
+ * In addition to filtering by target category, only execute the notifier
+ *	callback for events with an instance ID matching the instance ID
+ * used when enabling the event.
+ *
+ * @SSAM_EVENT_MASK_STRICT:
+ * Do all the filtering above.
+ */
+enum ssam_event_mask {
+ SSAM_EVENT_MASK_TARGET = BIT(0),
+ SSAM_EVENT_MASK_INSTANCE = BIT(1),
+
+ SSAM_EVENT_MASK_NONE = 0,
+ SSAM_EVENT_MASK_STRICT =
+ SSAM_EVENT_MASK_TARGET
+ | SSAM_EVENT_MASK_INSTANCE,
+};
+
+/**
+ * SSAM_EVENT_REGISTRY() - Define a new event registry.
+ * @tc: Target category for the event registry requests.
+ * @tid: Target ID for the event registry requests.
+ * @cid_en: Command ID for the event-enable request.
+ * @cid_dis: Command ID for the event-disable request.
+ *
+ * Return: Returns the &struct ssam_event_registry specified by the given
+ * parameters.
+ */
+#define SSAM_EVENT_REGISTRY(tc, tid, cid_en, cid_dis) \
+ ((struct ssam_event_registry) { \
+ .target_category = (tc), \
+ .target_id = (tid), \
+ .cid_enable = (cid_en), \
+ .cid_disable = (cid_dis), \
+ })
+
+#define SSAM_EVENT_REGISTRY_SAM \
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c)
+
+#define SSAM_EVENT_REGISTRY_KIP \
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
+
+#define SSAM_EVENT_REGISTRY_REG \
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
+
+/**
+ * struct ssam_event_notifier - Notifier block for SSAM events.
+ * @base: The base notifier block with callback function and priority.
+ * @event: The event for which this block will receive notifications.
+ * @event.reg: Registry via which the event will be enabled/disabled.
+ * @event.id: ID specifying the event.
+ * @event.mask: Flags determining how events are matched to the notifier.
+ * @event.flags: Flags used for enabling the event.
+ */
+struct ssam_event_notifier {
+ struct ssam_notifier_block base;
+
+ struct {
+ struct ssam_event_registry reg;
+ struct ssam_event_id id;
+ enum ssam_event_mask mask;
+ u8 flags;
+ } event;
+};
+
+int ssam_notifier_register(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n);
+
+int ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n);
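A sketch of how a hypothetical driver might wire up an event notifier with the pieces above; the registry choice, target category, and flags are placeholders:

	static u32 my_event_fn(struct ssam_event_notifier *nf,
			       const struct ssam_event *event)
	{
		/* Handle the event; claim it so no "unhandled event" warning fires. */
		return SSAM_NOTIF_HANDLED;
	}

	static struct ssam_event_notifier my_notif;	/* must outlive registration */

	static int my_setup(struct ssam_controller *ctrl)
	{
		my_notif.base.fn = my_event_fn;
		my_notif.base.priority = 0;
		my_notif.event.reg = SSAM_EVENT_REGISTRY_SAM;	/* hypothetical choice */
		my_notif.event.id.target_category = 0x01;	/* hypothetical */
		my_notif.event.id.instance = 0x00;
		my_notif.event.mask = SSAM_EVENT_MASK_STRICT;
		my_notif.event.flags = SSAM_EVENT_SEQUENCED;

		return ssam_notifier_register(ctrl, &my_notif);
	}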
+
+#endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
new file mode 100644
index 000000000000..02f3e06c0a60
--- /dev/null
+++ b/include/linux/surface_aggregator/device.h
@@ -0,0 +1,423 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface System Aggregator Module (SSAM) bus and client-device subsystem.
+ *
+ * Main interface for the surface-aggregator bus, surface-aggregator client
+ * devices, and respective drivers building on top of the SSAM controller.
+ * Provides support for non-platform/non-ACPI SSAM clients via dedicated
+ * subsystem.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_AGGREGATOR_DEVICE_H
+#define _LINUX_SURFACE_AGGREGATOR_DEVICE_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+
+#include <linux/surface_aggregator/controller.h>
+
+
+/* -- Surface System Aggregator Module bus. --------------------------------- */
+
+/**
+ * enum ssam_device_domain - SAM device domain.
+ * @SSAM_DOMAIN_VIRTUAL: Virtual device.
+ * @SSAM_DOMAIN_SERIALHUB: Physical device connected via Surface Serial Hub.
+ */
+enum ssam_device_domain {
+ SSAM_DOMAIN_VIRTUAL = 0x00,
+ SSAM_DOMAIN_SERIALHUB = 0x01,
+};
+
+/**
+ * enum ssam_virtual_tc - Target categories for the virtual SAM domain.
+ * @SSAM_VIRTUAL_TC_HUB: Device hub category.
+ */
+enum ssam_virtual_tc {
+ SSAM_VIRTUAL_TC_HUB = 0x00,
+};
+
+/**
+ * struct ssam_device_uid - Unique identifier for SSAM device.
+ * @domain: Domain of the device.
+ * @category: Target category of the device.
+ * @target: Target ID of the device.
+ * @instance: Instance ID of the device.
+ * @function: Sub-function of the device. This field can be used to split a
+ * single SAM device into multiple virtual subdevices to separate
+ * different functionality of that device and allow one driver per
+ * such functionality.
+ */
+struct ssam_device_uid {
+ u8 domain;
+ u8 category;
+ u8 target;
+ u8 instance;
+ u8 function;
+};
+
+/*
+ * Special values for device matching.
+ *
+ * These values are intended to be used with SSAM_DEVICE(), SSAM_VDEV(), and
+ * SSAM_SDEV() exclusively. Specifically, they are used to initialize the
+ * match_flags member of the device ID structure. Do not use them directly
+ * with struct ssam_device_id or struct ssam_device_uid.
+ */
+#define SSAM_ANY_TID 0xffff
+#define SSAM_ANY_IID 0xffff
+#define SSAM_ANY_FUN 0xffff
+
+/**
+ * SSAM_DEVICE() - Initialize a &struct ssam_device_id with the given
+ * parameters.
+ * @d: Domain of the device.
+ * @cat: Target category of the device.
+ * @tid: Target ID of the device.
+ * @iid: Instance ID of the device.
+ * @fun: Sub-function of the device.
+ *
+ * Initializes a &struct ssam_device_id with the given parameters. See &struct
+ * ssam_device_uid for details regarding the parameters. The special values
+ * %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be used to specify that
+ * matching should ignore target ID, instance ID, and/or sub-function,
+ * respectively. This macro initializes the ``match_flags`` field based on the
+ * given parameters.
+ *
+ * Note: The parameters @d and @cat must be valid &u8 values, the parameters
+ * @tid, @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
+ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
+ * allowed.
+ */
+#define SSAM_DEVICE(d, cat, tid, iid, fun) \
+ .match_flags = (((tid) != SSAM_ANY_TID) ? SSAM_MATCH_TARGET : 0) \
+ | (((iid) != SSAM_ANY_IID) ? SSAM_MATCH_INSTANCE : 0) \
+ | (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0), \
+ .domain = d, \
+ .category = cat, \
+ .target = ((tid) != SSAM_ANY_TID) ? (tid) : 0, \
+ .instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0, \
+ .function = ((fun) != SSAM_ANY_FUN) ? (fun) : 0 \
+
+/**
+ * SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with
+ * the given parameters.
+ * @cat: Target category of the device.
+ * @tid: Target ID of the device.
+ * @iid: Instance ID of the device.
+ * @fun: Sub-function of the device.
+ *
+ * Initializes a &struct ssam_device_id with the given parameters in the
+ * virtual domain. See &struct ssam_device_uid for details regarding the
+ * parameters. The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and
+ * %SSAM_ANY_FUN can be used to specify that matching should ignore target ID,
+ * instance ID, and/or sub-function, respectively. This macro initializes the
+ * ``match_flags`` field based on the given parameters.
+ *
+ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
+ * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
+ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
+ * allowed.
+ */
+#define SSAM_VDEV(cat, tid, iid, fun) \
+ SSAM_DEVICE(SSAM_DOMAIN_VIRTUAL, SSAM_VIRTUAL_TC_##cat, tid, iid, fun)
+
+/**
+ * SSAM_SDEV() - Initialize a &struct ssam_device_id as physical SSH device
+ * with the given parameters.
+ * @cat: Target category of the device.
+ * @tid: Target ID of the device.
+ * @iid: Instance ID of the device.
+ * @fun: Sub-function of the device.
+ *
+ * Initializes a &struct ssam_device_id with the given parameters in the SSH
+ * domain. See &struct ssam_device_uid for details regarding the parameters.
+ * The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be
+ * used to specify that matching should ignore target ID, instance ID, and/or
+ * sub-function, respectively. This macro initializes the ``match_flags``
+ * field based on the given parameters.
+ *
+ * Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
+ * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
+ * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
+ * allowed.
+ */
+#define SSAM_SDEV(cat, tid, iid, fun) \
+ SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, tid, iid, fun)
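A match-table sketch using these helpers; ``SSAM_SSH_TC_TMP`` stands in for whichever &enum ssam_ssh_tc constant the driver targets, and all IDs are hypothetical:

	static const struct ssam_device_id foo_match[] = {
		{ SSAM_SDEV(TMP, 0x01, 0x00, 0x01) },			/* fixed IDs */
		{ SSAM_SDEV(TMP, SSAM_ANY_TID, SSAM_ANY_IID, 0x02) },	/* wildcards */
		{ },
	};
	MODULE_DEVICE_TABLE(ssam, foo_match);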
+
+/**
+ * struct ssam_device - SSAM client device.
+ * @dev: Driver model representation of the device.
+ * @ctrl: SSAM controller managing this device.
+ * @uid: UID identifying the device.
+ */
+struct ssam_device {
+ struct device dev;
+ struct ssam_controller *ctrl;
+
+ struct ssam_device_uid uid;
+};
+
+/**
+ * struct ssam_device_driver - SSAM client device driver.
+ * @driver: Base driver model structure.
+ * @match_table: Match table specifying which devices the driver should bind to.
+ * @probe: Called when the driver is being bound to a device.
+ * @remove: Called when the driver is being unbound from the device.
+ */
+struct ssam_device_driver {
+ struct device_driver driver;
+
+ const struct ssam_device_id *match_table;
+
+ int (*probe)(struct ssam_device *sdev);
+ void (*remove)(struct ssam_device *sdev);
+};
+
+extern struct bus_type ssam_bus_type;
+extern const struct device_type ssam_device_type;
+
+/**
+ * is_ssam_device() - Check if the given device is a SSAM client device.
+ * @d: The device to test the type of.
+ *
+ * Return: Returns %true if the specified device is of type &struct
+ * ssam_device, i.e. the device type points to %ssam_device_type, and %false
+ * otherwise.
+ */
+static inline bool is_ssam_device(struct device *d)
+{
+ return d->type == &ssam_device_type;
+}
+
+/**
+ * to_ssam_device() - Casts the given device to a SSAM client device.
+ * @d: The device to cast.
+ *
+ * Casts the given &struct device to a &struct ssam_device. The caller has to
+ * ensure that the given device is actually enclosed in a &struct ssam_device,
+ * e.g. by calling is_ssam_device().
+ *
+ * Return: Returns a pointer to the &struct ssam_device wrapping the given
+ * device @d.
+ */
+static inline struct ssam_device *to_ssam_device(struct device *d)
+{
+ return container_of(d, struct ssam_device, dev);
+}
+
+/**
+ * to_ssam_device_driver() - Casts the given device driver to a SSAM client
+ * device driver.
+ * @d: The driver to cast.
+ *
+ * Casts the given &struct device_driver to a &struct ssam_device_driver. The
+ * caller has to ensure that the given driver is actually enclosed in a
+ * &struct ssam_device_driver.
+ *
+ * Return: Returns the pointer to the &struct ssam_device_driver wrapping the
+ * given device driver @d.
+ */
+static inline
+struct ssam_device_driver *to_ssam_device_driver(struct device_driver *d)
+{
+ return container_of(d, struct ssam_device_driver, driver);
+}
+
+const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
+ const struct ssam_device_uid uid);
+
+const struct ssam_device_id *ssam_device_get_match(const struct ssam_device *dev);
+
+const void *ssam_device_get_match_data(const struct ssam_device *dev);
+
+struct ssam_device *ssam_device_alloc(struct ssam_controller *ctrl,
+ struct ssam_device_uid uid);
+
+int ssam_device_add(struct ssam_device *sdev);
+void ssam_device_remove(struct ssam_device *sdev);
+
+/**
+ * ssam_device_get() - Increment reference count of SSAM client device.
+ * @sdev: The device to increment the reference count of.
+ *
+ * Increments the reference count of the given SSAM client device by
+ * incrementing the reference count of the enclosed &struct device via
+ * get_device().
+ *
+ * See ssam_device_put() for the counter-part of this function.
+ *
+ * Return: Returns the device provided as input.
+ */
+static inline struct ssam_device *ssam_device_get(struct ssam_device *sdev)
+{
+ return sdev ? to_ssam_device(get_device(&sdev->dev)) : NULL;
+}
+
+/**
+ * ssam_device_put() - Decrement reference count of SSAM client device.
+ * @sdev: The device to decrement the reference count of.
+ *
+ * Decrements the reference count of the given SSAM client device by
+ * decrementing the reference count of the enclosed &struct device via
+ * put_device().
+ *
+ * See ssam_device_get() for the counter-part of this function.
+ */
+static inline void ssam_device_put(struct ssam_device *sdev)
+{
+ if (sdev)
+ put_device(&sdev->dev);
+}
+
+/**
+ * ssam_device_get_drvdata() - Get driver-data of SSAM client device.
+ * @sdev: The device to get the driver-data from.
+ *
+ * Return: Returns the driver-data of the given device, previously set via
+ * ssam_device_set_drvdata().
+ */
+static inline void *ssam_device_get_drvdata(struct ssam_device *sdev)
+{
+ return dev_get_drvdata(&sdev->dev);
+}
+
+/**
+ * ssam_device_set_drvdata() - Set driver-data of SSAM client device.
+ * @sdev: The device to set the driver-data of.
+ * @data: The data to set the device's driver-data pointer to.
+ */
+static inline void ssam_device_set_drvdata(struct ssam_device *sdev, void *data)
+{
+ dev_set_drvdata(&sdev->dev, data);
+}
+
+int __ssam_device_driver_register(struct ssam_device_driver *d, struct module *o);
+void ssam_device_driver_unregister(struct ssam_device_driver *d);
+
+/**
+ * ssam_device_driver_register() - Register a SSAM client device driver.
+ * @drv: The driver to register.
+ */
+#define ssam_device_driver_register(drv) \
+ __ssam_device_driver_register(drv, THIS_MODULE)
+
+/**
+ * module_ssam_device_driver() - Helper macro for SSAM device driver
+ * registration.
+ * @drv: The driver managed by this module.
+ *
+ * Helper macro to register a SSAM device driver via module_init() and
+ * module_exit(). This macro may only be used once per module and replaces the
+ * aforementioned definitions.
+ */
+#define module_ssam_device_driver(drv) \
+ module_driver(drv, ssam_device_driver_register, \
+ ssam_device_driver_unregister)
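A minimal driver skeleton tying the bus pieces together; names are hypothetical, and ``foo_match`` refers to a match table as sketched after SSAM_SDEV() above:

	static int foo_probe(struct ssam_device *sdev)
	{
		/* Talk to the device via sdev->ctrl, allocate driver data, etc. */
		return 0;
	}

	static void foo_remove(struct ssam_device *sdev)
	{
		/* Tear down anything set up in probe. */
	}

	static struct ssam_device_driver foo_driver = {
		.probe       = foo_probe,
		.remove      = foo_remove,
		.match_table = foo_match,
		.driver = {
			.name = "foo_ssam_client",
		},
	};
	module_ssam_device_driver(foo_driver);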
+
+
+/* -- Helpers for client-device requests. ----------------------------------- */
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_N() - Define synchronous client-device SAM
+ * request function with neither argument nor return value.
+ * @name: Name of the generated function.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request having neither argument nor return value. Device
+ * specifying parameters are not hard-coded, but instead are provided via the
+ * client device, specifically its UID, supplied when calling this function.
+ * The generated function takes care of setting up the request struct, buffer
+ * allocation, as well as execution of the request itself, returning once the
+ * request has been fully completed. The required transport buffer will be
+ * allocated on the stack.
+ *
+ * The generated function is defined as ``int name(struct ssam_device *sdev)``,
+ * returning the status of the request, which is zero on success and negative
+ * on failure. The ``sdev`` parameter specifies both the target device of the
+ * request and by association the controller via which the request is sent.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_N(__raw_##name, spec) \
+ int name(struct ssam_device *sdev) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance); \
+ }
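A usage sketch for a client-device request: target and instance IDs come from the device's UID, so only the category and command ID (hypothetical here) are specified.

	SSAM_DEFINE_SYNC_REQUEST_CL_N(__foo_cdev_reset, {
		.target_category = 0x01,	/* hypothetical */
		.command_id      = 0x05,	/* hypothetical */
	});

	/* ... in the client driver: status = __foo_cdev_reset(sdev); */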
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_W() - Define synchronous client-device SAM
+ * request function with argument.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking an argument of type @atype and having no
+ * return value. Device specifying parameters are not hard-coded, but instead
+ * are provided via the client device, specifically its UID, supplied when
+ * calling this function. The generated function takes care of setting up the
+ * request struct, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``int name(struct ssam_device *sdev,
+ * const atype *arg)``, returning the status of the request, which is zero on
+ * success and negative on failure. The ``sdev`` parameter specifies both the
+ * target device of the request and by association the controller via which
+ * the request is sent. The request's argument is specified via the ``arg``
+ * pointer.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_W(__raw_##name, atype, spec) \
+ int name(struct ssam_device *sdev, const atype *arg) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance, arg); \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_R() - Define synchronous client-device SAM
+ * request function with return value.
+ * @name: Name of the generated function.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by
+ * @spec, with the request taking no argument but having a return value of
+ * type @rtype. Device specifying parameters are not hard-coded, but instead
+ * are provided via the client device, specifically its UID, supplied when
+ * calling this function. The generated function takes care of setting up the
+ * request struct, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``int name(struct ssam_device *sdev,
+ * rtype *ret)``, returning the status of the request, which is zero on
+ * success and negative on failure. The ``sdev`` parameter specifies both the
+ * target device of the request and by association the controller via which
+ * the request is sent. The request's return value is written to the memory
+ * pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_R(__raw_##name, rtype, spec) \
+ int name(struct ssam_device *sdev, rtype *ret) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance, ret); \
+ }
+
+#endif /* _LINUX_SURFACE_AGGREGATOR_DEVICE_H */
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
new file mode 100644
index 000000000000..64276fbfa1d5
--- /dev/null
+++ b/include/linux/surface_aggregator/serial_hub.h
@@ -0,0 +1,672 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Surface Serial Hub (SSH) protocol and communication interface.
+ *
+ * Lower-level communication layers and SSH protocol definitions for the
+ * Surface System Aggregator Module (SSAM). Provides the interface for basic
+ * packet- and request-based communication with the SSAM EC via SSH.
+ *
+ * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
+#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
+
+#include <linux/crc-ccitt.h>
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+
+/* -- Data structures for SAM-over-SSH communication. ----------------------- */
+
+/**
+ * enum ssh_frame_type - Frame types for SSH frames.
+ *
+ * @SSH_FRAME_TYPE_DATA_SEQ:
+ * Indicates a data frame, followed by a payload with the length specified
+ * in the ``struct ssh_frame.len`` field. This frame is sequenced, meaning
+ * that an ACK is required.
+ *
+ * @SSH_FRAME_TYPE_DATA_NSQ:
+ * Same as %SSH_FRAME_TYPE_DATA_SEQ, but unsequenced, meaning that the
+ * message does not have to be ACKed.
+ *
+ * @SSH_FRAME_TYPE_ACK:
+ * Indicates an ACK message.
+ *
+ * @SSH_FRAME_TYPE_NAK:
+ *	Indicates an error response for a previously sent frame. In general, this
+ * means that the frame and/or payload is malformed, e.g. a CRC is wrong.
+ * For command-type payloads, this can also mean that the command is
+ * invalid.
+ */
+enum ssh_frame_type {
+ SSH_FRAME_TYPE_DATA_SEQ = 0x80,
+ SSH_FRAME_TYPE_DATA_NSQ = 0x00,
+ SSH_FRAME_TYPE_ACK = 0x40,
+ SSH_FRAME_TYPE_NAK = 0x04,
+};
+
+/**
+ * struct ssh_frame - SSH communication frame.
+ * @type: The type of the frame. See &enum ssh_frame_type.
+ * @len: The length of the frame payload directly following the CRC for this
+ * frame. Does not include the final CRC for that payload.
+ * @seq: The sequence number for this message/exchange.
+ */
+struct ssh_frame {
+ u8 type;
+ __le16 len;
+ u8 seq;
+} __packed;
+
+static_assert(sizeof(struct ssh_frame) == 4);
+
+/*
+ * SSH_FRAME_MAX_PAYLOAD_SIZE - Maximum SSH frame payload length in bytes.
+ *
+ * This is the physical maximum length of the protocol. Implementations may
+ * set a more constrained limit.
+ */
+#define SSH_FRAME_MAX_PAYLOAD_SIZE U16_MAX
+
+/**
+ * enum ssh_payload_type - Type indicator for the SSH payload.
+ * @SSH_PLD_TYPE_CMD: The payload is a command structure with optional command
+ * payload.
+ */
+enum ssh_payload_type {
+ SSH_PLD_TYPE_CMD = 0x80,
+};
+
+/**
+ * struct ssh_command - Payload of a command-type frame.
+ * @type: The type of the payload. See &enum ssh_payload_type. Should be
+ * SSH_PLD_TYPE_CMD for this struct.
+ * @tc: Command target category.
+ * @tid_out: Output target ID. Should be zero if this is an incoming (EC to host)
+ * message.
+ * @tid_in: Input target ID. Should be zero if this is an outgoing (host to
+ * EC) message.
+ * @iid: Instance ID.
+ * @rqid: Request ID. Used to match requests with responses and differentiate
+ * between responses and events.
+ * @cid: Command ID.
+ */
+struct ssh_command {
+ u8 type;
+ u8 tc;
+ u8 tid_out;
+ u8 tid_in;
+ u8 iid;
+ __le16 rqid;
+ u8 cid;
+} __packed;
+
+static_assert(sizeof(struct ssh_command) == 8);
+
+/*
+ * SSH_COMMAND_MAX_PAYLOAD_SIZE - Maximum SSH command payload length in bytes.
+ *
+ * This is the maximum payload length physically representable in the
+ * protocol. Implementations may set a more constrained limit.
+ */
+#define SSH_COMMAND_MAX_PAYLOAD_SIZE \
+ (SSH_FRAME_MAX_PAYLOAD_SIZE - sizeof(struct ssh_command))
+
+/*
+ * SSH_MSG_LEN_BASE - Base-length of a SSH message.
+ *
+ * This is the minimum number of bytes required to form a message. The actual
+ * message length is SSH_MSG_LEN_BASE plus the length of the frame payload.
+ */
+#define SSH_MSG_LEN_BASE (sizeof(struct ssh_frame) + 3ull * sizeof(u16))
+
+/*
+ * SSH_MSG_LEN_CTRL - Length of a SSH control message.
+ *
+ * This is the length of a SSH control message, which is equal to that of a
+ * SSH message without any payload.
+ */
+#define SSH_MSG_LEN_CTRL SSH_MSG_LEN_BASE
+
+/**
+ * SSH_MESSAGE_LENGTH() - Compute length of SSH message.
+ * @payload_size: Length of the payload inside the SSH frame.
+ *
+ * Return: Returns the length of a SSH message with payload of specified size.
+ */
+#define SSH_MESSAGE_LENGTH(payload_size) (SSH_MSG_LEN_BASE + (payload_size))
+
+/**
+ * SSH_COMMAND_MESSAGE_LENGTH() - Compute length of SSH command message.
+ * @payload_size: Length of the command payload.
+ *
+ * Return: Returns the length of a SSH command message with command payload of
+ * specified size.
+ */
+#define SSH_COMMAND_MESSAGE_LENGTH(payload_size) \
+ SSH_MESSAGE_LENGTH(sizeof(struct ssh_command) + (payload_size))
+
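A quick worked check of these length macros (a sketch; the static_assert merely restates the arithmetic): SSH_MSG_LEN_BASE is sizeof(struct ssh_frame) + 3 * sizeof(u16) = 4 + 6 = 10 bytes, accounting for the SYN bytes, the frame CRC, and the payload CRC, so a command message carrying a 4-byte command payload occupies 10 + 8 + 4 = 22 bytes on the wire.

	/* 2 (SYN) + 4 (frame) + 2 (frame CRC) + 8 (command) + 4 (payload) + 2 (payload CRC) */
	static_assert(SSH_COMMAND_MESSAGE_LENGTH(4) == 22);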
+/**
+ * SSH_MSGOFFSET_FRAME() - Compute offset in SSH message to specified field in
+ * frame.
+ * @field: The field for which the offset should be computed.
+ *
+ * Return: Returns the offset of the specified &struct ssh_frame field in the
+ * raw SSH message data. Takes SYN bytes (u16) preceding the frame into
+ * account.
+ */
+#define SSH_MSGOFFSET_FRAME(field) \
+ (sizeof(u16) + offsetof(struct ssh_frame, field))
+
+/**
+ * SSH_MSGOFFSET_COMMAND() - Compute offset in SSH message to specified field
+ * in command.
+ * @field: The field for which the offset should be computed.
+ *
+ * Return: Returns the offset of the specified &struct ssh_command field in
+ * the raw SSH message data. Takes SYN bytes (u16) preceding the frame and the
+ * frame CRC (u16) between frame and command into account.
+ */
+#define SSH_MSGOFFSET_COMMAND(field) \
+ (2ull * sizeof(u16) + sizeof(struct ssh_frame) \
+ + offsetof(struct ssh_command, field))
+
+/*
+ * SSH_MSG_SYN - SSH message synchronization (SYN) bytes as u16.
+ */
+#define SSH_MSG_SYN ((u16)0x55aa)
+
+/**
+ * ssh_crc() - Compute CRC for SSH messages.
+ * @buf: Pointer to the data for which the CRC should be computed.
+ * @len: The length of the data for which the CRC should be computed.
+ *
+ * Return: Returns the CRC computed on the provided data, as used for SSH
+ * messages.
+ */
+static inline u16 ssh_crc(const u8 *buf, size_t len)
+{
+ return crc_ccitt_false(0xffff, buf, len);
+}
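To show how the SYN, offset, and CRC helpers fit together, below is a minimal sketch that assembles an ACK control message into a caller-provided buffer. It is not part of this interface; it assumes the usual byte-order and unaligned-access helpers (cpu_to_le16(), put_unaligned_le16() from <asm/unaligned.h>) and <linux/string.h> are available.

	/*
	 * Sketch: write an ACK message for sequence number @seq into @buf,
	 * which must provide at least SSH_MSG_LEN_CTRL (10) bytes.
	 */
	static inline void example_write_ack(u8 *buf, u8 seq)
	{
		struct ssh_frame frame = {
			.type = SSH_FRAME_TYPE_ACK,
			.len  = cpu_to_le16(0),
			.seq  = seq,
		};

		put_unaligned_le16(SSH_MSG_SYN, buf);
		memcpy(&buf[SSH_MSGOFFSET_FRAME(type)], &frame, sizeof(frame));

		/* CRC over the frame, then a CRC over the (empty) payload. */
		put_unaligned_le16(ssh_crc(&buf[SSH_MSGOFFSET_FRAME(type)], sizeof(frame)),
				   &buf[sizeof(u16) + sizeof(frame)]);
		put_unaligned_le16(ssh_crc(buf, 0),
				   &buf[2 * sizeof(u16) + sizeof(frame)]);
	}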
+
+/*
+ * SSH_NUM_EVENTS - The number of reserved event IDs.
+ *
+ * The number of reserved event IDs, used for registering an SSH event
+ * handler. Valid event IDs are numbers below or equal to this value, with
+ * exception of zero, which is not an event ID. Thus, this is also the
+ * absolute maximum number of event handlers that can be registered.
+ */
+#define SSH_NUM_EVENTS 34
+
+/*
+ * SSH_NUM_TARGETS - The number of communication targets used in the protocol.
+ */
+#define SSH_NUM_TARGETS 2
+
+/**
+ * ssh_rqid_next_valid() - Return the next valid request ID.
+ * @rqid: The current request ID.
+ *
+ * Return: Returns the next valid request ID, following the current request ID
+ * provided to this function. This function skips any request IDs reserved for
+ * events.
+ */
+static inline u16 ssh_rqid_next_valid(u16 rqid)
+{
+ return rqid > 0 ? rqid + 1u : rqid + SSH_NUM_EVENTS + 1u;
+}
+
+/**
+ * ssh_rqid_to_event() - Convert request ID to its corresponding event ID.
+ * @rqid: The request ID to convert.
+ */
+static inline u16 ssh_rqid_to_event(u16 rqid)
+{
+ return rqid - 1u;
+}
+
+/**
+ * ssh_rqid_is_event() - Check if given request ID is a valid event ID.
+ * @rqid: The request ID to check.
+ */
+static inline bool ssh_rqid_is_event(u16 rqid)
+{
+ return ssh_rqid_to_event(rqid) < SSH_NUM_EVENTS;
+}
+
+/**
+ * ssh_tc_to_rqid() - Convert target category to its corresponding request ID.
+ * @tc: The target category to convert.
+ */
+static inline u16 ssh_tc_to_rqid(u8 tc)
+{
+ return tc;
+}
+
+/**
+ * ssh_tid_to_index() - Convert target ID to its corresponding target index.
+ * @tid: The target ID to convert.
+ */
+static inline u8 ssh_tid_to_index(u8 tid)
+{
+ return tid - 1u;
+}
+
+/**
+ * ssh_tid_is_valid() - Check if target ID is valid/supported.
+ * @tid: The target ID to check.
+ */
+static inline bool ssh_tid_is_valid(u8 tid)
+{
+ return ssh_tid_to_index(tid) < SSH_NUM_TARGETS;
+}
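The request-ID and target-ID helpers above combine as in the following sketch, which classifies an incoming command; the function name and the pr_*() logging (assuming <linux/printk.h>) are illustrative only.

	/* Sketch: request IDs 1..SSH_NUM_EVENTS are events, the rest are responses. */
	static inline void example_classify(const struct ssh_command *cmd)
	{
		u16 rqid = le16_to_cpu(cmd->rqid);

		if (!ssh_tid_is_valid(cmd->tid_in))
			pr_warn("unsupported target ID %u\n", cmd->tid_in);

		if (ssh_rqid_is_event(rqid))
			pr_info("event ID %u\n", ssh_rqid_to_event(rqid));
		else
			pr_info("response to request %u\n", rqid);
	}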
+
+/**
+ * struct ssam_span - Reference to a buffer region.
+ * @ptr: Pointer to the buffer region.
+ * @len: Length of the buffer region.
+ *
+ * A reference to a (non-owned) buffer segment, consisting of pointer and
+ * length. Use of this struct indicates non-owned data, i.e. data whose
+ * lifetime is managed (i.e. allocated/freed) via another pointer.
+ */
+struct ssam_span {
+ u8 *ptr;
+ size_t len;
+};
+
+/*
+ * Known SSH/EC target categories.
+ *
+ * List of currently known target category values; "Known" as in we know they
+ * exist and are valid on at least some device/model. Detailed functionality
+ * or the full category name is only known for some of these categories and
+ * is detailed in the respective comment below.
+ *
+ * These values and abbreviations have been extracted from strings inside the
+ * Windows driver.
+ */
+enum ssam_ssh_tc {
+ /* Category 0x00 is invalid for EC use. */
+ SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */
+ SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */
+ SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. */
+ SSAM_SSH_TC_PMC = 0x04,
+ SSAM_SSH_TC_FAN = 0x05,
+ SSAM_SSH_TC_PoM = 0x06,
+ SSAM_SSH_TC_DBG = 0x07,
+ SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */
+ SSAM_SSH_TC_FWU = 0x09,
+ SSAM_SSH_TC_UNI = 0x0a,
+ SSAM_SSH_TC_LPC = 0x0b,
+ SSAM_SSH_TC_TCL = 0x0c,
+ SSAM_SSH_TC_SFL = 0x0d,
+ SSAM_SSH_TC_KIP = 0x0e,
+ SSAM_SSH_TC_EXT = 0x0f,
+ SSAM_SSH_TC_BLD = 0x10,
+ SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */
+ SSAM_SSH_TC_SEN = 0x12,
+ SSAM_SSH_TC_SRQ = 0x13,
+ SSAM_SSH_TC_MCU = 0x14,
+ SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. */
+ SSAM_SSH_TC_TCH = 0x16,
+ SSAM_SSH_TC_BKL = 0x17,
+ SSAM_SSH_TC_TAM = 0x18,
+ SSAM_SSH_TC_ACC = 0x19,
+ SSAM_SSH_TC_UFI = 0x1a,
+ SSAM_SSH_TC_USC = 0x1b,
+ SSAM_SSH_TC_PEN = 0x1c,
+ SSAM_SSH_TC_VID = 0x1d,
+ SSAM_SSH_TC_AUD = 0x1e,
+ SSAM_SSH_TC_SMC = 0x1f,
+ SSAM_SSH_TC_KPD = 0x20,
+ SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */
+};
+
+
+/* -- Packet transport layer (ptl). ----------------------------------------- */
+
+/**
+ * enum ssh_packet_base_priority - Base priorities for &struct ssh_packet.
+ * @SSH_PACKET_PRIORITY_FLUSH: Base priority for flush packets.
+ * @SSH_PACKET_PRIORITY_DATA: Base priority for normal data packets.
+ * @SSH_PACKET_PRIORITY_NAK: Base priority for NAK packets.
+ * @SSH_PACKET_PRIORITY_ACK: Base priority for ACK packets.
+ */
+enum ssh_packet_base_priority {
+ SSH_PACKET_PRIORITY_FLUSH = 0, /* same as DATA to sequence flush */
+ SSH_PACKET_PRIORITY_DATA = 0,
+ SSH_PACKET_PRIORITY_NAK = 1,
+ SSH_PACKET_PRIORITY_ACK = 2,
+};
+
+/*
+ * Same as SSH_PACKET_PRIORITY() below, only with actual values.
+ */
+#define __SSH_PACKET_PRIORITY(base, try) \
+ (((base) << 4) | ((try) & 0x0f))
+
+/**
+ * SSH_PACKET_PRIORITY() - Compute packet priority from base priority and
+ * number of tries.
+ * @base: The base priority as the suffix of &enum ssh_packet_base_priority, e.g.
+ * ``FLUSH``, ``DATA``, ``ACK``, or ``NAK``.
+ * @try: The number of tries (must be less than 16).
+ *
+ * Compute the combined packet priority. The combined priority is dominated by
+ * the base priority, whereas the number of (re-)tries decides the precedence
+ * of packets with the same base priority, giving higher priority to packets
+ * that already have more tries.
+ *
+ * Return: Returns the computed priority as value fitting inside a &u8. A
+ * higher number means a higher priority.
+ */
+#define SSH_PACKET_PRIORITY(base, try) \
+ __SSH_PACKET_PRIORITY(SSH_PACKET_PRIORITY_##base, (try))
+
+/**
+ * ssh_packet_priority_get_try() - Get number of tries from packet priority.
+ * @priority: The packet priority.
+ *
+ * Return: Returns the number of tries encoded in the specified packet
+ * priority.
+ */
+static inline u8 ssh_packet_priority_get_try(u8 priority)
+{
+ return priority & 0x0f;
+}
+
+/**
+ * ssh_packet_priority_get_base() - Get base priority from packet priority.
+ * @priority: The packet priority.
+ *
+ * Return: Returns the base priority encoded in the given packet priority.
+ */
+static inline u8 ssh_packet_priority_get_base(u8 priority)
+{
+ return (priority & 0xf0) >> 4;
+}
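For illustration, a small sketch of how these priority helpers interact (the function name is hypothetical): a first-try ACK has priority (2 << 4) | 0 = 0x20 and therefore precedes a data packet on its second attempt, whose priority is (0 << 4) | 1 = 0x01.

	static inline bool example_priority_order(void)
	{
		u8 ack   = SSH_PACKET_PRIORITY(ACK, 0);		/* 0x20 */
		u8 data1 = SSH_PACKET_PRIORITY(DATA, 1);	/* 0x01 */
		u8 data0 = SSH_PACKET_PRIORITY(DATA, 0);	/* 0x00 */

		/* ACKs outrank data; within a base, retries outrank first tries. */
		return ack > data1 && data1 > data0 &&
		       ssh_packet_priority_get_base(ack) == SSH_PACKET_PRIORITY_ACK &&
		       ssh_packet_priority_get_try(data1) == 1;
	}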
+
+enum ssh_packet_flags {
+ /* state flags */
+ SSH_PACKET_SF_LOCKED_BIT,
+ SSH_PACKET_SF_QUEUED_BIT,
+ SSH_PACKET_SF_PENDING_BIT,
+ SSH_PACKET_SF_TRANSMITTING_BIT,
+ SSH_PACKET_SF_TRANSMITTED_BIT,
+ SSH_PACKET_SF_ACKED_BIT,
+ SSH_PACKET_SF_CANCELED_BIT,
+ SSH_PACKET_SF_COMPLETED_BIT,
+
+ /* type flags */
+ SSH_PACKET_TY_FLUSH_BIT,
+ SSH_PACKET_TY_SEQUENCED_BIT,
+ SSH_PACKET_TY_BLOCKING_BIT,
+
+ /* mask for state flags */
+ SSH_PACKET_FLAGS_SF_MASK =
+ BIT(SSH_PACKET_SF_LOCKED_BIT)
+ | BIT(SSH_PACKET_SF_QUEUED_BIT)
+ | BIT(SSH_PACKET_SF_PENDING_BIT)
+ | BIT(SSH_PACKET_SF_TRANSMITTING_BIT)
+ | BIT(SSH_PACKET_SF_TRANSMITTED_BIT)
+ | BIT(SSH_PACKET_SF_ACKED_BIT)
+ | BIT(SSH_PACKET_SF_CANCELED_BIT)
+ | BIT(SSH_PACKET_SF_COMPLETED_BIT),
+
+ /* mask for type flags */
+ SSH_PACKET_FLAGS_TY_MASK =
+ BIT(SSH_PACKET_TY_FLUSH_BIT)
+ | BIT(SSH_PACKET_TY_SEQUENCED_BIT)
+ | BIT(SSH_PACKET_TY_BLOCKING_BIT),
+};
+
+struct ssh_ptl;
+struct ssh_packet;
+
+/**
+ * struct ssh_packet_ops - Callback operations for a SSH packet.
+ * @release: Function called when the packet reference count reaches zero.
+ * This callback must be relied upon to ensure that the packet has
+ * left the transport system(s).
+ * @complete: Function called when the packet is completed, either with
+ * success or failure. In case of failure, the reason for the
+ * failure is indicated by the value of the provided status code
+ * argument. This value will be zero in case of success. Note that
+ * a call to this callback does not guarantee that the packet is
+ * not in use by the transport system any more.
+ */
+struct ssh_packet_ops {
+ void (*release)(struct ssh_packet *p);
+ void (*complete)(struct ssh_packet *p, int status);
+};
+
+/**
+ * struct ssh_packet - SSH transport packet.
+ * @ptl: Pointer to the packet transport layer. May be %NULL if the packet
+ * (or enclosing request) has not been submitted yet.
+ * @refcnt: Reference count of the packet.
+ * @priority: Priority of the packet. Must be computed via
+ * SSH_PACKET_PRIORITY(). Must only be accessed while holding the
+ * queue lock after first submission.
+ * @data: Raw message data.
+ * @data.len: Length of the raw message data.
+ * @data.ptr: Pointer to the raw message data buffer.
+ * @state: State and type flags describing current packet state (dynamic)
+ * and type (static). See &enum ssh_packet_flags for possible
+ * options.
+ * @timestamp: Timestamp specifying when the latest transmission of a
+ * currently pending packet has been started. May be %KTIME_MAX
+ * before or in-between transmission attempts. Used for the packet
+ * timeout implementation. Must only be accessed while holding the
+ * pending lock after first submission.
+ * @queue_node: The list node for the packet queue.
+ * @pending_node: The list node for the set of pending packets.
+ * @ops: Packet operations.
+ */
+struct ssh_packet {
+ struct ssh_ptl *ptl;
+ struct kref refcnt;
+
+ u8 priority;
+
+ struct {
+ size_t len;
+ u8 *ptr;
+ } data;
+
+ unsigned long state;
+ ktime_t timestamp;
+
+ struct list_head queue_node;
+ struct list_head pending_node;
+
+ const struct ssh_packet_ops *ops;
+};
+
+struct ssh_packet *ssh_packet_get(struct ssh_packet *p);
+void ssh_packet_put(struct ssh_packet *p);
+
+/**
+ * ssh_packet_set_data() - Set raw message data of packet.
+ * @p: The packet for which the message data should be set.
+ * @ptr: Pointer to the memory holding the message data.
+ * @len: Length of the message data.
+ *
+ * Sets the raw message data buffer of the packet to the provided memory. The
+ * memory is not copied. Instead, the caller is responsible for management
+ * (i.e. allocation and deallocation) of the memory. The caller must ensure
+ * that the provided memory is valid and contains a valid SSH message,
+ * starting from the time of submission of the packet until the ``release``
+ * callback has been called. During this time, the memory may not be altered
+ * in any way.
+ */
+static inline void ssh_packet_set_data(struct ssh_packet *p, u8 *ptr, size_t len)
+{
+ p->data.ptr = ptr;
+ p->data.len = len;
+}
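The sketch below shows how a user of this layer might embed a &struct ssh_packet in its own allocation and provide &struct ssh_packet_ops. All type and function names are hypothetical, and the sketch assumes <linux/slab.h> and <linux/printk.h>; initialization and submission of the packet go through the packet transport layer, which is not part of this header.

	struct example_ctrl_packet {
		struct ssh_packet packet;
		u8 buf[SSH_MSG_LEN_CTRL];
	};

	static void example_packet_release(struct ssh_packet *p)
	{
		/* Last reference dropped: the packet has left the transport system. */
		kfree(container_of(p, struct example_ctrl_packet, packet));
	}

	static void example_packet_complete(struct ssh_packet *p, int status)
	{
		if (status)
			pr_warn("control packet failed: %d\n", status);
	}

	static const struct ssh_packet_ops example_packet_ops = {
		.release  = example_packet_release,
		.complete = example_packet_complete,
	};

	static void example_packet_prepare(struct example_ctrl_packet *cp)
	{
		/*
		 * The message buffer shares the packet's allocation, so it stays
		 * valid and unmodified until example_packet_release() runs.
		 */
		ssh_packet_set_data(&cp->packet, cp->buf, sizeof(cp->buf));
	}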
+
+
+/* -- Request transport layer (rtl). ---------------------------------------- */
+
+enum ssh_request_flags {
+ /* state flags */
+ SSH_REQUEST_SF_LOCKED_BIT,
+ SSH_REQUEST_SF_QUEUED_BIT,
+ SSH_REQUEST_SF_PENDING_BIT,
+ SSH_REQUEST_SF_TRANSMITTING_BIT,
+ SSH_REQUEST_SF_TRANSMITTED_BIT,
+ SSH_REQUEST_SF_RSPRCVD_BIT,
+ SSH_REQUEST_SF_CANCELED_BIT,
+ SSH_REQUEST_SF_COMPLETED_BIT,
+
+ /* type flags */
+ SSH_REQUEST_TY_FLUSH_BIT,
+ SSH_REQUEST_TY_HAS_RESPONSE_BIT,
+
+ /* mask for state flags */
+ SSH_REQUEST_FLAGS_SF_MASK =
+ BIT(SSH_REQUEST_SF_LOCKED_BIT)
+ | BIT(SSH_REQUEST_SF_QUEUED_BIT)
+ | BIT(SSH_REQUEST_SF_PENDING_BIT)
+ | BIT(SSH_REQUEST_SF_TRANSMITTING_BIT)
+ | BIT(SSH_REQUEST_SF_TRANSMITTED_BIT)
+ | BIT(SSH_REQUEST_SF_RSPRCVD_BIT)
+ | BIT(SSH_REQUEST_SF_CANCELED_BIT)
+ | BIT(SSH_REQUEST_SF_COMPLETED_BIT),
+
+ /* mask for type flags */
+ SSH_REQUEST_FLAGS_TY_MASK =
+ BIT(SSH_REQUEST_TY_FLUSH_BIT)
+ | BIT(SSH_REQUEST_TY_HAS_RESPONSE_BIT),
+};
+
+struct ssh_rtl;
+struct ssh_request;
+
+/**
+ * struct ssh_request_ops - Callback operations for a SSH request.
+ * @release: Function called when the request's reference count reaches zero.
+ * This callback must be relied upon to ensure that the request has
+ * left the transport systems (both packet and request systems).
+ * @complete: Function called when the request is completed, either with
+ * success or failure. The command data for the request response
+ * is provided via the &struct ssh_command parameter (``cmd``),
+ * the command payload of the request response via the &struct
+ * ssam_span parameter (``data``).
+ *
+ * If the request does not have any response or has not been
+ * completed with success, both ``cmd`` and ``data`` parameters will
+ * be NULL. If the request response does not have any command
+ * payload, the ``data`` span will be an empty (zero-length) span.
+ *
+ * In case of failure, the reason for the failure is indicated by
+ * the value of the provided status code argument (``status``). This
+ * value will be zero in case of success and a regular errno
+ * otherwise.
+ *
+ * Note that a call to this callback does not guarantee that the
+ * request is not in use by the transport systems any more.
+ */
+struct ssh_request_ops {
+ void (*release)(struct ssh_request *rqst);
+ void (*complete)(struct ssh_request *rqst,
+ const struct ssh_command *cmd,
+ const struct ssam_span *data, int status);
+};
+
+/**
+ * struct ssh_request - SSH transport request.
+ * @packet: The underlying SSH transport packet.
+ * @node: List node for the request queue and pending set.
+ * @state: State and type flags describing current request state (dynamic)
+ * and type (static). See &enum ssh_request_flags for possible
+ * options.
+ * @timestamp: Timestamp specifying when we start waiting on the response of
+ * the request. This is set once the underlying packet has been
+ * completed and may be %KTIME_MAX before that, or when the request
+ * does not expect a response. Used for the request timeout
+ * implementation.
+ * @ops: Request operations.
+ */
+struct ssh_request {
+ struct ssh_packet packet;
+ struct list_head node;
+
+ unsigned long state;
+ ktime_t timestamp;
+
+ const struct ssh_request_ops *ops;
+};
+
+/**
+ * to_ssh_request() - Cast a SSH packet to its enclosing SSH request.
+ * @p: The packet to cast.
+ *
+ * Casts the given &struct ssh_packet to its enclosing &struct ssh_request.
+ * The caller is responsible for making sure that the packet is actually
+ * wrapped in a &struct ssh_request.
+ *
+ * Return: Returns the &struct ssh_request wrapping the provided packet.
+ */
+static inline struct ssh_request *to_ssh_request(struct ssh_packet *p)
+{
+ return container_of(p, struct ssh_request, packet);
+}
+
+/**
+ * ssh_request_get() - Increment reference count of request.
+ * @r: The request to increment the reference count of.
+ *
+ * Increments the reference count of the given request by incrementing the
+ * reference count of the underlying &struct ssh_packet, enclosed in it.
+ *
+ * See also ssh_request_put(), ssh_packet_get().
+ *
+ * Return: Returns the request provided as input.
+ */
+static inline struct ssh_request *ssh_request_get(struct ssh_request *r)
+{
+ return r ? to_ssh_request(ssh_packet_get(&r->packet)) : NULL;
+}
+
+/**
+ * ssh_request_put() - Decrement reference count of request.
+ * @r: The request to decrement the reference count of.
+ *
+ * Decrements the reference count of the given request by decrementing the
+ * reference count of the underlying &struct ssh_packet, enclosed in it. If
+ * the reference count reaches zero, the ``release`` callback specified in the
+ * request's &struct ssh_request_ops, i.e. ``r->ops->release``, will be
+ * called.
+ *
+ * See also ssh_request_get(), ssh_packet_put().
+ */
+static inline void ssh_request_put(struct ssh_request *r)
+{
+ if (r)
+ ssh_packet_put(&r->packet);
+}
+
+/**
+ * ssh_request_set_data() - Set raw message data of request.
+ * @r: The request for which the message data should be set.
+ * @ptr: Pointer to the memory holding the message data.
+ * @len: Length of the message data.
+ *
+ * Sets the raw message data buffer of the underlying packet to the specified
+ * buffer. Does not copy the actual message data, just sets the buffer pointer
+ * and length. Refer to ssh_packet_set_data() for more details.
+ */
+static inline void ssh_request_set_data(struct ssh_request *r, u8 *ptr, size_t len)
+{
+ ssh_packet_set_data(&r->packet, ptr, len);
+}
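For the request side, a completion and release pair might look like the following sketch (hypothetical names, assuming <linux/slab.h> and <linux/printk.h>, and assuming the request itself was allocated with kmalloc()); per the documentation above, ``cmd`` and ``data`` are NULL unless the request completed successfully with a response.

	static void example_request_complete(struct ssh_request *rqst,
					     const struct ssh_command *cmd,
					     const struct ssam_span *data, int status)
	{
		if (status) {
			pr_warn("request failed: %d\n", status);
			return;
		}

		if (cmd)	/* response received; payload may be an empty span */
			pr_info("response cid=%#04x with %zu payload bytes\n",
				cmd->cid, data->len);
	}

	static void example_request_release(struct ssh_request *rqst)
	{
		/* Both packet and request layers are done with it now. */
		kfree(rqst);
	}

	static const struct ssh_request_ops example_request_ops = {
		.release  = example_request_release,
		.complete = example_request_complete,
	};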
+
+#endif /* _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 596bc2f4d9b0..32f665b1ee85 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -356,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
+extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
@@ -408,7 +408,11 @@ extern struct address_space *swapper_spaces[];
#define swap_address_space(entry) \
(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
>> SWAP_ADDRESS_SPACE_SHIFT])
-extern unsigned long total_swapcache_pages(void);
+static inline unsigned long total_swapcache_pages(void)
+{
+ return global_node_page_state(NR_SWAPCACHE);
+}
+
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern void *get_shadow_from_swap_cache(swp_entry_t entry);
@@ -468,7 +472,6 @@ extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
-extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index d9c9fc9ca5d2..5857a937c637 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -29,6 +29,7 @@ enum swiotlb_force {
* controllable.
*/
#define IO_TLB_SHIFT 11
+#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)
/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 7688bc983de5..2839dc9a7c01 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -68,6 +68,7 @@ union bpf_attr;
struct io_uring_params;
struct clone_args;
struct open_how;
+struct mount_attr;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -607,11 +608,11 @@ asmlinkage long sys_unshare(unsigned long unshare_flags);
/* kernel/futex.c */
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
- struct __kernel_timespec __user *utime, u32 __user *uaddr2,
- u32 val3);
+ const struct __kernel_timespec __user *utime,
+ u32 __user *uaddr2, u32 val3);
asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val,
- struct old_timespec32 __user *utime, u32 __user *uaddr2,
- u32 val3);
+ const struct old_timespec32 __user *utime,
+ u32 __user *uaddr2, u32 val3);
asmlinkage long sys_get_robust_list(int pid,
struct robust_list_head __user * __user *head_ptr,
size_t __user *len_ptr);
@@ -1028,6 +1029,9 @@ asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags);
asmlinkage long sys_move_mount(int from_dfd, const char __user *from_path,
int to_dfd, const char __user *to_path,
unsigned int ms_flags);
+asmlinkage long sys_mount_setattr(int dfd, const char __user *path,
+ unsigned int flags,
+ struct mount_attr __user *uattr, size_t usize);
asmlinkage long sys_fsopen(const char __user *fs_name, unsigned int flags);
asmlinkage long sys_fsconfig(int fs_fd, unsigned int cmd, const char __user *key,
const void __user *value, int aux);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 2caa34c1ca1a..d76a1ddf83a3 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -164,11 +164,13 @@ __ATTRIBUTE_GROUPS(_name)
struct file;
struct vm_area_struct;
+struct address_space;
struct bin_attribute {
struct attribute attr;
size_t size;
void *private;
+ struct address_space *mapping;
ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 2f87377e9af7..48d8a363319e 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -496,7 +496,8 @@ static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
}
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
- const struct sk_buff *orig_skb);
+ const struct sk_buff *orig_skb,
+ const struct sk_buff *ack_skb);
static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index cdd049a724b1..54269e47ac9a 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -88,7 +88,7 @@ struct tee_param {
* @close_session: close a session
* @invoke_func: invoke a trusted function
* @cancel_req: request cancel of an ongoing invoke or open
- * @supp_revc: called for supplicant to get a command
+ * @supp_recv: called for supplicant to get a command
* @supp_send: called for supplicant to send a response
* @shm_register: register shared memory buffer in TEE
* @shm_unregister: unregister shared memory buffer in TEE
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 31b84404f047..6ac7bb1d2b1f 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -17,7 +17,6 @@
#include <linux/workqueue.h>
#include <uapi/linux/thermal.h>
-#define THERMAL_TRIPS_NONE -1
#define THERMAL_MAX_TRIPS 12
/* invalid cooling state */
@@ -77,8 +76,6 @@ struct thermal_zone_device_ops {
int (*set_emul_temp) (struct thermal_zone_device *, int);
int (*get_trend) (struct thermal_zone_device *, int,
enum thermal_trend *);
- int (*notify) (struct thermal_zone_device *, int,
- enum thermal_trip_type);
void (*hot)(struct thermal_zone_device *);
void (*critical)(struct thermal_zone_device *);
};
@@ -118,9 +115,9 @@ struct thermal_cooling_device {
* @devdata: private pointer for device private data
* @trips: number of trip points the thermal zone supports
* @trips_disabled; bitmap for disabled trips
- * @passive_delay: number of milliseconds to wait between polls when
+ * @passive_delay_jiffies: number of jiffies to wait between polls when
* performing passive cooling.
- * @polling_delay: number of milliseconds to wait between polls when
+ * @polling_delay_jiffies: number of jiffies to wait between polls when
* checking whether trip points have been crossed (0 for
* interrupt driven systems)
* @temperature: current temperature. This is only for core code,
@@ -133,9 +130,6 @@ struct thermal_cooling_device {
trip point.
* @prev_high_trip: the above current temperature if you've crossed a
passive trip point.
- * @forced_passive: If > 0, temperature at which to switch on all ACPI
- * processor cooling devices. Currently only used by the
- * step-wise governor.
* @need_update: if equals 1, thermal_zone_device_update needs to be invoked.
* @ops: operations this &thermal_zone_device supports
* @tzp: thermal zone parameters
@@ -161,15 +155,14 @@ struct thermal_zone_device {
void *devdata;
int trips;
unsigned long trips_disabled; /* bitmap for disabled trips */
- int passive_delay;
- int polling_delay;
+ unsigned long passive_delay_jiffies;
+ unsigned long polling_delay_jiffies;
int temperature;
int last_temperature;
int emul_temperature;
int passive;
int prev_low_trip;
int prev_high_trip;
- unsigned int forced_passive;
atomic_t need_update;
struct thermal_zone_device_ops *ops;
struct thermal_zone_params *tzp;
@@ -397,7 +390,6 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
int thermal_zone_get_slope(struct thermal_zone_device *tz);
int thermal_zone_get_offset(struct thermal_zone_device *tz);
-void thermal_cdev_update(struct thermal_cooling_device *);
void thermal_notify_framework(struct thermal_zone_device *, int);
int thermal_zone_device_enable(struct thermal_zone_device *tz);
int thermal_zone_device_disable(struct thermal_zone_device *tz);
@@ -444,8 +436,6 @@ static inline int thermal_zone_get_offset(
struct thermal_zone_device *tz)
{ return -ENODEV; }
-static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
-{ }
static inline void thermal_notify_framework(struct thermal_zone_device *tz,
int trip)
{ }
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 034dccf93955..659a0a810fa1 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -45,6 +45,8 @@ enum tb_cfg_pkg_type {
* @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
* Thunderbolt dock (and Display Port). All PCIe
* links downstream of the dock are removed.
+ * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when the
+ * PCIe tunneling is disabled from the BIOS.
*/
enum tb_security_level {
TB_SECURITY_NONE,
@@ -52,6 +54,7 @@ enum tb_security_level {
TB_SECURITY_SECURE,
TB_SECURITY_DPONLY,
TB_SECURITY_USBONLY,
+ TB_SECURITY_NOPCIE,
};
/**
diff --git a/include/linux/timer.h b/include/linux/timer.h
index fda13c9d1256..4118a97e62fb 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -192,6 +192,8 @@ extern int try_to_del_timer_sync(struct timer_list *timer);
#define del_singleshot_timer_sync(t) del_timer_sync(t)
+extern bool timer_curr_running(struct timer_list *timer);
+
extern void init_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
diff --git a/include/linux/topology.h b/include/linux/topology.h
index ad03df1cc266..7634cd737061 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -48,6 +48,7 @@ int arch_update_cpu_topology(void);
/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE 10
#define REMOTE_DISTANCE 20
+#define DISTANCE_BITS 8
#ifndef node_distance
#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 7f65bd1dd307..0910c5803f35 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -32,11 +32,27 @@
#define TOROUT_STRING(s) \
pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s)
#define VERBOSE_TOROUT_STRING(s) \
- do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
+do { \
+ if (verbose) { \
+ verbose_torout_sleep(); \
+ pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); \
+ } \
+} while (0)
#define VERBOSE_TOROUT_ERRSTRING(s) \
- do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
+do { \
+ if (verbose) { \
+ verbose_torout_sleep(); \
+ pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); \
+ } \
+} while (0)
+void verbose_torout_sleep(void);
/* Definitions for online/offline exerciser. */
+#ifdef CONFIG_HOTPLUG_CPU
+int torture_num_online_cpus(void);
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+static inline int torture_num_online_cpus(void) { return 1; }
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
typedef void torture_ofl_func(void);
bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes,
unsigned long *sum_offl, int *min_onl, int *max_onl);
@@ -61,6 +77,13 @@ static inline void torture_random_init(struct torture_random_state *trsp)
trsp->trs_count = 0;
}
+/* Definitions for high-resolution-timer sleeps. */
+int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp);
+int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp);
+int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp);
+int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp);
+int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp);
+
/* Task shuffler, which causes CPUs to occasionally go idle. */
void torture_shuffle_task_register(struct task_struct *tp);
int torture_shuffle_init(long shuffint);
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 8f4ff39f51e7..543aa3b1dedc 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -31,6 +31,7 @@ struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
+/* if you add a new hash to this, increment TPM_MAX_HASHES below */
enum tpm_algorithms {
TPM_ALG_ERROR = 0x0000,
TPM_ALG_SHA1 = 0x0004,
@@ -42,6 +43,12 @@ enum tpm_algorithms {
TPM_ALG_SM3_256 = 0x0012,
};
+/*
+ * maximum number of hashing algorithms a TPM can have. This is
+ * basically a count of every hash in tpm_algorithms above
+ */
+#define TPM_MAX_HASHES 5
+
struct tpm_digest {
u16 alg_id;
u8 digest[TPM_MAX_DIGEST_SIZE];
@@ -146,7 +153,7 @@ struct tpm_chip {
struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
- const struct attribute_group *groups[3];
+ const struct attribute_group *groups[3 + TPM_MAX_HASHES];
unsigned int groups_cnt;
u32 nr_allocated_banks;
@@ -397,6 +404,10 @@ static inline u32 tpm2_rc_value(u32 rc)
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
extern int tpm_is_tpm2(struct tpm_chip *chip);
+extern __must_check int tpm_try_get_ops(struct tpm_chip *chip);
+extern void tpm_put_ops(struct tpm_chip *chip);
+extern ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf,
+ size_t min_rsp_body_length, const char *desc);
extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digest);
extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
@@ -410,7 +421,6 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
return -ENODEV;
}
-
static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx,
struct tpm_digest *digest)
{
diff --git a/include/linux/trace.h b/include/linux/trace.h
index 886a4ffd9d45..be1e130ed87c 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -34,8 +34,9 @@ int unregister_ftrace_export(struct trace_export *export);
struct trace_array;
void trace_printk_init_buffers(void);
+__printf(3, 4)
int trace_array_printk(struct trace_array *tr, unsigned long ip,
- const char *fmt, ...);
+ const char *fmt, ...);
int trace_array_init_printk(struct trace_array *tr);
void trace_array_put(struct trace_array *tr);
struct trace_array *trace_array_get_by_name(const char *name);
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index d321fe5ad1a1..7077fec653bb 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -55,6 +55,8 @@ struct trace_event;
int trace_raw_output_prep(struct trace_iterator *iter,
struct trace_event *event);
+extern __printf(2, 3)
+void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -87,6 +89,8 @@ struct trace_iterator {
unsigned long iter_flags;
void *temp; /* temp holder */
unsigned int temp_size;
+ char *fmt; /* modified format holder */
+ unsigned int fmt_size;
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
@@ -148,17 +152,75 @@ enum print_line_t {
enum print_line_t trace_handle_return(struct trace_seq *s);
-void tracing_generic_entry_update(struct trace_entry *entry,
- unsigned short type,
- unsigned long flags,
- int pc);
+static inline void tracing_generic_entry_update(struct trace_entry *entry,
+ unsigned short type,
+ unsigned int trace_ctx)
+{
+ entry->preempt_count = trace_ctx & 0xff;
+ entry->pid = current->pid;
+ entry->type = type;
+ entry->flags = trace_ctx >> 16;
+}
+
+unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
+
+enum trace_flag_type {
+ TRACE_FLAG_IRQS_OFF = 0x01,
+ TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+ TRACE_FLAG_NEED_RESCHED = 0x04,
+ TRACE_FLAG_HARDIRQ = 0x08,
+ TRACE_FLAG_SOFTIRQ = 0x10,
+ TRACE_FLAG_PREEMPT_RESCHED = 0x20,
+ TRACE_FLAG_NMI = 0x40,
+};
+
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+{
+ unsigned int irq_status = irqs_disabled_flags(irqflags) ?
+ TRACE_FLAG_IRQS_OFF : 0;
+ return tracing_gen_ctx_irq_test(irq_status);
+}
+static inline unsigned int tracing_gen_ctx(void)
+{
+ unsigned long irqflags;
+
+ local_save_flags(irqflags);
+ return tracing_gen_ctx_flags(irqflags);
+}
+#else
+
+static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
+{
+ return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
+}
+static inline unsigned int tracing_gen_ctx(void)
+{
+ return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
+}
+#endif
+
+static inline unsigned int tracing_gen_ctx_dec(void)
+{
+ unsigned int trace_ctx;
+
+ trace_ctx = tracing_gen_ctx();
+ /*
+	 * Subtract one from the preemption counter if preemption is enabled,
+	 * see trace_event_buffer_reserve() for details.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ trace_ctx--;
+ return trace_ctx;
+}
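The helpers above replace the old (flags, pc) pair with a single packed trace_ctx word: the low byte carries the preemption count and bits 16 and up carry the irq flags. A minimal sketch of the intended call pattern (the function name is hypothetical):

	static inline void example_fill_entry(struct trace_entry *entry,
					      unsigned short type)
	{
		unsigned int trace_ctx = tracing_gen_ctx();

		/* Stamp preempt count, pid, type and flags from one context word. */
		tracing_generic_entry_update(entry, type, trace_ctx);
	}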
+
struct trace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
struct trace_event_file *trace_file,
int type, unsigned long len,
- unsigned long flags, int pc);
+ unsigned int trace_ctx);
#define TRACE_RECORD_CMDLINE BIT(0)
#define TRACE_RECORD_TGID BIT(1)
@@ -232,8 +294,7 @@ struct trace_event_buffer {
struct ring_buffer_event *event;
struct trace_event_file *trace_file;
void *entry;
- unsigned long flags;
- int pc;
+ unsigned int trace_ctx;
struct pt_regs *regs;
};
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 966ed8980327..9cfb099da58f 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -152,25 +152,28 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#ifdef TRACEPOINTS_ENABLED
#ifdef CONFIG_HAVE_STATIC_CALL
-#define __DO_TRACE_CALL(name) static_call(tp_func_##name)
+#define __DO_TRACE_CALL(name, args) \
+ do { \
+ struct tracepoint_func *it_func_ptr; \
+ void *__data; \
+ it_func_ptr = \
+ rcu_dereference_raw((&__tracepoint_##name)->funcs); \
+ if (it_func_ptr) { \
+ __data = (it_func_ptr)->data; \
+ static_call(tp_func_##name)(__data, args); \
+ } \
+ } while (0)
#else
-#define __DO_TRACE_CALL(name) __traceiter_##name
+#define __DO_TRACE_CALL(name, args) __traceiter_##name(NULL, args)
#endif /* CONFIG_HAVE_STATIC_CALL */
/*
* it_func[0] is never NULL because there is at least one element in the array
* when the array itself is non NULL.
- *
- * Note, the proto and args passed in includes "__data" as the first parameter.
- * The reason for this is to handle the "void" prototype. If a tracepoint
- * has a "void" prototype, then it is invalid to declare a function
- * as "(void *, void)".
*/
-#define __DO_TRACE(name, proto, args, cond, rcuidle) \
+#define __DO_TRACE(name, args, cond, rcuidle) \
do { \
- struct tracepoint_func *it_func_ptr; \
int __maybe_unused __idx = 0; \
- void *__data; \
\
if (!(cond)) \
return; \
@@ -190,12 +193,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
rcu_irq_enter_irqson(); \
} \
\
- it_func_ptr = \
- rcu_dereference_raw((&__tracepoint_##name)->funcs); \
- if (it_func_ptr) { \
- __data = (it_func_ptr)->data; \
- __DO_TRACE_CALL(name)(args); \
- } \
+ __DO_TRACE_CALL(name, TP_ARGS(args)); \
\
if (rcuidle) { \
rcu_irq_exit_irqson(); \
@@ -206,17 +204,16 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
} while (0)
#ifndef MODULE
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE_RCU(name, proto, args, cond) \
static inline void trace_##name##_rcuidle(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
__DO_TRACE(name, \
- TP_PROTO(data_proto), \
- TP_ARGS(data_args), \
+ TP_ARGS(args), \
TP_CONDITION(cond), 1); \
}
#else
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
+#define __DECLARE_TRACE_RCU(name, proto, args, cond)
#endif
/*
@@ -231,7 +228,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* even when this tracepoint is off. This code has no purpose other than
* poking RCU a bit.
*/
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
extern int __traceiter_##name(data_proto); \
DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \
extern struct tracepoint __tracepoint_##name; \
@@ -239,8 +236,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
{ \
if (static_key_false(&__tracepoint_##name.key)) \
__DO_TRACE(name, \
- TP_PROTO(data_proto), \
- TP_ARGS(data_args), \
+ TP_ARGS(args), \
TP_CONDITION(cond), 0); \
if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
rcu_read_lock_sched_notrace(); \
@@ -249,7 +245,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
} \
} \
__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
- PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \
+ PARAMS(cond)) \
static inline int \
register_trace_##name(void (*probe)(data_proto), void *data) \
{ \
@@ -309,7 +305,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
if (it_func_ptr) { \
do { \
- it_func = (it_func_ptr)->func; \
+ it_func = READ_ONCE((it_func_ptr)->func); \
__data = (it_func_ptr)->data; \
((void(*)(void *, proto))(it_func))(__data, args); \
} while ((++it_func_ptr)->func); \
@@ -332,7 +328,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#else /* !TRACEPOINTS_ENABLED */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \
static inline void trace_##name(proto) \
{ } \
static inline void trace_##name##_rcuidle(proto) \
@@ -412,14 +408,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define DECLARE_TRACE(name, proto, args) \
__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
cpu_online(raw_smp_processor_id()), \
- PARAMS(void *__data, proto), \
- PARAMS(__data, args))
+ PARAMS(void *__data, proto))
#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \
cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
- PARAMS(void *__data, proto), \
- PARAMS(__data, args))
+ PARAMS(void *__data, proto))
#define TRACE_EVENT_FLAGS(event, flag)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 37803f3e6d49..95fc2f100f12 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -240,8 +240,7 @@ struct tty_port {
wait_queue_head_t delta_msr_wait; /* Modem status change */
unsigned long flags; /* User TTY flags ASYNC_ */
unsigned long iflags; /* Internal flags TTY_PORT_ */
- unsigned char console:1, /* port is a console */
- low_latency:1; /* optional: tune for latency */
+ unsigned char console:1; /* port is a console */
struct mutex mutex; /* Locking */
struct mutex buf_mutex; /* Buffer alloc lock */
unsigned char *xmit_buf; /* Optional buffer */
@@ -416,12 +415,14 @@ extern struct tty_struct *get_current_tty(void);
/* tty_io.c */
extern int __init tty_init(void);
extern const char *tty_name(const struct tty_struct *tty);
-extern struct tty_struct *tty_kopen(dev_t device);
+extern struct tty_struct *tty_kopen_exclusive(dev_t device);
+extern struct tty_struct *tty_kopen_shared(dev_t device);
extern void tty_kclose(struct tty_struct *tty);
extern int tty_dev_name_to_number(const char *name, dev_t *number);
extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
extern void tty_ldisc_unlock(struct tty_struct *tty);
extern ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
+extern struct file *tty_release_redirect(struct tty_struct *tty);
#else
static inline void tty_kref_put(struct tty_struct *tty)
{ }
@@ -442,7 +443,7 @@ static inline int __init tty_init(void)
{ return 0; }
static inline const char *tty_name(const struct tty_struct *tty)
{ return "(none)"; }
-static inline struct tty_struct *tty_kopen(dev_t device)
+static inline struct tty_struct *tty_kopen_exclusive(dev_t device)
{ return ERR_PTR(-ENODEV); }
static inline void tty_kclose(struct tty_struct *tty)
{ }
@@ -500,6 +501,8 @@ extern void tty_unthrottle(struct tty_struct *tty);
extern int tty_throttle_safe(struct tty_struct *tty);
extern int tty_unthrottle_safe(struct tty_struct *tty);
extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
+extern int tty_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount);
extern int is_current_pgrp_orphaned(void);
extern void tty_hangup(struct tty_struct *tty);
extern void tty_vhangup(struct tty_struct *tty);
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index b1e6043e9917..572a07976116 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -185,7 +185,8 @@ struct tty_ldisc_ops {
void (*close)(struct tty_struct *);
void (*flush_buffer)(struct tty_struct *tty);
ssize_t (*read)(struct tty_struct *tty, struct file *file,
- unsigned char __user *buf, size_t nr);
+ unsigned char *buf, size_t nr,
+ void **cookie, unsigned long offset);
ssize_t (*write)(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr);
int (*ioctl)(struct tty_struct *tty, struct file *file,
diff --git a/include/linux/types.h b/include/linux/types.h
index a147977602b5..ac825ad90e44 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -14,7 +14,7 @@ typedef u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
-typedef __kernel_ino_t ino_t;
+typedef __kernel_ulong_t ino_t;
typedef __kernel_mode_t mode_t;
typedef unsigned short umode_t;
typedef u32 nlink_t;
@@ -189,7 +189,11 @@ struct hlist_node {
struct ustat {
__kernel_daddr_t f_tfree;
- __kernel_ino_t f_tinode;
+#ifdef CONFIG_ARCH_32BIT_USTAT_F_TINODE
+ unsigned int f_tinode;
+#else
+ unsigned long f_tinode;
+#endif
char f_fname[6];
char f_fpack[6];
};
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 72d88566694e..27ff8eb786dc 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -260,7 +260,13 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
+
+struct csum_state {
+ __wsum csum;
+ size_t off;
+};
+
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
diff --git a/include/linux/units.h b/include/linux/units.h
index 5c115c809507..dcc30a53fa93 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -4,6 +4,10 @@
#include <linux/math.h>
+#define MILLIWATT_PER_WATT 1000L
+#define MICROWATT_PER_MILLIWATT 1000L
+#define MICROWATT_PER_WATT 1000000L
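As a small usage sketch for the new power-unit constants (the helper name is hypothetical), converting a microwatt reading to whole milliwatts is an integer division:

	/* e.g. 1500000 uW / MICROWATT_PER_MILLIWATT == 1500 mW */
	static inline unsigned long example_uw_to_mw(unsigned long power_uw)
	{
		return power_uw / MICROWATT_PER_MILLIWATT;
	}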
+
#define ABSOLUTE_ZERO_MILLICELSIUS -273150
static inline long milli_kelvin_to_millicelsius(long t)
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 0ce4377545f8..f7cb3ddce7fb 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -98,6 +98,8 @@ struct cdc_ncm_ctx {
struct hrtimer tx_timer;
struct tasklet_struct bh;
+ struct usbnet *dev;
+
const struct usb_cdc_ncm_desc *func_desc;
const struct usb_cdc_mbim_desc *mbim_desc;
const struct usb_cdc_mbim_extended_desc *mbim_extended_desc;
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 604c6c514a50..abdd310c77f0 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -36,6 +36,15 @@
#include <linux/device.h>
#include <uapi/linux/usb/ch9.h>
+/* USB 3.2 SuperSpeed Plus phy signaling rate generation and lane count */
+
+enum usb_ssp_rate {
+ USB_SSP_GEN_UNKNOWN = 0,
+ USB_SSP_GEN_2x1,
+ USB_SSP_GEN_1x2,
+ USB_SSP_GEN_2x2,
+};
+
/**
* usb_ep_type_string() - Returns human readable-name of the endpoint type.
* @ep_type: The endpoint type to return human-readable name for. If it's not
@@ -63,6 +72,17 @@ extern const char *usb_speed_string(enum usb_device_speed speed);
extern enum usb_device_speed usb_get_maximum_speed(struct device *dev);
/**
+ * usb_get_maximum_ssp_rate - Get the signaling rate generation and lane count
+ * of a SuperSpeed Plus capable device.
+ * @dev: Pointer to the given USB controller device
+ *
+ * If the string from the "maximum-speed" property is super-speed-plus-genXxY where
+ * 'X' is the generation number and 'Y' is the number of lanes, then this
+ * function returns the corresponding enum usb_ssp_rate.
+ */
+extern enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev);
+
+/**
* usb_state_string - Returns human readable name for the state.
* @state: The state to return a human-readable name for. If it's not
* any of the states devices in usb_device_state_string enum,
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 025b41687ce9..edf3342507f1 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -88,6 +88,12 @@ struct ci_hdrc_platform_data {
struct pinctrl_state *pins_default;
struct pinctrl_state *pins_host;
struct pinctrl_state *pins_device;
+
+ /* platform-specific hooks */
+ int (*hub_control)(struct ci_hdrc *ci, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength,
+ bool *done, unsigned long *flags);
+ void (*enter_lpm)(struct ci_hdrc *ci, bool enable);
};
/* Default offset of capability registers */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index a2d229ab63ba..c71150f2c639 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -525,6 +525,8 @@ extern struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev,
extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n);
extern void composite_disconnect(struct usb_gadget *gadget);
+extern void composite_reset(struct usb_gadget *gadget);
+
extern int composite_setup(struct usb_gadget *gadget,
const struct usb_ctrlrequest *ctrl);
extern void composite_suspend(struct usb_gadget *gadget);
@@ -573,8 +575,8 @@ static inline u16 get_default_bcdDevice(void)
{
u16 bcdDevice;
- bcdDevice = bin2bcd((LINUX_VERSION_CODE >> 16 & 0xff)) << 8;
- bcdDevice |= bin2bcd((LINUX_VERSION_CODE >> 8 & 0xff));
+ bcdDevice = bin2bcd(LINUX_VERSION_MAJOR) << 8;
+ bcdDevice |= bin2bcd(LINUX_VERSION_PATCHLEVEL);
return bcdDevice;
}
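As a worked example of the computation above (a sketch; v5.12 is chosen purely for illustration): with LINUX_VERSION_MAJOR == 5 and LINUX_VERSION_PATCHLEVEL == 12, get_default_bcdDevice() returns (bin2bcd(5) << 8) | bin2bcd(12) == 0x0512, i.e. the running kernel's major.minor version encoded as BCD.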
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index e7351d64f11f..ee04ef214ce8 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -323,6 +323,8 @@ struct usb_gadget_ops {
struct usb_gadget_driver *);
int (*udc_stop)(struct usb_gadget *);
void (*udc_set_speed)(struct usb_gadget *, enum usb_device_speed);
+ void (*udc_set_ssp_rate)(struct usb_gadget *gadget,
+ enum usb_ssp_rate rate);
struct usb_ep *(*match_ep)(struct usb_gadget *,
struct usb_endpoint_descriptor *,
struct usb_ss_ep_comp_descriptor *);
@@ -339,6 +341,10 @@ struct usb_gadget_ops {
* @speed: Speed of current connection to USB host.
* @max_speed: Maximal speed the UDC can handle. UDC must support this
* and all slower speeds.
+ * @ssp_rate: Current connected SuperSpeed Plus signaling rate and lane count.
+ * @max_ssp_rate: Maximum SuperSpeed Plus signaling rate and lane count the UDC
+ * can handle. The UDC must support this and all slower speeds and lower
+ * number of lanes.
* @state: the state we are now (attached, suspended, configured, etc)
* @name: Identifies the controller hardware type. Used in diagnostics
* and sometimes configuration.
@@ -406,6 +412,11 @@ struct usb_gadget {
struct list_head ep_list; /* of usb_ep */
enum usb_device_speed speed;
enum usb_device_speed max_speed;
+
+ /* USB SuperSpeed Plus only */
+ enum usb_ssp_rate ssp_rate;
+ enum usb_ssp_rate max_ssp_rate;
+
enum usb_device_state state;
const char *name;
struct device dev;
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index bb9a782e1411..70d681918d01 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -225,6 +225,7 @@ enum pd_pdo_type {
#define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */
#define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */
#define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */
+#define PDO_FIXED_UNCHUNK_EXT BIT(24) /* Unchunked Extended Message supported (Source) */
#define PDO_FIXED_FRS_CURR_MASK (BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */
#define PDO_FIXED_FRS_CURR_SHIFT 23
#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
@@ -479,6 +480,8 @@ static inline unsigned int rdo_max_power(u32 rdo)
#define PD_T_NEWSRC 250 /* Maximum of 275ms */
#define PD_T_SWAP_SRC_START 20 /* Minimum of 20ms */
#define PD_T_BIST_CONT_MODE 50 /* 30 - 60 ms */
+#define PD_T_SINK_TX 16 /* 16 - 20 ms */
+#define PD_T_CHUNK_NOT_SUPP 42 /* 40 - 50 ms */
#define PD_T_DRP_TRY 100 /* 75 - 150 ms */
#define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index 8c08eeb9a74b..b057250704e8 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -21,22 +21,24 @@
* ----------
* <31:16> :: SVID
* <15> :: VDM type ( 1b == structured, 0b == unstructured )
- * <14:13> :: Structured VDM version (can only be 00 == 1.0 currently)
+ * <14:13> :: Structured VDM version
* <12:11> :: reserved
* <10:8> :: object position (1-7 valid ... used for enter/exit mode only)
* <7:6> :: command type (SVDM only?)
* <5> :: reserved (SVDM), command type (UVDM)
* <4:0> :: command
*/
-#define VDO(vid, type, custom) \
+#define VDO(vid, type, ver, custom) \
(((vid) << 16) | \
((type) << 15) | \
+ ((ver) << 13) | \
((custom) & 0x7FFF))
#define VDO_SVDM_TYPE (1 << 15)
#define VDO_SVDM_VERS(x) ((x) << 13)
#define VDO_OPOS(x) ((x) << 8)
#define VDO_CMDT(x) ((x) << 6)
+#define VDO_SVDM_VERS_MASK VDO_SVDM_VERS(0x3)
#define VDO_OPOS_MASK VDO_OPOS(0x7)
#define VDO_CMDT_MASK VDO_CMDT(0x3)
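A brief sketch of the extended VDO() macro with its new version field (the macro name EXAMPLE_VDM_HDR is hypothetical; SVID 0xff00, the 01b encoding of Structured VDM version 2.0, and command value 1 for Discover Identity are taken from the USB PD specification as assumptions of this sketch):

	/* Structured VDM header: SVID 0xff00, SVDM version 2.0, Discover Identity. */
	#define EXAMPLE_VDM_HDR		VDO(0xff00, 1, 0x1, 1)	/* == 0xff00a001 */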
@@ -74,6 +76,7 @@
#define PD_VDO_VID(vdo) ((vdo) >> 16)
#define PD_VDO_SVDM(vdo) (((vdo) >> 15) & 1)
+#define PD_VDO_SVDM_VER(vdo) (((vdo) >> 13) & 0x3)
#define PD_VDO_OPOS(vdo) (((vdo) >> 8) & 0x7)
#define PD_VDO_CMD(vdo) ((vdo) & 0x1f)
#define PD_VDO_CMDT(vdo) (((vdo) >> 6) & 0x3)
@@ -103,34 +106,50 @@
* --------------------
* <31> :: data capable as a USB host
* <30> :: data capable as a USB device
- * <29:27> :: product type (UFP / Cable)
+ * <29:27> :: product type (UFP / Cable / VPD)
* <26> :: modal operation supported (1b == yes)
- * <25:16> :: product type (DFP)
+ * <25:23> :: product type (DFP) (SVDM version 2.0+ only; set to zero in version 1.0)
+ * <22:21> :: connector type (SVDM version 2.0+ only; set to zero in version 1.0)
+ * <20:16> :: Reserved, Shall be set to zero
* <15:0> :: USB-IF assigned VID for this cable vendor
*/
+
+/* PD Rev2.0 definition */
#define IDH_PTYPE_UNDEF 0
+
+/* SOP Product Type (UFP) */
+#define IDH_PTYPE_NOT_UFP 0
#define IDH_PTYPE_HUB 1
#define IDH_PTYPE_PERIPH 2
#define IDH_PTYPE_PSD 3
#define IDH_PTYPE_AMA 5
+/* SOP' Product Type (Cable Plug / VPD) */
+#define IDH_PTYPE_NOT_CABLE 0
#define IDH_PTYPE_PCABLE 3
#define IDH_PTYPE_ACABLE 4
+#define IDH_PTYPE_VPD 6
-#define IDH_PTYPE_DFP_UNDEF 0
+/* SOP Product Type (DFP) */
+#define IDH_PTYPE_NOT_DFP 0
#define IDH_PTYPE_DFP_HUB 1
#define IDH_PTYPE_DFP_HOST 2
#define IDH_PTYPE_DFP_PB 3
-#define IDH_PTYPE_DFP_AMC 4
-#define VDO_IDH(usbh, usbd, ptype, is_modal, vid) \
- ((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27 \
- | (is_modal) << 26 | ((vid) & 0xffff))
+/* ID Header Mask */
+#define IDH_DFP_MASK GENMASK(25, 23)
+#define IDH_CONN_MASK GENMASK(22, 21)
+
+#define VDO_IDH(usbh, usbd, ufp_cable, is_modal, dfp, conn, vid) \
+ ((usbh) << 31 | (usbd) << 30 | ((ufp_cable) & 0x7) << 27 \
+ | (is_modal) << 26 | ((dfp) & 0x7) << 23 | ((conn) & 0x3) << 21 \
+ | ((vid) & 0xffff))
#define PD_IDH_PTYPE(vdo) (((vdo) >> 27) & 0x7)
#define PD_IDH_VID(vdo) ((vdo) & 0xffff)
#define PD_IDH_MODAL_SUPP(vdo) ((vdo) & (1 << 26))
#define PD_IDH_DFP_PTYPE(vdo) (((vdo) >> 23) & 0x7)
+#define PD_IDH_CONN_TYPE(vdo) (((vdo) >> 21) & 0x3)
/*
* Cert Stat VDO
@@ -138,6 +157,7 @@
* <31:0> : USB-IF assigned XID for this cable
*/
#define PD_CSTAT_XID(vdo) (vdo)
+#define VDO_CERT(xid) ((xid) & 0xffffffff)
/*
* Product VDO
@@ -149,79 +169,271 @@
#define PD_PRODUCT_PID(vdo) (((vdo) >> 16) & 0xffff)
/*
- * UFP VDO1
+ * UFP VDO (PD Revision 3.0+ only)
* --------
* <31:29> :: UFP VDO version
* <28> :: Reserved
* <27:24> :: Device capability
- * <23:6> :: Reserved
+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
+ * <21:11> :: Reserved
+ * <10:8> :: Vconn power (AMA only)
+ * <7> :: Vconn required (AMA only, 0b == no, 1b == yes)
+ * <6> :: Vbus required (AMA only, 0b == yes, 1b == no)
* <5:3> :: Alternate modes
* <2:0> :: USB highest speed
*/
-#define PD_VDO1_UFP_DEVCAP(vdo) (((vdo) & GENMASK(27, 24)) >> 24)
+#define PD_VDO_UFP_DEVCAP(vdo) (((vdo) & GENMASK(27, 24)) >> 24)
+
+/* UFP VDO Version */
+#define UFP_VDO_VER1_2 2
+/* Device Capability */
#define DEV_USB2_CAPABLE BIT(0)
#define DEV_USB2_BILLBOARD BIT(1)
#define DEV_USB3_CAPABLE BIT(2)
#define DEV_USB4_CAPABLE BIT(3)
+/* Connector Type */
+#define UFP_RECEPTACLE 2
+#define UFP_CAPTIVE 3
+
+/* Vconn Power (AMA only, set to AMA_VCONN_NOT_REQ if Vconn is not required) */
+#define AMA_VCONN_PWR_1W 0
+#define AMA_VCONN_PWR_1W5 1
+#define AMA_VCONN_PWR_2W 2
+#define AMA_VCONN_PWR_3W 3
+#define AMA_VCONN_PWR_4W 4
+#define AMA_VCONN_PWR_5W 5
+#define AMA_VCONN_PWR_6W 6
+
+/* Vconn Required (AMA only) */
+#define AMA_VCONN_NOT_REQ 0
+#define AMA_VCONN_REQ 1
+
+/* Vbus Required (AMA only) */
+#define AMA_VBUS_REQ 0
+#define AMA_VBUS_NOT_REQ 1
+
+/* Alternate Modes */
+#define UFP_ALTMODE_NOT_SUPP 0
+#define UFP_ALTMODE_TBT3 BIT(0)
+#define UFP_ALTMODE_RECFG BIT(1)
+#define UFP_ALTMODE_NO_RECFG BIT(2)
+
+/* USB Highest Speed */
+#define UFP_USB2_ONLY 0
+#define UFP_USB32_GEN1 1
+#define UFP_USB32_4_GEN2 2
+#define UFP_USB4_GEN3 3
+
+#define VDO_UFP(ver, cap, conn, vcpwr, vcr, vbr, alt, spd) \
+ (((ver) & 0x7) << 29 | ((cap) & 0xf) << 24 | ((conn) & 0x3) << 22 \
+ | ((vcpwr) & 0x7) << 8 | (vcr) << 7 | (vbr) << 6 | ((alt) & 0x7) << 3 \
+ | ((spd) & 0x7))
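
A hedged fragment (again not from the patch, and assuming pd_vdo.h is included) filling in the new UFP VDO for a non-AMA device: USB 2.0 and 3.2 capable, behind a receptacle, no alternate modes, AMA-only fields left at zero.

static const u32 example_ufp = VDO_UFP(UFP_VDO_VER1_2,
                                       DEV_USB2_CAPABLE | DEV_USB3_CAPABLE,
                                       UFP_RECEPTACLE,
                                       0, 0, 0,                 /* AMA-only Vconn/Vbus fields */
                                       UFP_ALTMODE_NOT_SUPP,
                                       UFP_USB32_GEN1);
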
+
/*
- * DFP VDO
+ * DFP VDO (PD Revision 3.0+ only)
* --------
* <31:29> :: DFP VDO version
* <28:27> :: Reserved
* <26:24> :: Host capability
- * <23:5> :: Reserved
+ * <23:22> :: Connector type (10b == receptacle, 11b == captive plug)
+ * <21:5> :: Reserved
* <4:0> :: Port number
*/
#define PD_VDO_DFP_HOSTCAP(vdo) (((vdo) & GENMASK(26, 24)) >> 24)
+#define DFP_VDO_VER1_1 1
#define HOST_USB2_CAPABLE BIT(0)
#define HOST_USB3_CAPABLE BIT(1)
#define HOST_USB4_CAPABLE BIT(2)
+#define DFP_RECEPTACLE 2
+#define DFP_CAPTIVE 3
+
+#define VDO_DFP(ver, cap, conn, pnum) \
+ (((ver) & 0x7) << 29 | ((cap) & 0x7) << 24 | ((conn) & 0x3) << 22 \
+ | ((pnum) & 0x1f))
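
And the DFP counterpart, sketched for a USB4-capable host exposing port number 0 through a receptacle (values are illustrative):

static const u32 example_dfp = VDO_DFP(DFP_VDO_VER1_1,
                                       HOST_USB2_CAPABLE | HOST_USB3_CAPABLE | HOST_USB4_CAPABLE,
                                       DFP_RECEPTACLE,
                                       0);                      /* port number */
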
/*
- * Cable VDO
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
* <23:20> :: Reserved, Shall be set to zero
* <19:18> :: type-C to Type-A/B/C/Captive (00b == A, 01 == B, 10 == C, 11 == Captive)
- * <17> :: Type-C to Plug/Receptacle (0b == plug, 1b == receptacle)
+ * <17> :: Reserved, Shall be set to zero
* <16:13> :: cable latency (0001 == <10ns(~1m length))
* <12:11> :: cable termination type (11b == both ends active VCONN req)
* <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
* <9> :: SSTX2 Directionality support
* <8> :: SSRX1 Directionality support
* <7> :: SSRX2 Directionality support
- * <6:5> :: Vbus current handling capability
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
* <4> :: Vbus through cable (0b == no, 1b == yes)
* <3> :: SOP" controller present? (0b == no, 1b == yes)
* <2:0> :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:21> :: VDO version
+ * <20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-C/Captive (10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (00b == Vconn not req, 01b == Vconn req)
+ * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <8:7> :: Reserved, Shall be set to zero
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4:3> :: Reserved, Shall be set to zero
+ * <2:0> :: USB highest speed
+ *
+ * Active Cable VDO 1 (PD Rev3.0+)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:21> :: VDO version
+ * <20> :: Reserved, Shall be set to zero
+ * <19:18> :: Connector type (10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001 == <10ns(~1m length))
+ * <12:11> :: cable termination type (10b == one end active, 11b == both ends active VCONN req)
+ * <10:9> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <8> :: SBU supported (0b == supported, 1b == not supported)
+ * <7> :: SBU type (0b == passive, 1b == active)
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB highest speed
*/
+/* Cable VDO Version */
+#define CABLE_VDO_VER1_0 0
+#define CABLE_VDO_VER1_3 3
+
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
#define CABLE_ATYPE 0
#define CABLE_BTYPE 1
#define CABLE_CTYPE 2
#define CABLE_CAPTIVE 3
-#define CABLE_PLUG 0
-#define CABLE_RECEPTACLE 1
-#define CABLE_CURR_1A5 0
+
+/* Cable Latency */
+#define CABLE_LATENCY_1M 1
+#define CABLE_LATENCY_2M 2
+#define CABLE_LATENCY_3M 3
+#define CABLE_LATENCY_4M 4
+#define CABLE_LATENCY_5M 5
+#define CABLE_LATENCY_6M 6
+#define CABLE_LATENCY_7M 7
+#define CABLE_LATENCY_7M_PLUS 8
+
+/* Cable Termination Type */
+#define PCABLE_VCONN_NOT_REQ 0
+#define PCABLE_VCONN_REQ 1
+#define ACABLE_ONE_END 2
+#define ACABLE_BOTH_END 3
+
+/* Maximum Vbus Voltage */
+#define CABLE_MAX_VBUS_20V 0
+#define CABLE_MAX_VBUS_30V 1
+#define CABLE_MAX_VBUS_40V 2
+#define CABLE_MAX_VBUS_50V 3
+
+/* Active Cable SBU Supported/Type */
+#define ACABLE_SBU_SUPP 0
+#define ACABLE_SBU_NOT_SUPP 1
+#define ACABLE_SBU_PASSIVE 0
+#define ACABLE_SBU_ACTIVE 1
+
+/* Vbus Current Handling Capability */
+#define CABLE_CURR_DEF 0
#define CABLE_CURR_3A 1
#define CABLE_CURR_5A 2
+
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
#define CABLE_USBSS_U2_ONLY 0
#define CABLE_USBSS_U31_GEN1 1
#define CABLE_USBSS_U31_GEN2 2
-#define VDO_CABLE(hw, fw, cbl, gdr, lat, term, tx1d, tx2d, rx1d, rx2d, cur,\
- vps, sopp, usbss) \
- (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
- | (gdr) << 17 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 \
- | (tx1d) << 10 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 \
- | ((cur) & 0x3) << 5 | (vps) << 4 | (sopp) << 3 \
- | ((usbss) & 0x7))
+
+/* USB Highest Speed */
+#define CABLE_USB2_ONLY 0
+#define CABLE_USB32_GEN1 1
+#define CABLE_USB32_4_GEN2 2
+#define CABLE_USB4_GEN3 3
+
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \
+ | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
+ | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
+#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
+ | ((vbm) & 0x3) << 9 | ((cur) & 0x3) << 5 | ((spd) & 0x7))
+#define VDO_ACABLE1(hw, fw, ver, conn, lat, term, vbm, sbu, sbut, cur, vbt, sopp, spd) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
+ | ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
+ | (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7))
+
#define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3)
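
As a rough usage sketch (not part of the patch), a PD Rev3.0 Passive Cable VDO for a ~1 m Type-C to Type-C cable rated 20 V / 3 A with USB 3.2 Gen1 signalling; the hardware/firmware version numbers are placeholders:

static const u32 example_pcable = VDO_PCABLE(1, 1,              /* HW / FW version (placeholders) */
                                             CABLE_VDO_VER1_3,
                                             CABLE_CTYPE,
                                             CABLE_LATENCY_1M,
                                             PCABLE_VCONN_NOT_REQ,
                                             CABLE_MAX_VBUS_20V,
                                             CABLE_CURR_3A,
                                             CABLE_USB32_GEN1);
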
/*
- * AMA VDO
+ * Active Cable VDO 2
+ * ---------
+ * <31:24> :: Maximum operating temperature
+ * <23:16> :: Shutdown temperature
+ * <15> :: Reserved, Shall be set to zero
+ * <14:12> :: U3/CLd power
+ * <11> :: U3 to U0 transition mode (0b == direct, 1b == through U3S)
+ * <10> :: Physical connection (0b == copper, 1b == optical)
+ * <9> :: Active element (0b == redriver, 1b == retimer)
+ * <8> :: USB4 supported (0b == yes, 1b == no)
+ * <7:6> :: USB2 hub hops consumed
+ * <5> :: USB2 supported (0b == yes, 1b == no)
+ * <4> :: USB3.2 supported (0b == yes, 1b == no)
+ * <3> :: USB lanes supported (0b == one lane, 1b == two lanes)
+ * <2> :: Optically isolated active cable (0b == no, 1b == yes)
+ * <1> :: Reserved, Shall be set to zero
+ * <0> :: USB gen (0b == gen1, 1b == gen2+)
+ */
+/* U3/CLd Power */
+#define ACAB2_U3_CLD_10MW_PLUS 0
+#define ACAB2_U3_CLD_10MW 1
+#define ACAB2_U3_CLD_5MW 2
+#define ACAB2_U3_CLD_1MW 3
+#define ACAB2_U3_CLD_500UW 4
+#define ACAB2_U3_CLD_200UW 5
+#define ACAB2_U3_CLD_50UW 6
+
+/* Other Active Cable VDO 2 Fields */
+#define ACAB2_U3U0_DIRECT 0
+#define ACAB2_U3U0_U3S 1
+#define ACAB2_PHY_COPPER 0
+#define ACAB2_PHY_OPTICAL 1
+#define ACAB2_REDRIVER 0
+#define ACAB2_RETIMER 1
+#define ACAB2_USB4_SUPP 0
+#define ACAB2_USB4_NOT_SUPP 1
+#define ACAB2_USB2_SUPP 0
+#define ACAB2_USB2_NOT_SUPP 1
+#define ACAB2_USB32_SUPP 0
+#define ACAB2_USB32_NOT_SUPP 1
+#define ACAB2_LANES_ONE 0
+#define ACAB2_LANES_TWO 1
+#define ACAB2_OPT_ISO_NO 0
+#define ACAB2_OPT_ISO_YES 1
+#define ACAB2_GEN_1 0
+#define ACAB2_GEN_2_PLUS 1
+
+#define VDO_ACABLE2(mtemp, stemp, u3p, trans, phy, ele, u4, hops, u2, u32, lane, iso, gen) \
+ (((mtemp) & 0xff) << 24 | ((stemp) & 0xff) << 16 | ((u3p) & 0x7) << 12 \
+ | (trans) << 11 | (phy) << 10 | (ele) << 9 | (u4) << 8 \
+ | ((hops) & 0x3) << 6 | (u2) << 5 | (u32) << 4 | (lane) << 3 \
+ | (iso) << 2 | (gen))
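
An illustrative (non-normative) Active Cable VDO 2 for an optical, retimer-based, two-lane cable that supports USB4; the temperature limits are made-up example values in degrees Celsius:

static const u32 example_acable2 = VDO_ACABLE2(70, 85,          /* max operating / shutdown temp */
                                               ACAB2_U3_CLD_1MW,
                                               ACAB2_U3U0_DIRECT,
                                               ACAB2_PHY_OPTICAL,
                                               ACAB2_RETIMER,
                                               ACAB2_USB4_SUPP,
                                               0,               /* USB2 hub hops consumed */
                                               ACAB2_USB2_SUPP,
                                               ACAB2_USB32_SUPP,
                                               ACAB2_LANES_TWO,
                                               ACAB2_OPT_ISO_YES,
                                               ACAB2_GEN_2_PLUS);
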
+
+/*
+ * AMA VDO (PD Rev2.0)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -244,19 +456,41 @@
#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
-#define AMA_VCONN_PWR_1W 0
-#define AMA_VCONN_PWR_1W5 1
-#define AMA_VCONN_PWR_2W 2
-#define AMA_VCONN_PWR_3W 3
-#define AMA_VCONN_PWR_4W 4
-#define AMA_VCONN_PWR_5W 5
-#define AMA_VCONN_PWR_6W 6
#define AMA_USBSS_U2_ONLY 0
#define AMA_USBSS_U31_GEN1 1
#define AMA_USBSS_U31_GEN2 2
#define AMA_USBSS_BBONLY 3
/*
+ * VPD VDO
+ * ---------
+ * <31:28> :: HW version
+ * <27:24> :: FW version
+ * <23:21> :: VDO version
+ * <20:17> :: Reserved, Shall be set to zero
+ * <16:15> :: Maximum Vbus voltage (00b == 20V, 01b == 30V, 10b == 40V, 11b == 50V)
+ * <14> :: Charge through current support (0b == 3A, 1b == 5A)
+ * <13> :: Reserved, Shall be set to zero
+ * <12:7> :: Vbus impedance
+ * <6:1> :: Ground impedance
+ * <0> :: Charge through support (0b == no, 1b == yes)
+ */
+#define VPD_VDO_VER1_0 0
+#define VPD_MAX_VBUS_20V 0
+#define VPD_MAX_VBUS_30V 1
+#define VPD_MAX_VBUS_40V 2
+#define VPD_MAX_VBUS_50V 3
+#define VPDCT_CURR_3A 0
+#define VPDCT_CURR_5A 1
+#define VPDCT_NOT_SUPP 0
+#define VPDCT_SUPP 1
+
+#define VDO_VPD(hw, fw, ver, vbm, curr, vbi, gi, ct) \
+ (((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
+ | ((vbm) & 0x3) << 15 | (curr) << 14 | ((vbi) & 0x3f) << 7 \
+ | ((gi) & 0x3f) << 1 | (ct))
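
A sketch of a VPD VDO for a charge-through adapter rated 20 V / 3 A; the impedance fields are left at zero purely as placeholders:

static const u32 example_vpd = VDO_VPD(1, 1,                    /* HW / FW version (placeholders) */
                                       VPD_VDO_VER1_0,
                                       VPD_MAX_VBUS_20V,
                                       VPDCT_CURR_3A,
                                       0, 0,                    /* Vbus / ground impedance */
                                       VPDCT_SUPP);
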
+
+/*
* SVDM Discover SVIDs request -> response
*
* Request is properly formatted VDM Header with discover SVIDs command.
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1c09b922f8b0..952272002e48 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -260,7 +260,7 @@ struct usb_serial_driver {
void (*release)(struct usb_serial *serial);
int (*port_probe)(struct usb_serial_port *port);
- int (*port_remove)(struct usb_serial_port *port);
+ void (*port_remove)(struct usb_serial_port *port);
int (*suspend)(struct usb_serial *serial, pm_message_t message);
int (*resume)(struct usb_serial *serial);
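
With port_remove() now returning void, a driver's callback shrinks to pure cleanup. A hypothetical example (example_port_remove and the per-port data are not from any real driver):

#include <linux/slab.h>
#include <linux/usb/serial.h>

static void example_port_remove(struct usb_serial_port *port)
{
        void *priv = usb_get_serial_port_data(port);

        kfree(priv);    /* cleanup only; the core no longer consumes a return value */
}
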
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index f4a18427f5c4..42fcfbe10590 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -19,6 +19,10 @@ enum typec_cc_status {
TYPEC_CC_RP_3_0,
};
+/* Collision Avoidance */
+#define SINK_TX_NG TYPEC_CC_RP_1_5
+#define SINK_TX_OK TYPEC_CC_RP_3_0
+
enum typec_cc_polarity {
TYPEC_POLARITY_CC1,
TYPEC_POLARITY_CC2,
@@ -104,6 +108,10 @@ enum tcpm_transmit_type {
* is supported by TCPC, set this callback for TCPM to query
* whether vbus is at VSAFE0V when needed.
* Returns true when vbus is at VSAFE0V, false otherwise.
+ * @set_partner_usb_comm_capable:
+ * Optional; The USB Communications Capable bit indicates whether the
+ * port partner is capable of communication over the USB data lines
+ * (e.g. D+/- or SS Tx/Rx). Called to notify the TCPC of the status of the bit.
*/
struct tcpc_dev {
struct fwnode_handle *fwnode;
@@ -135,6 +143,7 @@ struct tcpc_dev {
int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
bool pps_active, u32 requested_vbus_voltage);
bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev);
+ void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable);
};
struct tcpm_port;
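
A minimal sketch of the new optional hook on the TCPC side; struct example_tcpc and its members are hypothetical, the point is only the callback shape:

#include <linux/usb/tcpm.h>

struct example_tcpc {                           /* hypothetical driver state */
        struct tcpc_dev tcpc;
        bool partner_usb_comm;
};

static void example_set_partner_usb_comm_capable(struct tcpc_dev *dev, bool capable)
{
        struct example_tcpc *chip = container_of(dev, struct example_tcpc, tcpc);

        chip->partner_usb_comm = capable;       /* e.g. consumed by mux or billboard logic */
}
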
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
index c29d1b4c9381..fd1c9f6a4e37 100644
--- a/include/linux/usb/tegra_usb_phy.h
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -79,6 +79,8 @@ struct tegra_usb_phy {
bool is_ulpi_phy;
struct gpio_desc *reset_gpio;
struct reset_control *pad_rst;
+ bool wakeup_enabled;
+ bool pad_wakeup;
bool powered_on;
};
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 54475323f83b..91b4303ca305 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -126,6 +126,7 @@ struct typec_altmode_desc {
enum typec_port_data roles;
};
+void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision);
int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes);
struct typec_altmode
*typec_partner_register_altmode(struct typec_partner *partner,
@@ -164,6 +165,7 @@ struct typec_plug_desc {
* @type: The plug type from USB PD Cable VDO
* @active: Is the cable active or passive
* @identity: Result of Discover Identity command
+ * @pd_revision: USB Power Delivery Specification revision if supported
*
* Represents USB Type-C Cable attached to USB Type-C port.
*/
@@ -171,6 +173,8 @@ struct typec_cable_desc {
enum typec_plug_type type;
unsigned int active:1;
struct usb_pd_identity *identity;
+ u16 pd_revision; /* 0300H = "3.0" */
+
};
/*
@@ -178,15 +182,22 @@ struct typec_cable_desc {
* @usb_pd: USB Power Delivery support
* @accessory: Audio, Debug or none.
* @identity: Discover Identity command data
+ * @pd_revision: USB Power Delivery Specification Revision if supported
*
* Details about a partner that is attached to USB Type-C port. If @identity
* member exists when partner is registered, a directory named "identity" is
* created to sysfs for the partner device.
+ *
+ * @pd_revision is based on the setting of the "Specification Revision" field
+ * in the message header on the initial "Source Capabilities" message received
+ * from the partner, or a "Request" message received from the partner, depending
+ * on whether our port is a Sink or a Source.
*/
struct typec_partner_desc {
unsigned int usb_pd:1;
enum typec_accessory accessory;
struct usb_pd_identity *identity;
+ u16 pd_revision; /* 0300H = "3.0" */
};
/**
@@ -206,12 +217,19 @@ struct typec_operations {
enum typec_port_type type);
};
+enum usb_pd_svdm_ver {
+ SVDM_VER_1_0 = 0,
+ SVDM_VER_2_0 = 1,
+ SVDM_VER_MAX = SVDM_VER_2_0,
+};
+
/*
* struct typec_capability - USB Type-C Port Capabilities
* @type: Supported power role of the port
* @data: Supported data role of the port
* @revision: USB Type-C Specification release. Binary coded decimal
* @pd_revision: USB Power Delivery Specification revision if supported
+ * @svdm_version: USB PD Structured VDM version if supported
* @prefer_role: Initial role preference (DRP ports).
* @accessory: Supported Accessory Modes
* @fwnode: Optional fwnode of the port
@@ -225,6 +243,7 @@ struct typec_capability {
enum typec_port_data data;
u16 revision; /* 0120H = "1.2" */
u16 pd_revision; /* 0300H = "3.0" */
+ enum usb_pd_svdm_ver svdm_version;
int prefer_role;
enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
unsigned int orientation_aware:1;
@@ -275,4 +294,8 @@ int typec_find_orientation(const char *name);
int typec_find_port_power_role(const char *name);
int typec_find_power_role(const char *name);
int typec_find_port_data_role(const char *name);
+
+void typec_partner_set_svdm_version(struct typec_partner *partner,
+ enum usb_pd_svdm_ver svdm_version);
+int typec_get_negotiated_svdm_version(struct typec_port *port);
#endif /* __LINUX_USB_TYPEC_H */
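
Putting the new typec.h pieces together, a hedged sketch of how a PD stack might record the SVDM version negotiated from a received VDM header (the helper name and the vdm_hdr parameter are illustrative; PD_VDO_SVDM_VER() comes from the pd_vdo.h changes above):

#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec.h>

static void example_note_svdm_version(struct typec_partner *partner, u32 vdm_hdr)
{
        if (PD_VDO_SVDM_VER(vdm_hdr) >= SVDM_VER_2_0)
                typec_partner_set_svdm_version(partner, SVDM_VER_2_0);
        else
                typec_partner_set_svdm_version(partner, SVDM_VER_1_0);
}

A port driver would additionally advertise its own maximum by setting .svdm_version in its struct typec_capability.
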
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index 5e0a7b7647c3..65933cbe9129 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -133,6 +133,16 @@ typec_altmode_get_orientation(struct typec_altmode *altmode)
}
/**
+ * typec_altmode_get_svdm_version - Get negotiated SVDM version
+ * @altmode: Handle to the alternate mode
+ */
+static inline int
+typec_altmode_get_svdm_version(struct typec_altmode *altmode)
+{
+ return typec_get_negotiated_svdm_version(typec_altmode2port(altmode));
+}
+
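
For example, a hypothetical alternate-mode driver check (not from the patch) that refuses to send SVDM 2.0-only messages to a 1.0 partner:

static int example_check_svdm(struct typec_altmode *alt)
{
        int ver = typec_altmode_get_svdm_version(alt);

        if (ver < 0)
                return ver;             /* no partner, nothing negotiated yet */
        if (ver < SVDM_VER_2_0)
                return -EOPNOTSUPP;     /* this hypothetical mode needs SVDM 2.0 */

        return 0;                       /* safe to use 2.0-only VDM fields */
}
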
+/**
* struct typec_altmode_driver - USB Type-C alternate mode device driver
* @id_table: Null terminated array of SVIDs
* @probe: Callback for device binding
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 0fefeb976877..4ab5494503a8 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -35,6 +35,8 @@ struct vdpa_vq_state {
u16 avail_index;
};
+struct vdpa_mgmt_dev;
+
/**
* vDPA device - representation of a vDPA device
* @dev: underlying device
@@ -43,6 +45,8 @@ struct vdpa_vq_state {
* @index: device index
* @features_valid: were features initialized? for legacy guests
* @nvqs: maximum number of supported virtqueues
+ * @mdev: management device pointer; the caller must set this up as part of the
+ *        dev_add() mgmtdev ops callback, before invoking _vdpa_register_device().
*/
struct vdpa_device {
struct device dev;
@@ -51,6 +55,7 @@ struct vdpa_device {
unsigned int index;
bool features_valid;
int nvqs;
+ struct vdpa_mgmt_dev *mdev;
};
/**
@@ -245,20 +250,22 @@ struct vdpa_config_ops {
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
- int nvqs,
- size_t size);
+ int nvqs, size_t size, const char *name);
-#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \
+#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs, name) \
container_of(__vdpa_alloc_device( \
parent, config, nvqs, \
sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
- dev_struct, member))), \
+ dev_struct, member)), name), \
dev_struct, member)
int vdpa_register_device(struct vdpa_device *vdev);
void vdpa_unregister_device(struct vdpa_device *vdev);
+int _vdpa_register_device(struct vdpa_device *vdev);
+void _vdpa_unregister_device(struct vdpa_device *vdev);
+
/**
* vdpa_driver - operations for a vDPA driver
* @driver: underlying device driver
@@ -336,4 +343,33 @@ static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset,
ops->get_config(vdev, offset, buf, len);
}
+/**
+ * struct vdpa_mgmtdev_ops - vdpa management device operations
+ * @dev_add: Add a vdpa device using alloc and register
+ * @mdev: parent device to use for device addition
+ * @name: name of the new vdpa device
+ * Driver needs to add a new device using _vdpa_register_device()
+ * after fully initializing the vdpa device. Driver must return 0
+ * on success or an appropriate error code.
+ * @dev_del: Remove a vdpa device using unregister
+ * @mdev: parent device to use for device removal
+ * @dev: vdpa device to remove
+ * Driver needs to remove the specified device by calling
+ * _vdpa_unregister_device().
+ */
+struct vdpa_mgmtdev_ops {
+ int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name);
+ void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev);
+};
+
+struct vdpa_mgmt_dev {
+ struct device *device;
+ const struct vdpa_mgmtdev_ops *ops;
+ const struct virtio_device_id *id_table; /* supported ids */
+ struct list_head list;
+};
+
+int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
+void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev);
+
#endif /* _LINUX_VDPA_H */
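
To illustrate the intended flow (all example_* names are hypothetical and the virtqueue count is arbitrary): a management driver implements dev_add()/dev_del(), allocates with the name passed in from user space, sets the new mdev back-pointer, and registers through the underscore-prefixed helpers; the struct vdpa_mgmt_dev itself is registered once at probe time with vdpa_mgmtdev_register().

#include <linux/vdpa.h>

struct example_vdpa {
        struct vdpa_device vdpa;        /* kept first so the IS_ERR() check stays valid */
        /* ... device state ... */
};

static const struct vdpa_config_ops example_config_ops = {
        /* .set_vq_address, .get_features, ... omitted for brevity */
};

static int example_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
{
        struct example_vdpa *vd;

        vd = vdpa_alloc_device(struct example_vdpa, vdpa, mdev->device,
                               &example_config_ops, 2, name);
        if (IS_ERR(vd))
                return PTR_ERR(vd);

        vd->vdpa.mdev = mdev;           /* required before registration */
        return _vdpa_register_device(&vd->vdpa);
}

static void example_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
        _vdpa_unregister_device(dev);
}

static const struct vdpa_mgmtdev_ops example_mgmtdev_ops = {
        .dev_add = example_dev_add,
        .dev_del = example_dev_del,
};
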
diff --git a/include/linux/verification.h b/include/linux/verification.h
index 911ab7c2b1ab..a655923335ae 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -8,6 +8,8 @@
#ifndef _LINUX_VERIFICATION_H
#define _LINUX_VERIFICATION_H
+#include <linux/types.h>
+
/*
* Indicate that both builtin trusted keys and secondary trusted keys
* should be used.
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index f45940b38a02..b7e18bde5aa8 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -57,6 +57,11 @@ extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
extern void vfio_device_put(struct vfio_device *device);
extern void *vfio_device_data(struct vfio_device *device);
+/* events for the backend driver notify callback */
+enum vfio_iommu_notify_type {
+ VFIO_IOMMU_CONTAINER_CLOSE = 0,
+};
+
/**
* struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
*/
@@ -92,6 +97,8 @@ struct vfio_iommu_driver_ops {
void *data, size_t count, bool write);
struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
struct iommu_group *group);
+ void (*notify)(void *iommu_data,
+ enum vfio_iommu_notify_type event);
};
extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
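
A sketch of how an IOMMU backend might wire up the new optional notify callback (struct example_iommu and the cleanup helper are hypothetical):

#include <linux/vfio.h>

struct example_iommu {
        void *private;                  /* placeholder for backend state */
};

static void example_cleanup_on_close(struct example_iommu *iommu)
{
        /* e.g. tear down state that must not outlive the container */
}

static void example_iommu_notify(void *iommu_data,
                                 enum vfio_iommu_notify_type event)
{
        struct example_iommu *iommu = iommu_data;

        if (event == VFIO_IOMMU_CONTAINER_CLOSE)
                example_cleanup_on_close(iommu);
}
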
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 977caf96c8d2..fc6dfeba04a5 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -121,9 +121,9 @@ extern struct pci_dev *vga_default_device(void);
extern void vga_set_default_device(struct pci_dev *pdev);
extern int vga_remove_vgacon(struct pci_dev *pdev);
#else
-static inline struct pci_dev *vga_default_device(void) { return NULL; };
-static inline void vga_set_default_device(struct pci_dev *pdev) { };
-static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; };
+static inline struct pci_dev *vga_default_device(void) { return NULL; }
+static inline void vga_set_default_device(struct pci_dev *pdev) { }
+static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; }
#endif
/*
diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h
new file mode 100644
index 000000000000..f26acbeec965
--- /dev/null
+++ b/include/linux/virtio_pci_modern.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_VIRTIO_PCI_MODERN_H
+#define _LINUX_VIRTIO_PCI_MODERN_H
+
+#include <linux/pci.h>
+#include <linux/virtio_pci.h>
+
+struct virtio_pci_modern_device {
+ struct pci_dev *pci_dev;
+
+ struct virtio_pci_common_cfg __iomem *common;
+ /* Device-specific data (non-legacy mode) */
+ void __iomem *device;
+ /* Base of vq notifications (non-legacy mode). */
+ void __iomem *notify_base;
+ /* Where to read and clear interrupt */
+ u8 __iomem *isr;
+
+ /* So we can sanity-check accesses. */
+ size_t notify_len;
+ size_t device_len;
+
+ /* Capability for when we need to map notifications per-vq. */
+ int notify_map_cap;
+
+ /* Multiply queue_notify_off by this value. (non-legacy mode). */
+ u32 notify_offset_multiplier;
+
+ int modern_bars;
+
+ struct virtio_device_id id;
+};
+
+/*
+ * Type-safe wrappers for io accesses.
+ * Use these to enforce at compile time the following spec requirement:
+ *
+ * The driver MUST access each field using the “natural” access
+ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
+ * for 16-bit fields and 8-bit accesses for 8-bit fields.
+ */
+static inline u8 vp_ioread8(const u8 __iomem *addr)
+{
+ return ioread8(addr);
+}
+static inline u16 vp_ioread16(const __le16 __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static inline u32 vp_ioread32(const __le32 __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
+{
+ iowrite8(value, addr);
+}
+
+static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
+{
+ iowrite16(value, addr);
+}
+
+static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+static inline void vp_iowrite64_twopart(u64 val,
+ __le32 __iomem *lo,
+ __le32 __iomem *hi)
+{
+ vp_iowrite32((u32)val, lo);
+ vp_iowrite32(val >> 32, hi);
+}
+
+u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
+void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
+ u64 features);
+u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
+u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
+void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
+ u8 status);
+u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
+ u16 idx, u16 vector);
+u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
+ u16 vector);
+void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
+ u16 index, u64 desc_addr, u64 driver_addr,
+ u64 device_addr);
+void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 idx, bool enable);
+bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
+ u16 idx);
+void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 idx, u16 size);
+u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
+ u16 idx);
+u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
+u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
+ u16 idx);
+void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
+ size_t minlen,
+ u32 align,
+ u32 start, u32 size,
+ size_t *len);
+int vp_modern_probe(struct virtio_pci_modern_device *mdev);
+void vp_modern_remove(struct virtio_pci_modern_device *mdev);
+#endif
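
As a usage sketch for the newly exported helpers (the probe function and message are illustrative, not taken from virtio_pci or any real driver):

#include <linux/pci.h>
#include <linux/virtio_pci_modern.h>

static int example_probe(struct pci_dev *pci_dev)
{
        struct virtio_pci_modern_device mdev = { .pci_dev = pci_dev };
        int err;

        err = vp_modern_probe(&mdev);   /* maps common/ISR/notify/device capabilities */
        if (err)
                return err;

        dev_info(&pci_dev->dev, "%u virtqueues, config generation %u\n",
                 vp_modern_get_num_queues(&mdev), vp_modern_generation(&mdev));

        vp_modern_remove(&mdev);
        return 0;
}
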
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index cedcda6593f6..df92211cf771 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -241,4 +241,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
+#ifdef CONFIG_MMU
+bool vmalloc_dump_obj(void *object);
+#else
+static inline bool vmalloc_dump_obj(void *object) { return false; }
+#endif
+
#endif /* _LINUX_VMALLOC_H */
diff --git a/include/linux/vme.h b/include/linux/vme.h
index 7e82bf500f01..b204a9b4be1b 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -122,7 +122,7 @@ struct vme_driver {
const char *name;
int (*match)(struct vme_dev *);
int (*probe)(struct vme_dev *);
- int (*remove)(struct vme_dev *);
+ void (*remove)(struct vme_dev *);
struct device_driver driver;
struct list_head devices;
};
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 773135fc6e19..506d625163a1 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -313,6 +313,12 @@ static inline void __mod_node_page_state(struct pglist_data *pgdat,
enum node_stat_item item, int delta)
{
if (vmstat_item_in_bytes(item)) {
+ /*
+ * Only cgroups use subpage accounting right now; at
+ * the global level, these items still change in
+ * multiples of whole pages. Store them as pages
+ * internally to keep the per-cpu counters compact.
+ */
VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
delta >>= PAGE_SHIFT;
}
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index be0afe6f379b..e36cb114c188 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -66,7 +66,7 @@ enum {
* consists of at least two pages, the memory limit also dictates the
* number of queue pairs a guest can create.
*/
-#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
+#define VMCI_MAX_GUEST_QP_MEMORY ((size_t)(128 * 1024 * 1024))
#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
/*
@@ -80,7 +80,7 @@ enum {
* too much kernel memory (especially on vmkernel). We limit a queuepair to
* 32 KB, or 16 KB per queue for symmetrical pairs.
*/
-#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
+#define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024))
/*
* We have a fixed set of resource IDs available in the VMX.
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 349e39c3ab60..94e7a315479c 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -16,18 +16,6 @@
#include <linux/consolemap.h>
#include <linux/notifier.h>
-/*
- * Presently, a lot of graphics programs do not restore the contents of
- * the higher font pages. Defining this flag will avoid use of them, but
- * will lose support for PIO_FONTRESET. Note that many font operations are
- * not likely to work with these programs anyway; they need to be
- * fixed. The linux/Documentation directory includes a code snippet
- * to save and restore the text font.
- */
-#ifdef CONFIG_VGA_CONSOLE
-#define BROKEN_GRAPHICS_PROGRAMS 1
-#endif
-
void kd_mksound(unsigned int hz, unsigned int ticks);
int kbd_rate(struct kbd_repeat *rep);
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 949d3b10e531..9a2a0ef39018 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -280,7 +280,7 @@ int w1_register_family(struct w1_family *family);
void w1_unregister_family(struct w1_family *family);
/**
- * module_w1_driver() - Helper macro for registering a 1-Wire families
+ * module_w1_family() - Helper macro for registering a 1-Wire family
* @__w1_family: w1_family struct
*
* Helper macro for 1-Wire families which do not do anything special in module
diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h
index 58e082dadc68..462854f4f286 100644
--- a/include/linux/wm97xx.h
+++ b/include/linux/wm97xx.h
@@ -294,7 +294,6 @@ struct wm97xx {
struct wm97xx_batt_pdata {
int batt_aux;
int temp_aux;
- int charge_gpio;
int min_voltage;
int max_voltage;
int batt_div;
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 26de0cae2a0a..d15a7730ee18 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -311,7 +311,7 @@ enum {
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
- WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
+ WQ_SYSFS = 1 << 6, /* visible in sysfs, see workqueue_sysfs_register() */
/*
* Per-cpu workqueues are generally preferred because they tend to
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 10b4dc2709f0..4c379d23ec6e 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/user_namespace.h>
#include <uapi/linux/xattr.h>
struct inode;
@@ -34,7 +35,8 @@ struct xattr_handler {
int (*get)(const struct xattr_handler *, struct dentry *dentry,
struct inode *inode, const char *name, void *buffer,
size_t size);
- int (*set)(const struct xattr_handler *, struct dentry *dentry,
+ int (*set)(const struct xattr_handler *,
+ struct user_namespace *mnt_userns, struct dentry *dentry,
struct inode *inode, const char *name, const void *buffer,
size_t size, int flags);
};
@@ -48,18 +50,26 @@ struct xattr {
};
ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t);
-ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
+ssize_t vfs_getxattr(struct user_namespace *, struct dentry *, const char *,
+ void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
-int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
-int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
-int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **);
-int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
-int __vfs_removexattr(struct dentry *, const char *);
-int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **);
-int vfs_removexattr(struct dentry *, const char *);
+int __vfs_setxattr(struct user_namespace *, struct dentry *, struct inode *,
+ const char *, const void *, size_t, int);
+int __vfs_setxattr_noperm(struct user_namespace *, struct dentry *,
+ const char *, const void *, size_t, int);
+int __vfs_setxattr_locked(struct user_namespace *, struct dentry *,
+ const char *, const void *, size_t, int,
+ struct inode **);
+int vfs_setxattr(struct user_namespace *, struct dentry *, const char *,
+ const void *, size_t, int);
+int __vfs_removexattr(struct user_namespace *, struct dentry *, const char *);
+int __vfs_removexattr_locked(struct user_namespace *, struct dentry *,
+ const char *, struct inode **);
+int vfs_removexattr(struct user_namespace *, struct dentry *, const char *);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
-ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
+ssize_t vfs_getxattr_alloc(struct user_namespace *mnt_userns,
+ struct dentry *dentry, const char *name,
char **xattr_value, size_t size, gfp_t flags);
int xattr_supported_namespace(struct inode *inode, const char *prefix);
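
A hedged example of a filesystem handler adapted to the idmapped-mount aware ->set() prototype (names and the stubbed-out storage step are hypothetical):

#include <linux/xattr.h>

static int example_xattr_set(const struct xattr_handler *handler,
                             struct user_namespace *mnt_userns,
                             struct dentry *dentry, struct inode *inode,
                             const char *name, const void *value,
                             size_t size, int flags)
{
        /* mnt_userns carries the mount's idmapping for permission decisions;
         * a filesystem that does not care can simply ignore it. */
        return -EOPNOTSUPP;     /* placeholder for the real storage logic */
}

static const struct xattr_handler example_xattr_handler = {
        .prefix = "user.",
        .set    = example_xattr_set,
};
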
diff --git a/include/linux/z2_battery.h b/include/linux/z2_battery.h
index eaba53ff387c..9e8be7a7cd25 100644
--- a/include/linux/z2_battery.h
+++ b/include/linux/z2_battery.h
@@ -6,7 +6,6 @@ struct z2_battery_info {
int batt_I2C_bus;
int batt_I2C_addr;
int batt_I2C_reg;
- int charge_gpio;
int min_voltage;
int max_voltage;
int batt_div;
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 51bf43076165..e8997010612a 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -73,6 +73,7 @@ u64 zpool_get_total_size(struct zpool *pool);
* @malloc: allocate mem from a pool.
* @free: free mem from a pool.
* @shrink: shrink the pool.
+ * @sleep_mapped: whether zpool driver can sleep during map.
* @map: map a handle.
* @unmap: unmap a handle.
* @total_size: get total size of a pool.
@@ -100,6 +101,7 @@ struct zpool_driver {
int (*shrink)(void *pool, unsigned int pages,
unsigned int *reclaimed);
+ bool sleep_mapped;
void *(*map)(void *pool, unsigned long handle,
enum zpool_mapmode mm);
void (*unmap)(void *pool, unsigned long handle);
@@ -112,5 +114,6 @@ void zpool_register_driver(struct zpool_driver *driver);
int zpool_unregister_driver(struct zpool_driver *driver);
bool zpool_evictable(struct zpool *pool);
+bool zpool_can_sleep_mapped(struct zpool *pool);
#endif
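
Illustrative caller-side pattern for the new query (zswap-style; example_sleeping_decompress() is a stand-in for any operation that may sleep, and tmp is a caller-provided bounce buffer):

#include <linux/string.h>
#include <linux/zpool.h>

void example_sleeping_decompress(const u8 *src, unsigned int len, void *dst);   /* hypothetical */

static void example_read(struct zpool *pool, unsigned long handle,
                         unsigned int clen, u8 *tmp, void *dst)
{
        u8 *src = zpool_map_handle(pool, handle, ZPOOL_MM_RO);

        if (!zpool_can_sleep_mapped(pool)) {
                /* backend cannot stay mapped across a sleep: copy out first */
                memcpy(tmp, src, clen);
                zpool_unmap_handle(pool, handle);
                src = tmp;
        }

        example_sleeping_decompress(src, clen, dst);

        if (zpool_can_sleep_mapped(pool))
                zpool_unmap_handle(pool, handle);
}
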
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 4807ca4d52e0..2a430e713ce5 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -35,7 +35,7 @@ enum zs_mapmode {
struct zs_pool_stats {
/* How many pages were migrated (freed) */
- unsigned long pages_compacted;
+ atomic_long_t pages_compacted;
};
struct zs_pool;
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
index 249575e2485f..e87f78c9b19c 100644
--- a/include/linux/zstd.h
+++ b/include/linux/zstd.h
@@ -791,11 +791,11 @@ size_t ZSTD_DStreamOutSize(void);
/* for static allocation */
#define ZSTD_FRAMEHEADERSIZE_MAX 18
#define ZSTD_FRAMEHEADERSIZE_MIN 6
-static const size_t ZSTD_frameHeaderSize_prefix = 5;
-static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
-static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
+#define ZSTD_frameHeaderSize_prefix 5
+#define ZSTD_frameHeaderSize_min ZSTD_FRAMEHEADERSIZE_MIN
+#define ZSTD_frameHeaderSize_max ZSTD_FRAMEHEADERSIZE_MAX
/* magic number + skippable frame length */
-static const size_t ZSTD_skippableHeaderSize = 8;
+#define ZSTD_skippableHeaderSize 8
/*-*************************************