Diffstat (limited to 'include/linux')
-rw-r--r-- include/linux/acpi.h | 99
-rw-r--r-- include/linux/acpi_iort.h | 5
-rw-r--r-- include/linux/aer.h | 5
-rw-r--r-- include/linux/ahci_platform.h | 2
-rw-r--r-- include/linux/arch_topology.h | 4
-rw-r--r-- include/linux/ata.h | 10
-rw-r--r-- include/linux/atomic.h | 3
-rw-r--r-- include/linux/audit.h | 12
-rw-r--r-- include/linux/avf/virtchnl.h | 9
-rw-r--r-- include/linux/binfmts.h | 26
-rw-r--r-- include/linux/bio.h | 35
-rw-r--r-- include/linux/bitfield.h | 2
-rw-r--r-- include/linux/bitmap.h | 32
-rw-r--r-- include/linux/bitops.h | 5
-rw-r--r-- include/linux/bitrev.h | 19
-rw-r--r-- include/linux/blk-cgroup.h | 3
-rw-r--r-- include/linux/blk-mq-rdma.h | 10
-rw-r--r-- include/linux/blk-mq.h | 5
-rw-r--r-- include/linux/blk_types.h | 3
-rw-r--r-- include/linux/blkdev.h | 64
-rw-r--r-- include/linux/blktrace_api.h | 13
-rw-r--r-- include/linux/bpf.h | 98
-rw-r--r-- include/linux/bpf_types.h | 7
-rw-r--r-- include/linux/bpf_verifier.h | 78
-rw-r--r-- include/linux/bsg-lib.h | 7
-rw-r--r-- include/linux/buffer_head.h | 1
-rw-r--r-- include/linux/byteorder/big_endian.h | 4
-rw-r--r-- include/linux/byteorder/little_endian.h | 4
-rw-r--r-- include/linux/capability.h | 2
-rw-r--r-- include/linux/ccp.h | 11
-rw-r--r-- include/linux/ceph/ceph_fs.h | 6
-rw-r--r-- include/linux/ceph/libceph.h | 11
-rw-r--r-- include/linux/ceph/mon_client.h | 4
-rw-r--r-- include/linux/ceph/rados.h | 1
-rw-r--r-- include/linux/cgroup-defs.h | 68
-rw-r--r-- include/linux/cgroup.h | 55
-rw-r--r-- include/linux/clk-provider.h | 4
-rw-r--r-- include/linux/clk/at91_pmc.h | 25
-rw-r--r-- include/linux/clk/sunxi-ng.h | 35
-rw-r--r-- include/linux/compat.h | 27
-rw-r--r-- include/linux/compiler-gcc.h | 11
-rw-r--r-- include/linux/compiler.h | 34
-rw-r--r-- include/linux/completion.h | 47
-rw-r--r-- include/linux/coresight-pmu.h | 6
-rw-r--r-- include/linux/cper.h | 94
-rw-r--r-- include/linux/cpufreq.h | 38
-rw-r--r-- include/linux/cpuhotplug.h | 24
-rw-r--r-- include/linux/cpuidle.h | 21
-rw-r--r-- include/linux/cpumask.h | 21
-rw-r--r-- include/linux/cpuset.h | 12
-rw-r--r-- include/linux/dax.h | 56
-rw-r--r-- include/linux/dcache.h | 14
-rw-r--r-- include/linux/debugfs.h | 16
-rw-r--r-- include/linux/devfreq.h | 13
-rw-r--r-- include/linux/device-mapper.h | 48
-rw-r--r-- include/linux/device.h | 39
-rw-r--r-- include/linux/dma-fence.h | 19
-rw-r--r-- include/linux/dma-mapping.h | 28
-rw-r--r-- include/linux/dma/qcom_bam_dma.h | 79
-rw-r--r-- include/linux/dmaengine.h | 23
-rw-r--r-- include/linux/drbd.h | 2
-rw-r--r-- include/linux/drbd_genl.h | 3
-rw-r--r-- include/linux/drbd_limits.h | 8
-rw-r--r-- include/linux/edac.h | 1
-rw-r--r-- include/linux/eeprom_93xx46.h | 3
-rw-r--r-- include/linux/efi.h | 35
-rw-r--r-- include/linux/errseq.h | 14
-rw-r--r-- include/linux/ethtool.h | 15
-rw-r--r-- include/linux/extcon.h | 130
-rw-r--r-- include/linux/f2fs_fs.h | 40
-rw-r--r-- include/linux/fb.h | 10
-rw-r--r-- include/linux/fbcon.h | 12
-rw-r--r-- include/linux/filter.h | 17
-rw-r--r-- include/linux/fmc.h | 39
-rw-r--r-- include/linux/fpga/fpga-mgr.h | 4
-rw-r--r-- include/linux/fs.h | 127
-rw-r--r-- include/linux/fscache.h | 9
-rw-r--r-- include/linux/ftrace.h | 4
-rw-r--r-- include/linux/futex.h | 7
-rw-r--r-- include/linux/fwnode.h | 56
-rw-r--r-- include/linux/genalloc.h | 5
-rw-r--r-- include/linux/genhd.h | 26
-rw-r--r-- include/linux/gfp.h | 2
-rw-r--r-- include/linux/gpio/driver.h | 24
-rw-r--r-- include/linux/gpio/machine.h | 3
-rw-r--r-- include/linux/hid.h | 24
-rw-r--r-- include/linux/hmm.h | 520
-rw-r--r-- include/linux/huge_mm.h | 24
-rw-r--r-- include/linux/hugetlb.h | 1
-rw-r--r-- include/linux/hyperv.h | 107
-rw-r--r-- include/linux/i2c-mux-pinctrl.h | 41
-rw-r--r-- include/linux/i2c/bfin_twi.h | 145
-rw-r--r-- include/linux/idr.h | 69
-rw-r--r-- include/linux/igmp.h | 3
-rw-r--r-- include/linux/iio/adc/ad_sigma_delta.h | 3
-rw-r--r-- include/linux/iio/common/st_sensors.h | 12
-rw-r--r-- include/linux/iio/common/st_sensors_i2c.h | 10
-rw-r--r-- include/linux/iio/iio.h | 2
-rw-r--r-- include/linux/iio/timer/stm32-lptim-trigger.h | 27
-rw-r--r-- include/linux/iio/timer/stm32-timer-trigger.h | 14
-rw-r--r-- include/linux/iio/trigger.h | 4
-rw-r--r-- include/linux/inet_diag.h | 7
-rw-r--r-- include/linux/init_task.h | 13
-rw-r--r-- include/linux/input.h | 1
-rw-r--r-- include/linux/interrupt.h | 14
-rw-r--r-- include/linux/interval_tree.h | 8
-rw-r--r-- include/linux/interval_tree_generic.h | 48
-rw-r--r-- include/linux/iommu.h | 69
-rw-r--r-- include/linux/ioport.h | 2
-rw-r--r-- include/linux/iova.h | 67
-rw-r--r-- include/linux/ipc.h | 6
-rw-r--r-- include/linux/ipc_namespace.h | 8
-rw-r--r-- include/linux/ipv6.h | 10
-rw-r--r-- include/linux/irq.h | 2
-rw-r--r-- include/linux/irq_sim.h | 44
-rw-r--r-- include/linux/irqchip/arm-gic-common.h | 2
-rw-r--r-- include/linux/irqchip/arm-gic-v3.h | 84
-rw-r--r-- include/linux/irqchip/arm-gic-v4.h | 105
-rw-r--r-- include/linux/irqchip/mips-gic.h | 297
-rw-r--r-- include/linux/irqdomain.h | 7
-rw-r--r-- include/linux/irqflags.h | 24
-rw-r--r-- include/linux/jump_label.h | 33
-rw-r--r-- include/linux/kasan-checks.h | 10
-rw-r--r-- include/linux/kernel.h | 104
-rw-r--r-- include/linux/kernfs.h | 28
-rw-r--r-- include/linux/key.h | 2
-rw-r--r-- include/linux/kmod.h | 60
-rw-r--r-- include/linux/kobject.h | 2
-rw-r--r-- include/linux/kvm_host.h | 9
-rw-r--r-- include/linux/leds.h | 2
-rw-r--r-- include/linux/lguest.h | 73
-rw-r--r-- include/linux/lguest_launcher.h | 44
-rw-r--r-- include/linux/libnvdimm.h | 16
-rw-r--r-- include/linux/lockdep.h | 165
-rw-r--r-- include/linux/lsm_audit.h | 2
-rw-r--r-- include/linux/lsm_hooks.h | 21
-rw-r--r-- include/linux/mcb.h | 2
-rw-r--r-- include/linux/mdio-mux.h | 9
-rw-r--r-- include/linux/mem_encrypt.h | 13
-rw-r--r-- include/linux/memcontrol.h | 52
-rw-r--r-- include/linux/memory_hotplug.h | 13
-rw-r--r-- include/linux/memremap.h | 99
-rw-r--r-- include/linux/mfd/axp20x.h | 29
-rw-r--r-- include/linux/mfd/bd9571mwv.h | 115
-rw-r--r-- include/linux/mfd/cros_ec_commands.h | 75
-rw-r--r-- include/linux/mfd/da9052/da9052.h | 6
-rw-r--r-- include/linux/mfd/da9052/reg.h | 11
-rw-r--r-- include/linux/mfd/dm355evm_msp.h (renamed from include/linux/i2c/dm355evm_msp.h) | 0
-rw-r--r-- include/linux/mfd/ds1wm.h | 29
-rw-r--r-- include/linux/mfd/hi6421-pmic.h | 5
-rw-r--r-- include/linux/mfd/rk808.h | 121
-rw-r--r-- include/linux/mfd/rn5t618.h | 6
-rw-r--r-- include/linux/mfd/stm32-lptimer.h | 62
-rw-r--r-- include/linux/mfd/syscon/atmel-smc.h | 32
-rw-r--r-- include/linux/mfd/tmio.h | 6
-rw-r--r-- include/linux/mfd/tps65010.h (renamed from include/linux/i2c/tps65010.h) | 2
-rw-r--r-- include/linux/mfd/tps68470.h | 97
-rw-r--r-- include/linux/mfd/twl.h (renamed from include/linux/i2c/twl.h) | 0
-rw-r--r-- include/linux/migrate.h | 152
-rw-r--r-- include/linux/migrate_mode.h | 5
-rw-r--r-- include/linux/mlx4/device.h | 28
-rw-r--r-- include/linux/mlx5/device.h | 20
-rw-r--r-- include/linux/mlx5/driver.h | 43
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 165
-rw-r--r-- include/linux/mlx5/qp.h | 3
-rw-r--r-- include/linux/mlx5/srq.h | 5
-rw-r--r-- include/linux/mlx5/vport.h | 3
-rw-r--r-- include/linux/mm.h | 73
-rw-r--r-- include/linux/mm_types.h | 102
-rw-r--r-- include/linux/mmc/card.h | 4
-rw-r--r-- include/linux/mmc/core.h | 23
-rw-r--r-- include/linux/mmc/host.h | 62
-rw-r--r-- include/linux/mmc/sdio_ids.h | 1
-rw-r--r-- include/linux/mmu_notifier.h | 30
-rw-r--r-- include/linux/mmzone.h | 39
-rw-r--r-- include/linux/mod_devicetable.h | 2
-rw-r--r-- include/linux/module.h | 2
-rw-r--r-- include/linux/msg.h | 15
-rw-r--r-- include/linux/msi.h | 1
-rw-r--r-- include/linux/mtd/mtd.h | 10
-rw-r--r-- include/linux/mtd/nand-gpio.h | 2
-rw-r--r-- include/linux/mtd/rawnand.h (renamed from include/linux/mtd/nand.h) | 30
-rw-r--r-- include/linux/mtd/sh_flctl.h | 2
-rw-r--r-- include/linux/mtd/sharpsl.h | 3
-rw-r--r-- include/linux/mtd/spi-nor.h | 11
-rw-r--r-- include/linux/mtd/xip.h | 10
-rw-r--r-- include/linux/mux/consumer.h | 2
-rw-r--r-- include/linux/net.h | 12
-rw-r--r-- include/linux/netdev_features.h | 6
-rw-r--r-- include/linux/netdevice.h | 71
-rw-r--r-- include/linux/netfilter.h | 45
-rw-r--r-- include/linux/netfilter/xt_hashlimit.h | 3
-rw-r--r-- include/linux/netfilter_bridge/ebtables.h | 7
-rw-r--r-- include/linux/netfilter_ingress.h | 4
-rw-r--r-- include/linux/nfs_fs.h | 6
-rw-r--r-- include/linux/nfs_page.h | 6
-rw-r--r-- include/linux/nfs_xdr.h | 2
-rw-r--r-- include/linux/nmi.h | 121
-rw-r--r-- include/linux/nvme-fc-driver.h | 15
-rw-r--r-- include/linux/nvme.h | 62
-rw-r--r-- include/linux/nvmem-consumer.h | 10
-rw-r--r-- include/linux/of.h | 13
-rw-r--r-- include/linux/of_device.h | 2
-rw-r--r-- include/linux/of_platform.h | 7
-rw-r--r-- include/linux/page-flags.h | 4
-rw-r--r-- include/linux/pagemap.h | 12
-rw-r--r-- include/linux/pagevec.h | 12
-rw-r--r-- include/linux/pci-epc.h | 8
-rw-r--r-- include/linux/pci-epf.h | 11
-rw-r--r-- include/linux/pci.h | 65
-rw-r--r-- include/linux/pci_ids.h | 1
-rw-r--r-- include/linux/pcieport_if.h | 6
-rw-r--r-- include/linux/percpu.h | 20
-rw-r--r-- include/linux/perf_event.h | 27
-rw-r--r-- include/linux/phy.h | 21
-rw-r--r-- include/linux/phy/phy.h | 2
-rw-r--r-- include/linux/phylink.h | 148
-rw-r--r-- include/linux/pinctrl/machine.h | 4
-rw-r--r-- include/linux/pinctrl/pinconf-generic.h | 2
-rw-r--r-- include/linux/platform_data/gpio_backlight.h | 1
-rw-r--r-- include/linux/platform_data/hsmmc-omap.h | 3
-rw-r--r-- include/linux/platform_data/mdio-bcm-unimac.h | 13
-rw-r--r-- include/linux/platform_data/mtd-davinci.h | 2
-rw-r--r-- include/linux/platform_data/mtd-nand-s3c2410.h | 2
-rw-r--r-- include/linux/platform_data/omap_drm.h | 53
-rw-r--r-- include/linux/platform_data/pca954x.h (renamed from include/linux/i2c/pca954x.h) | 0
-rw-r--r-- include/linux/platform_data/tc35876x.h (renamed from include/linux/i2c/tc35876x.h) | 0
-rw-r--r-- include/linux/platform_data/x86/apple.h | 13
-rw-r--r-- include/linux/platform_data/x86/mlxcpld.h (renamed from include/linux/i2c/mlxcpld.h) | 0
-rw-r--r-- include/linux/pm.h | 4
-rw-r--r-- include/linux/pm_domain.h | 3
-rw-r--r-- include/linux/power/bq24190_charger.h | 18
-rw-r--r-- include/linux/power/bq27xxx_battery.h | 27
-rw-r--r-- include/linux/power_supply.h | 2
-rw-r--r-- include/linux/pps-gpio.h | 2
-rw-r--r-- include/linux/pps_kernel.h | 16
-rw-r--r-- include/linux/proc_fs.h | 8
-rw-r--r-- include/linux/property.h | 67
-rw-r--r-- include/linux/qed/qed_eth_if.h | 1
-rw-r--r-- include/linux/qed/qed_if.h | 37
-rw-r--r-- include/linux/quota.h | 31
-rw-r--r-- include/linux/quotaops.h | 5
-rw-r--r-- include/linux/radix-tree.h | 21
-rw-r--r-- include/linux/raid/pq.h | 1
-rw-r--r-- include/linux/rbtree.h | 21
-rw-r--r-- include/linux/rbtree_augmented.h | 33
-rw-r--r-- include/linux/rcupdate.h | 31
-rw-r--r-- include/linux/rcutiny.h | 8
-rw-r--r-- include/linux/refcount.h | 4
-rw-r--r-- include/linux/regulator/mt6380-regulator.h | 32
-rw-r--r-- include/linux/remoteproc.h | 2
-rw-r--r-- include/linux/remoteproc/qcom_rproc.h | 22
-rw-r--r-- include/linux/reservation.h | 3
-rw-r--r-- include/linux/reset.h | 68
-rw-r--r-- include/linux/rhashtable.h | 2
-rw-r--r-- include/linux/rmap.h | 7
-rw-r--r-- include/linux/rpmsg/qcom_glink.h | 27
-rw-r--r-- include/linux/rtc.h | 2
-rw-r--r-- include/linux/rtmutex.h | 11
-rw-r--r-- include/linux/rwsem-spinlock.h | 1
-rw-r--r-- include/linux/rwsem.h | 1
-rw-r--r-- include/linux/rxrpc.h | 79
-rw-r--r-- include/linux/sched.h | 88
-rw-r--r-- include/linux/sched/debug.h | 4
-rw-r--r-- include/linux/sched/mm.h | 10
-rw-r--r-- include/linux/sched/task.h | 1
-rw-r--r-- include/linux/sched/user.h | 3
-rw-r--r-- include/linux/sctp.h | 171
-rw-r--r-- include/linux/seccomp.h | 3
-rw-r--r-- include/linux/security.h | 15
-rw-r--r-- include/linux/seg6_local.h | 6
-rw-r--r-- include/linux/sem.h | 3
-rw-r--r-- include/linux/serial_8250.h | 7
-rw-r--r-- include/linux/serial_core.h | 10
-rw-r--r-- include/linux/sfp.h | 434
-rw-r--r-- include/linux/shm.h | 23
-rw-r--r-- include/linux/shmem_fs.h | 6
-rw-r--r-- include/linux/shrinker.h | 7
-rw-r--r-- include/linux/signal.h | 22
-rw-r--r-- include/linux/skbuff.h | 244
-rw-r--r-- include/linux/slub_def.h | 4
-rw-r--r-- include/linux/smp.h | 8
-rw-r--r-- include/linux/smpboot.h | 4
-rw-r--r-- include/linux/soc/mediatek/infracfg.h | 7
-rw-r--r-- include/linux/soc/ti/knav_dma.h | 2
-rw-r--r-- include/linux/socket.h | 1
-rw-r--r-- include/linux/spinlock.h | 72
-rw-r--r-- include/linux/spinlock_up.h | 6
-rw-r--r-- include/linux/srcutiny.h | 13
-rw-r--r-- include/linux/srcutree.h | 3
-rw-r--r-- include/linux/string.h | 49
-rw-r--r-- include/linux/sunrpc/sched.h | 2
-rw-r--r-- include/linux/sunrpc/svc.h | 6
-rw-r--r-- include/linux/sunrpc/svc_xprt.h | 4
-rw-r--r-- include/linux/sunrpc/xdr.h | 13
-rw-r--r-- include/linux/sunrpc/xprt.h | 5
-rw-r--r-- include/linux/suspend.h | 48
-rw-r--r-- include/linux/swait.h | 113
-rw-r--r-- include/linux/swap.h | 102
-rw-r--r-- include/linux/swapops.h | 143
-rw-r--r-- include/linux/syscalls.h | 47
-rw-r--r-- include/linux/syslog.h | 9
-rw-r--r-- include/linux/tcp.h | 11
-rw-r--r-- include/linux/tee_drv.h | 1
-rw-r--r-- include/linux/thermal.h | 1
-rw-r--r-- include/linux/thread_info.h | 6
-rw-r--r-- include/linux/time.h | 17
-rw-r--r-- include/linux/timer.h | 14
-rw-r--r-- include/linux/tnum.h | 81
-rw-r--r-- include/linux/trace_events.h | 13
-rw-r--r-- include/linux/tty.h | 26
-rw-r--r-- include/linux/tty_driver.h | 2
-rw-r--r-- include/linux/tty_flip.h | 3
-rw-r--r-- include/linux/uaccess.h | 2
-rw-r--r-- include/linux/umh.h | 69
-rw-r--r-- include/linux/usb/chipidea.h | 1
-rw-r--r-- include/linux/usb/gadget.h | 2
-rw-r--r-- include/linux/usb/phy.h | 54
-rw-r--r-- include/linux/user_namespace.h | 9
-rw-r--r-- include/linux/virtio_net.h | 5
-rw-r--r-- include/linux/vm_event_item.h | 6
-rw-r--r-- include/linux/vmstat.h | 37
-rw-r--r-- include/linux/vt_buffer.h | 13
-rw-r--r-- include/linux/w1.h | 4
-rw-r--r-- include/linux/wait.h | 3
-rw-r--r-- include/linux/workqueue.h | 2
-rw-r--r-- include/linux/xxhash.h | 236
-rw-r--r-- include/linux/zstd.h | 1157
328 files changed, 8336 insertions(+), 2656 deletions(-)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 27b4b6615263..502af53ec012 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -57,9 +57,6 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
acpi_fwnode_handle(adev) : NULL)
#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
-
-extern const struct fwnode_operations acpi_fwnode_ops;
-
static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
{
struct fwnode_handle *fwnode;
@@ -68,15 +65,14 @@ static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
if (!fwnode)
return NULL;
- fwnode->type = FWNODE_ACPI_STATIC;
- fwnode->ops = &acpi_fwnode_ops;
+ fwnode->ops = &acpi_static_fwnode_ops;
return fwnode;
}
static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
{
- if (WARN_ON(!fwnode || fwnode->type != FWNODE_ACPI_STATIC))
+ if (WARN_ON(!is_acpi_static_node(fwnode)))
return;
kfree(fwnode);
@@ -228,8 +224,8 @@ struct acpi_subtable_proc {
int count;
};
-char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
-void __acpi_unmap_table(char *map, unsigned long size);
+void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
+void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
void acpi_boot_table_init (void);
@@ -427,6 +423,8 @@ void acpi_dev_free_resource_list(struct list_head *list);
int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data);
+int acpi_dev_get_dma_resources(struct acpi_device *adev,
+ struct list_head *list);
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
unsigned long types);
@@ -556,6 +554,25 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
#define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81
#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
+enum acpi_predicate {
+ all_versions,
+ less_than_or_equal,
+ equal,
+ greater_than_or_equal,
+};
+
+/* Table must be terminated by a NULL entry */
+struct acpi_platform_list {
+ char oem_id[ACPI_OEM_ID_SIZE+1];
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE+1];
+ u32 oem_revision;
+ char *table;
+ enum acpi_predicate pred;
+ char *reason;
+ u32 data;
+};
+int acpi_match_platform_list(const struct acpi_platform_list *plat);
+
extern void acpi_early_init(void);
extern void acpi_subsystem_init(void);
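
For orientation, a caller-side sketch of the new acpi_match_platform_list() interface (not part of this diff; the OEM strings and helper names are hypothetical, and the index-or-negative return convention is assumed from the declaration above):

#include <linux/acpi.h>

/* hypothetical quirk table; OEM strings are illustrative */
static struct acpi_platform_list quirk_list[] __initdata = {
	/* oem_id, oem_table_id, oem_revision, table, pred */
	{ "VNDR  ", "VNDRTBL ", 0, ACPI_SIG_FADT, all_versions },
	{ }	/* terminating NULL entry, as the comment above requires */
};

static bool __init platform_has_quirk(void)
{
	return acpi_match_platform_list(quirk_list) >= 0;
}
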
@@ -774,6 +791,12 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
+static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr,
+ u64 *offset, u64 *size)
+{
+ return -ENODEV;
+}
+
static inline int acpi_dma_configure(struct device *dev,
enum dev_dma_attr attr)
{
@@ -1007,13 +1030,14 @@ struct acpi_reference_args {
};
#ifdef CONFIG_ACPI
-int acpi_dev_get_property(struct acpi_device *adev, const char *name,
+int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj);
-int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
struct acpi_reference_args *args);
-static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+static inline int acpi_node_get_property_reference(
+ const struct fwnode_handle *fwnode,
const char *name, size_t index,
struct acpi_reference_args *args)
{
@@ -1021,22 +1045,25 @@ static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
MAX_ACPI_REFERENCE_ARGS, args);
}
-int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
+int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
-int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
- enum dev_prop_type proptype, void *val);
-int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval);
-int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
+int acpi_dev_prop_read_single(struct acpi_device *adev,
+ const char *propname, enum dev_prop_type proptype,
+ void *val);
+int acpi_node_prop_read(const struct fwnode_handle *fwnode,
+ const char *propname, enum dev_prop_type proptype,
+ void *val, size_t nval);
+int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
enum dev_prop_type proptype, void *val, size_t nval);
-struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
+struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
-struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode);
+struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
-struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
- struct fwnode_handle *prev);
-int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+struct fwnode_handle *
+acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev);
+int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle **remote,
struct fwnode_handle **port,
struct fwnode_handle **endpoint);
@@ -1104,35 +1131,36 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
}
static inline int
-__acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+__acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
struct acpi_reference_args *args)
{
return -ENXIO;
}
-static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, size_t index,
- struct acpi_reference_args *args)
+static inline int
+acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ const char *name, size_t index,
+ struct acpi_reference_args *args)
{
return -ENXIO;
}
-static inline int acpi_node_prop_get(struct fwnode_handle *fwnode,
+static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode,
const char *propname,
void **valptr)
{
return -ENXIO;
}
-static inline int acpi_dev_prop_get(struct acpi_device *adev,
+static inline int acpi_dev_prop_get(const struct acpi_device *adev,
const char *propname,
void **valptr)
{
return -ENXIO;
}
-static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
+static inline int acpi_dev_prop_read_single(const struct acpi_device *adev,
const char *propname,
enum dev_prop_type proptype,
void *val)
@@ -1140,7 +1168,7 @@ static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
return -ENXIO;
}
-static inline int acpi_node_prop_read(struct fwnode_handle *fwnode,
+static inline int acpi_node_prop_read(const struct fwnode_handle *fwnode,
const char *propname,
enum dev_prop_type proptype,
void *val, size_t nval)
@@ -1148,7 +1176,7 @@ static inline int acpi_node_prop_read(struct fwnode_handle *fwnode,
return -ENXIO;
}
-static inline int acpi_dev_prop_read(struct acpi_device *adev,
+static inline int acpi_dev_prop_read(const struct acpi_device *adev,
const char *propname,
enum dev_prop_type proptype,
void *val, size_t nval)
@@ -1157,26 +1185,27 @@ static inline int acpi_dev_prop_read(struct acpi_device *adev,
}
static inline struct fwnode_handle *
-acpi_get_next_subnode(struct fwnode_handle *fwnode, struct fwnode_handle *child)
+acpi_get_next_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
{
return NULL;
}
static inline struct fwnode_handle *
-acpi_node_get_parent(struct fwnode_handle *fwnode)
+acpi_node_get_parent(const struct fwnode_handle *fwnode)
{
return NULL;
}
static inline struct fwnode_handle *
-acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
return ERR_PTR(-ENXIO);
}
static inline int
-acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle **remote,
struct fwnode_handle **port,
struct fwnode_handle **endpoint)
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 8379d406ad2e..8d3f0bf80379 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -36,7 +36,7 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
void acpi_configure_pmsi_domain(struct device *dev);
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
/* IOMMU interface */
-void iort_set_dma_mask(struct device *dev);
+void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
const struct iommu_ops *iort_iommu_configure(struct device *dev);
#else
static inline void acpi_iort_init(void) { }
@@ -47,7 +47,8 @@ static inline struct irq_domain *iort_get_device_domain(struct device *dev,
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
/* IOMMU interface */
-static inline void iort_set_dma_mask(struct device *dev) { }
+static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
+ u64 *size) { }
static inline
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 04602cbe85dc..43799bd17a02 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -39,7 +39,7 @@ struct aer_capability_regs {
};
#if defined(CONFIG_PCIEAER)
-/* pci-e port driver needs this function to enable aer */
+/* PCIe port driver needs this function to enable AER */
int pci_enable_pcie_error_reporting(struct pci_dev *dev);
int pci_disable_pcie_error_reporting(struct pci_dev *dev);
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
@@ -67,7 +67,6 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
- int severity,
- struct aer_capability_regs *aer_regs);
+ int severity, struct aer_capability_regs *aer_regs);
#endif //_AER_H_
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index a270f25ee7c7..1b0a17b22cd3 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -36,6 +36,8 @@ int ahci_platform_init_host(struct platform_device *pdev,
const struct ata_port_info *pi_template,
struct scsi_host_template *sht);
+void ahci_platform_shutdown(struct platform_device *pdev);
+
int ahci_platform_suspend_host(struct device *dev);
int ahci_platform_resume_host(struct device *dev);
int ahci_platform_suspend(struct device *dev);
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 9af3c174c03a..716ce587247e 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -4,10 +4,12 @@
#ifndef _LINUX_ARCH_TOPOLOGY_H_
#define _LINUX_ARCH_TOPOLOGY_H_
+#include <linux/types.h>
+
void topology_normalize_cpu_scale(void);
struct device_node;
-int topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
+bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
struct sched_domain;
unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu);
diff --git a/include/linux/ata.h b/include/linux/ata.h
index e65ae4b2ed48..c7a353825450 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -60,7 +60,8 @@ enum {
ATA_ID_FW_REV = 23,
ATA_ID_PROD = 27,
ATA_ID_MAX_MULTSECT = 47,
- ATA_ID_DWORD_IO = 48,
+ ATA_ID_DWORD_IO = 48, /* before ATA-8 */
+ ATA_ID_TRUSTED = 48, /* ATA-8 and later */
ATA_ID_CAPABILITY = 49,
ATA_ID_OLD_PIO_MODES = 51,
ATA_ID_OLD_DMA_MODES = 52,
@@ -889,6 +890,13 @@ static inline bool ata_id_has_dword_io(const u16 *id)
return id[ATA_ID_DWORD_IO] & (1 << 0);
}
+static inline bool ata_id_has_trusted(const u16 *id)
+{
+ if (ata_id_major_version(id) <= 7)
+ return false;
+ return id[ATA_ID_TRUSTED] & (1 << 0);
+}
+
static inline bool ata_id_has_unload(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index c56be7410130..40d6bfec0e0d 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -38,6 +38,9 @@
* Besides, if an arch has a special barrier for acquire/release, it could
* implement its own __atomic_op_* and use the same framework for building
* variants
+ *
+ * If an architecture overrides __atomic_op_acquire() it will probably want
+ * to define smp_mb__after_spinlock().
*/
#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...) \
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 2150bdccfbab..cb708eb8accc 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -314,11 +314,7 @@ void audit_core_dumps(long signr);
static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{
- if (!audit_enabled)
- return;
-
- /* Force a record to be reported if a signal was delivered. */
- if (signr || unlikely(!audit_dummy_context()))
+ if (audit_enabled && unlikely(!audit_dummy_context()))
__audit_seccomp(syscall, signr, code);
}
@@ -351,7 +347,7 @@ extern int __audit_socketcall(int nargs, unsigned long *args);
extern int __audit_sockaddr(int len, void *addr);
extern void __audit_fd_pair(int fd1, int fd2);
extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
-extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout);
+extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout);
extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification);
extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
@@ -412,7 +408,7 @@ static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
if (unlikely(!audit_dummy_context()))
__audit_mq_open(oflag, mode, attr);
}
-static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout)
+static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec64 *abs_timeout)
{
if (unlikely(!audit_dummy_context()))
__audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout);
@@ -549,7 +545,7 @@ static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
{ }
static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len,
unsigned int msg_prio,
- const struct timespec *abs_timeout)
+ const struct timespec64 *abs_timeout)
{ }
static inline void audit_mq_notify(mqd_t mqdes,
const struct sigevent *notification)
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index c893b9520a67..2b038442c352 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -133,6 +133,8 @@ enum virtchnl_ops {
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
};
/* This macro is used to generate a compilation error if a structure
@@ -223,7 +225,7 @@ struct virtchnl_vsi_resource {
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
-/* VF offload flags
+/* VF capability flags
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
@@ -251,7 +253,7 @@ struct virtchnl_vf_resource {
u16 max_vectors;
u16 max_mtu;
- u32 vf_offload_flags;
+ u32 vf_cap_flags;
u32 rss_key_size;
u32 rss_lut_size;
@@ -686,6 +688,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_SET_RSS_HENA:
valid_len = sizeof(struct virtchnl_rss_hena);
break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 3ae9013eeaaa..18d05b5491f3 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -25,11 +25,25 @@ struct linux_binprm {
struct mm_struct *mm;
unsigned long p; /* current top of mem */
unsigned int
- cred_prepared:1,/* true if creds already prepared (multiple
- * preps happen for interpreters) */
- cap_effective:1;/* true if has elevated effective capabilities,
- * false if not; except for init which inherits
- * its parent's caps anyway */
+ /*
+ * True after the bprm_set_creds hook has been called once
+ * (multiple calls can be made via prepare_binprm() for
+ * binfmt_script/misc).
+ */
+ called_set_creds:1,
+ /*
+ * True if most recent call to the commoncaps bprm_set_creds
+ * hook (due to multiple prepare_binprm() calls from the
+ * binfmt_script/misc handlers) resulted in elevated
+ * privileges.
+ */
+ cap_elevated:1,
+ /*
+ * Set by bprm_set_creds hook to indicate a privilege-gaining
+ * exec has happened. Used to sanitize execution environment
+ * and to set AT_SECURE auxv for glibc.
+ */
+ secureexec:1;
#ifdef __alpha__
unsigned int taso:1;
#endif
@@ -117,7 +131,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
int executable_stack);
extern int transfer_args_to_stack(struct linux_binprm *bprm,
unsigned long *sp_location);
-extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
+extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc, const char *const *argv,
struct linux_binprm *bprm);
extern int prepare_bprm_creds(struct linux_binprm *bprm);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b1cf4ba0902..275c91c99516 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -38,7 +38,15 @@
#define BIO_BUG_ON
#endif
+#ifdef CONFIG_THP_SWAP
+#if HPAGE_PMD_NR > 256
+#define BIO_MAX_PAGES HPAGE_PMD_NR
+#else
#define BIO_MAX_PAGES 256
+#endif
+#else
+#define BIO_MAX_PAGES 256
+#endif
#define bio_prio(bio) (bio)->bi_ioprio
#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
@@ -463,10 +471,11 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
-void generic_start_io_acct(int rw, unsigned long sectors,
- struct hd_struct *part);
-void generic_end_io_acct(int rw, struct hd_struct *part,
- unsigned long start_time);
+void generic_start_io_acct(struct request_queue *q, int rw,
+ unsigned long sectors, struct hd_struct *part);
+void generic_end_io_acct(struct request_queue *q, int rw,
+ struct hd_struct *part,
+ unsigned long start_time);
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
@@ -493,6 +502,24 @@ extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
+#define bio_set_dev(bio, bdev) \
+do { \
+ (bio)->bi_disk = (bdev)->bd_disk; \
+ (bio)->bi_partno = (bdev)->bd_partno; \
+} while (0)
+
+#define bio_copy_dev(dst, src) \
+do { \
+ (dst)->bi_disk = (src)->bi_disk; \
+ (dst)->bi_partno = (src)->bi_partno; \
+} while (0)
+
+#define bio_dev(bio) \
+ disk_devt((bio)->bi_disk)
+
+#define bio_devname(bio, buf) \
+ __bdevname(bio_dev(bio), (buf))
+
#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio);
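
Since struct bio loses bi_bdev in this series (see the blk_types.h hunk below), drivers stop writing bio->bi_bdev directly and go through these accessors. A minimal sketch, with the helper name hypothetical:

#include <linux/bio.h>

/* hypothetical remap helper: the old "bio->bi_bdev = bdev;" becomes: */
static void remap_bio(struct bio *bio, struct block_device *bdev)
{
	char b[BDEVNAME_SIZE];

	bio_set_dev(bio, bdev);
	pr_debug("bio remapped to %s (dev %u:%u)\n", bio_devname(bio, b),
		 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
}
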
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 8b9d6fff002d..f2deb71958b2 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -92,7 +92,7 @@
/**
* FIELD_GET() - extract a bitfield element
* @_mask: shifted mask defining the field's length and position
- * @_reg: 32bit value of entire bitfield
+ * @_reg: value of entire bitfield
*
* FIELD_GET() extracts the field specified by @_mask from the
* bitfield passed in as @_reg by masking and shifting it down.
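
As a usage sketch (the register layout is hypothetical), FIELD_GET() pairs naturally with GENMASK():

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define STATUS_SPEED	GENMASK(7, 4)	/* hypothetical 4-bit field */

static u8 status_speed(u32 status)
{
	return FIELD_GET(STATUS_SPEED, status);	/* 0x00a0 -> 0xa */
}
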
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 5797ca6fdfe2..700cf5f67118 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -361,6 +361,38 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
}
/*
+ * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
+ *
+ * Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit
+ * integers in 32-bit environment, and 64-bit integers in 64-bit one.
+ *
+ * There are four combinations of endianness and length of the word in linux
+ * ABIs: LE64, BE64, LE32 and BE32.
+ *
+ * On 64-bit kernels 64-bit LE and BE numbers are naturally ordered in
+ * bitmaps and therefore don't require any special handling.
+ *
+ * On 32-bit kernels 32-bit LE ABI orders lo word of 64-bit number in memory
+ * prior to hi, and 32-bit BE orders hi word prior to lo. The bitmap on the
+ * other hand is represented as an array of 32-bit words and the position of
+ * bit N may therefore be calculated as: word #(N/32) and bit #(N%32) in that
+ * word. For example, bit #42 is located at 10th position of 2nd word.
+ * It matches 32-bit LE ABI, and we can simply let the compiler store 64-bit
+ * values in memory as it usually does. But for BE we need to swap hi and lo
+ * words manually.
+ *
+ * With all that, the macro BITMAP_FROM_U64() does explicit reordering of hi and
+ * lo parts of u64. For LE32 it does nothing, and for BE environment it swaps
+ * hi and lo words, as is expected by bitmap.
+ */
+#if __BITS_PER_LONG == 64
+#define BITMAP_FROM_U64(n) (n)
+#else
+#define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \
+ ((unsigned long) ((u64)(n) >> 32))
+#endif
+
+/*
* bitmap_from_u64 - Check and swap words within u64.
* @mask: source bitmap
* @dst: destination bitmap
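
A short sketch of the intended use, initializing a bitmap from a u64 constant so the layout comes out right on both 32- and 64-bit kernels (the mask value is illustrative):

#include <linux/bitmap.h>

static const unsigned long init_mask[] = {
	BITMAP_FROM_U64(0x0000000f000000ffULL)	/* bits 0..7 and 32..35 */
};
/* test_bit(34, init_mask) is true on every ABI */
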
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a83c822c35c2..8fbe259b197c 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -19,10 +19,11 @@
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#define GENMASK(h, l) \
- (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
#define GENMASK_ULL(h, l) \
- (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
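
The rewritten macros expand to the same mask as before; a quick check with a hypothetical field layout:

#include <linux/bitops.h>

#define CTRL_MODE	GENMASK(6, 3)	/* 0x78: bits 3..6 */

static u32 ctrl_get_mode(u32 reg)
{
	/* (~0UL - (1UL << 3) + 1) == ~0UL << 3 == 0x...f8, ANDed with
	 * ~0UL >> (BITS_PER_LONG - 7) == 0x7f, gives 0x78 */
	return (reg & CTRL_MODE) >> 3;
}
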
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index fb790b8449c1..b97be27e5a85 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -29,6 +29,8 @@ static inline u32 __bitrev32(u32 x)
#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
+#define __bitrev8x4(x) (__bitrev32(swab32(x)))
+
#define __constant_bitrev32(x) \
({ \
u32 __x = x; \
@@ -50,6 +52,15 @@ static inline u32 __bitrev32(u32 x)
__x; \
})
+#define __constant_bitrev8x4(x) \
+({ \
+ u32 __x = x; \
+ __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
+ __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
+ __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
+ __x; \
+})
+
#define __constant_bitrev8(x) \
({ \
u8 __x = x; \
@@ -75,6 +86,14 @@ static inline u32 __bitrev32(u32 x)
__bitrev16(__x); \
})
+#define bitrev8x4(x) \
+({ \
+ u32 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev8x4(__x) : \
+ __bitrev8x4(__x); \
+ })
+
#define bitrev8(x) \
({ \
u8 __x = x; \
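
From its definition above (__bitrev32(swab32(x))), bitrev8x4() reverses the bit order inside each byte while keeping the bytes in place. A quick illustration:

#include <linux/bitrev.h>

static u32 flip_byte_lanes(u32 w)
{
	return bitrev8x4(w);	/* e.g. 0x01020408 -> 0x80402010 */
}
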
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 7104bea8dab1..9d92153dd856 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -691,6 +691,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
rcu_read_lock();
blkcg = bio_blkcg(bio);
+ /* associate blkcg if bio hasn't attached one */
+ bio_associate_blkcg(bio, &blkcg->css);
+
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg)) {
spin_lock_irq(q->queue_lock);
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
new file mode 100644
index 000000000000..b4ade198007d
--- /dev/null
+++ b/include/linux/blk-mq-rdma.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_BLK_MQ_RDMA_H
+#define _LINUX_BLK_MQ_RDMA_H
+
+struct blk_mq_tag_set;
+struct ib_device;
+
+int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+ struct ib_device *dev, int first_vec);
+
+#endif /* _LINUX_BLK_MQ_RDMA_H */
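
A sketch of how an RDMA block driver's ->map_queues callback might use the new helper (driver structure and field names are hypothetical; only the tag-set/ib_device pairing comes from the declaration above):

#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>

static int my_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct my_ctrl *ctrl = set->driver_data;	/* hypothetical */

	/* spread hw queues across the device's completion vectors,
	 * starting at vector 0 */
	return blk_mq_rdma_map_queues(set, ctrl->ibdev, 0);
}
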
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 14542308d25b..50c6485cb04f 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -97,7 +97,6 @@ typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int, unsigned int);
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int);
-typedef int (reinit_request_fn)(void *, struct request *);
typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
@@ -143,7 +142,6 @@ struct blk_mq_ops {
*/
init_request_fn *init_request;
exit_request_fn *exit_request;
- reinit_request_fn *reinit_request;
/* Called from inside blk_get_request() */
void (*initialize_rq_fn)(struct request *rq);
@@ -261,7 +259,8 @@ void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
+int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
+ int (reinit_request)(void *, struct request *));
int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d2eb87c84d82..a2d2aa709cef 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -48,7 +48,8 @@ struct blk_issue_stat {
*/
struct bio {
struct bio *bi_next; /* request queue link */
- struct block_device *bi_bdev;
+ struct gendisk *bi_disk;
+ u8 bi_partno;
blk_status_t bi_status;
unsigned int bi_opf; /* bottom bits req flags,
* top bits REQ_OP. Use
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 25f6a0cb27d3..02fa42d24b52 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -134,7 +134,7 @@ typedef __u32 __bitwise req_flags_t;
struct request {
struct list_head queuelist;
union {
- struct call_single_data csd;
+ call_single_data_t csd;
u64 fifo_time;
};
@@ -551,6 +551,7 @@ struct request_queue {
int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
struct blk_trace *blk_trace;
+ struct mutex blk_trace_mutex;
#endif
/*
* for flush operations
@@ -568,7 +569,6 @@ struct request_queue {
#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
- int bsg_job_size;
struct bsg_class_device bsg_dev;
#endif
@@ -601,38 +601,36 @@ struct request_queue {
u64 write_hints[BLK_MAX_WRITE_HINTS];
};
-#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
-#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
-#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
-#define QUEUE_FLAG_DYING 5 /* queue being torn down */
-#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
-#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
-#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
-#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */
+#define QUEUE_FLAG_STOPPED 1 /* queue is stopped */
+#define QUEUE_FLAG_DYING 2 /* queue being torn down */
+#define QUEUE_FLAG_BYPASS 3 /* act as dumb FIFO queue */
+#define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */
+#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */
+#define QUEUE_FLAG_STACKABLE 8 /* supports request stacking */
+#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
-#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE 17 /* supports secure erase */
-#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
-#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
-#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
-#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */
-#define QUEUE_FLAG_WC 23 /* Write back caching */
-#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
-#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
-#define QUEUE_FLAG_DAX 26 /* device supports DAX */
-#define QUEUE_FLAG_STATS 27 /* track rq completion times */
-#define QUEUE_FLAG_POLL_STATS 28 /* collecting stats for hybrid polling */
-#define QUEUE_FLAG_REGISTERED 29 /* queue has been registered to a disk */
-#define QUEUE_FLAG_SCSI_PASSTHROUGH 30 /* queue supports SCSI commands */
-#define QUEUE_FLAG_QUIESCED 31 /* queue has been quiesced */
+#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */
+#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */
+#define QUEUE_FLAG_SECERASE 14 /* supports secure erase */
+#define QUEUE_FLAG_SAME_FORCE 15 /* force complete on same CPU */
+#define QUEUE_FLAG_DEAD 16 /* queue tear-down finished */
+#define QUEUE_FLAG_INIT_DONE 17 /* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 18 /* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_POLL 19 /* IO polling enabled if set */
+#define QUEUE_FLAG_WC 20 /* Write back caching */
+#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */
+#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueable */
+#define QUEUE_FLAG_DAX 23 /* device supports DAX */
+#define QUEUE_FLAG_STATS 24 /* track rq completion times */
+#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */
+#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
+#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
+#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index d2e908586e3d..67b4d4dfc19c 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -28,10 +28,12 @@ struct blk_trace {
atomic_t dropped;
};
+struct blkcg;
+
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern __printf(2, 3)
-void __trace_note_message(struct blk_trace *, const char *fmt, ...);
+extern __printf(3, 4)
+void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...);
/**
* blk_add_trace_msg - Add a (simple) message to the blktrace stream
@@ -46,12 +48,14 @@ void __trace_note_message(struct blk_trace *, const char *fmt, ...);
* NOTE: Can not use 'static inline' due to presence of var args...
*
**/
-#define blk_add_trace_msg(q, fmt, ...) \
+#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
do { \
struct blk_trace *bt = (q)->blk_trace; \
if (unlikely(bt)) \
- __trace_note_message(bt, fmt, ##__VA_ARGS__); \
+ __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
} while (0)
+#define blk_add_trace_msg(q, fmt, ...) \
+ blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
#define BLK_TN_MAX_MSG 128
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
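
Call-site sketch (message text illustrative): the cgroup-aware form tags the message with a blkcg, and the legacy macro is now just the NULL-cgroup case:

#include <linux/blktrace_api.h>

static void note_throttle(struct request_queue *q, struct blkcg *blkcg,
			  unsigned int delay_ms)
{
	blk_add_cgroup_trace_msg(q, blkcg, "throtl: bio delayed %u ms",
				 delay_ms);		/* attributed */
	blk_add_trace_msg(q, "queue checked");		/* unattributed */
}
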
@@ -82,6 +86,7 @@ extern struct attribute_group blk_trace_attr_group;
# define blk_trace_startstop(q, start) (-ENOTTY)
# define blk_trace_remove(q) (-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
+# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0)
# define blk_trace_remove_sysfs(dev) do { } while (0)
# define blk_trace_note_message_enabled(q) (false)
static inline int blk_trace_init_sysfs(struct device *dev)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b69e7a5869ff..f1af7d63d678 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -14,8 +14,10 @@
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
+#include <linux/numa.h>
struct perf_event;
+struct bpf_prog;
struct bpf_map;
/* map is generic key/value storage optionally accesible by eBPF programs */
@@ -48,6 +50,7 @@ struct bpf_map {
u32 map_flags;
u32 pages;
u32 id;
+ int numa_node;
struct user_struct *user;
const struct bpf_map_ops *ops;
struct work_struct work;
@@ -117,39 +120,27 @@ enum bpf_access_type {
};
/* types of values stored in eBPF registers */
+/* Pointer types represent:
+ * pointer
+ * pointer + imm
+ * pointer + (u16) var
+ * pointer + (u16) var + imm
+ * if (range > 0) then [ptr, ptr + range - off) is safe to access
+ * if (id > 0) means that some 'var' was added
+ * if (off > 0) means that 'imm' was added
+ */
enum bpf_reg_type {
NOT_INIT = 0, /* nothing was written into register */
- UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */
+ SCALAR_VALUE, /* reg doesn't contain a valid pointer */
PTR_TO_CTX, /* reg points to bpf_context */
CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
PTR_TO_MAP_VALUE, /* reg points to map element value */
PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
- FRAME_PTR, /* reg == frame_pointer */
- PTR_TO_STACK, /* reg == frame_pointer + imm */
- CONST_IMM, /* constant integer value */
-
- /* PTR_TO_PACKET represents:
- * skb->data
- * skb->data + imm
- * skb->data + (u16) var
- * skb->data + (u16) var + imm
- * if (range > 0) then [ptr, ptr + range - off) is safe to access
- * if (id > 0) means that some 'var' was added
- * if (off > 0) menas that 'imm' was added
- */
- PTR_TO_PACKET,
+ PTR_TO_STACK, /* reg == frame_pointer + offset */
+ PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
-
- /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
- * elem value. We only allow this if we can statically verify that
- * access from this register are going to fall within the size of the
- * map element.
- */
- PTR_TO_MAP_VALUE_ADJ,
};
-struct bpf_prog;
-
/* The information passed from prog-specific *_is_valid_access
* back to the verifier.
*/
@@ -262,6 +253,7 @@ struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
+struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
@@ -272,7 +264,7 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
-void *bpf_map_area_alloc(size_t size);
+void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
extern int sysctl_unprivileged_bpf_disabled;
@@ -318,6 +310,19 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+
+/* Map specifics */
+struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
+void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
+void __dev_map_flush(struct bpf_map *map);
+
+/* Return map's numa specified by userspace */
+static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
+{
+ return (attr->map_flags & BPF_F_NUMA_NODE) ?
+ attr->numa_node : NUMA_NO_NODE;
+}
+
#else
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -348,6 +353,12 @@ static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct bpf_prog *__must_check
+bpf_prog_inc_not_zero(struct bpf_prog *prog)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
return 0;
@@ -356,8 +367,44 @@ static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}
+
+static inline int bpf_obj_get_user(const char __user *pathname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ return NULL;
+}
+
+static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index)
+{
+}
+
+static inline void __dev_map_flush(struct bpf_map *map)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
+struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
+int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+#else
+static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
+{
+ return NULL;
+}
+
+static inline int sock_map_prog(struct bpf_map *map,
+ struct bpf_prog *prog,
+ u32 type)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
@@ -374,6 +421,7 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
+extern const struct bpf_func_proto bpf_sock_map_update_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
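
The userspace side of the new numa_node plumbing, as a hedged sketch (wrapper function is illustrative): setting BPF_F_NUMA_NODE plus attr.numa_node at map-create time is what bpf_map_attr_numa_node() reads back in the kernel.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_numa_map(void)
{
	union bpf_attr attr = {
		.map_type    = BPF_MAP_TYPE_HASH,
		.key_size    = 4,
		.value_size  = 8,
		.max_entries = 1024,
		.map_flags   = BPF_F_NUMA_NODE,
		.numa_node   = 1,	/* pin map memory to node 1 */
	};

	/* in-kernel, bpf_map_attr_numa_node(&attr) now returns 1;
	 * without the flag it falls back to NUMA_NO_NODE */
	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
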
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 3d137c33d664..6f1a567667b8 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -11,6 +11,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops)
BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb_prog_ops)
#endif
#ifdef CONFIG_BPF_EVENTS
BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)
@@ -35,3 +36,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
+#ifdef CONFIG_NET
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+#ifdef CONFIG_STREAM_PARSER
+BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
+#endif
+#endif
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 8e5d31f6faef..b8d200f60a40 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -9,41 +9,75 @@
#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
+#include <linux/tnum.h>
- /* Just some arbitrary values so we can safely do math without overflowing and
- * are obviously wrong for any sort of memory access.
- */
-#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
-#define BPF_REGISTER_MIN_RANGE -1
+/* Maximum variable offset umax_value permitted when resolving memory accesses.
+ * In practice this is far bigger than any realistic pointer offset; this limit
+ * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
+ */
+#define BPF_MAX_VAR_OFF (1ULL << 31)
+/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
+ * that converting umax_value to int cannot overflow.
+ */
+#define BPF_MAX_VAR_SIZ INT_MAX
+
+/* Liveness marks, used for registers and spilled-regs (in stack slots).
+ * Read marks propagate upwards until they find a write mark; they record that
+ * "one of this state's descendants read this reg" (and therefore the reg is
+ * relevant for states_equal() checks).
+ * Write marks collect downwards and do not propagate; they record that "the
+ * straight-line code that reached this state (from its parent) wrote this reg"
+ * (and therefore that reads propagated from this state or its descendants
+ * should not propagate to its parent).
+ * A state with a write mark can receive read marks; it just won't propagate
+ * them to its parent, since the write mark is a property, not of the state,
+ * but of the link between it and its parent. See mark_reg_read() and
+ * mark_stack_slot_read() in kernel/bpf/verifier.c.
+ */
+enum bpf_reg_liveness {
+ REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
+ REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
+ REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
+};
struct bpf_reg_state {
enum bpf_reg_type type;
union {
- /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
- s64 imm;
-
- /* valid when type == PTR_TO_PACKET* */
- struct {
- u16 off;
- u16 range;
- };
+ /* valid when type == PTR_TO_PACKET */
+ u16 range;
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
struct bpf_map *map_ptr;
};
+ /* Fixed part of pointer offset, pointer types only */
+ s32 off;
+ /* For PTR_TO_PACKET, used to find other pointers with the same variable
+ * offset, so they can share range knowledge.
+ * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
+ * came from, when one is tested for != NULL.
+ */
u32 id;
+ /* Ordering of fields matters. See states_equal() */
+ /* For scalar types (SCALAR_VALUE), this represents our knowledge of
+ * the actual value.
+ * For pointer types, this represents the variable part of the offset
+ * from the pointed-to object, and is shared with all bpf_reg_states
+ * with the same id as us.
+ */
+ struct tnum var_off;
/* Used to determine if any memory access using this register will
- * result in a bad access. These two fields must be last.
- * See states_equal()
+ * result in a bad access.
+ * These refer to the same value as var_off, not necessarily the actual
+ * contents of the register.
*/
- s64 min_value;
- u64 max_value;
- u32 min_align;
- u32 aux_off;
- u32 aux_off_align;
- bool value_from_signed;
+ s64 smin_value; /* minimum possible (s64)value */
+ s64 smax_value; /* maximum possible (s64)value */
+ u64 umin_value; /* minimum possible (u64)value */
+ u64 umax_value; /* maximum possible (u64)value */
+ /* This field must be last, for states_equal() reasons. */
+ enum bpf_reg_liveness live;
};
enum bpf_stack_slot_type {
@@ -61,6 +95,7 @@ struct bpf_verifier_state {
struct bpf_reg_state regs[MAX_BPF_REG];
u8 stack_slot_type[MAX_BPF_STACK];
struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+ struct bpf_verifier_state *parent;
};
/* linked list of verifier states used to prune search */
@@ -103,7 +138,6 @@ struct bpf_verifier_env {
u32 id_gen; /* used to generate unique reg IDs */
bool allow_ptr_leaks;
bool seen_direct_write;
- bool varlen_map_value_access;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
};
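
The var_off field above is a tnum ("tracked number") from the new <linux/tnum.h>: a (value, mask) pair in which a mask bit of 0 means that bit is known and equal to the corresponding bit of value. A small illustrative helper built on that representation (not from this diff; the function is hypothetical):

#include <linux/bitops.h>
#include <linux/bpf_verifier.h>
#include <linux/tnum.h>

/* hypothetical: is bit @bit of @reg known to be 1? */
static bool reg_bit_known_set(const struct bpf_reg_state *reg, int bit)
{
	return !(reg->var_off.mask & BIT_ULL(bit)) &&
	       (reg->var_off.value & BIT_ULL(bit));
}
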
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index e34dde2da0ef..b1be0233ce35 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -24,6 +24,7 @@
#define _BLK_BSG_
#include <linux/blkdev.h>
+#include <scsi/scsi_request.h>
struct request;
struct device;
@@ -37,6 +38,7 @@ struct bsg_buffer {
};
struct bsg_job {
+ struct scsi_request sreq;
struct device *dev;
struct request *req;
@@ -66,8 +68,9 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
-struct request_queue *bsg_setup_queue(struct device *dev, char *name,
- bsg_job_fn *job_fn, int dd_job_size);
+struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
+ bsg_job_fn *job_fn, int dd_job_size,
+ void (*release)(struct device *));
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c8dae555eccf..446b24cac67d 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page **, void **,
get_block_t *, loff_t *);
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 392041475c72..ffd215988392 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -3,5 +3,9 @@
#include <uapi/linux/byteorder/big_endian.h>
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#warning inconsistent configuration, needs CONFIG_CPU_BIG_ENDIAN
+#endif
+
#include <linux/byteorder/generic.h>
#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index 08057377aa23..ba910bb9aad0 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -3,5 +3,9 @@
#include <uapi/linux/byteorder/little_endian.h>
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#warning inconsistent configuration, CONFIG_CPU_BIG_ENDIAN is set
+#endif
+
#include <linux/byteorder/generic.h>
#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 6ffb67e10c06..b52e278e4744 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -248,4 +248,6 @@ extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+extern int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size);
+
#endif /* !_LINUX_CAPABILITY_H */
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 3285c944194a..7e9c991c95e0 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -20,12 +20,10 @@
#include <crypto/aes.h>
#include <crypto/sha.h>
-
struct ccp_device;
struct ccp_cmd;
-#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
- defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
+#if defined(CONFIG_CRYPTO_DEV_SP_CCP)
/**
* ccp_present - check if a CCP device is present
@@ -71,7 +69,7 @@ unsigned int ccp_version(void);
*/
int ccp_enqueue_cmd(struct ccp_cmd *cmd);
-#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
+#else /* CONFIG_CRYPTO_DEV_SP_CCP is not enabled */
static inline int ccp_present(void)
{
@@ -88,7 +86,7 @@ static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
return -ENODEV;
}
-#endif /* CONFIG_CRYPTO_DEV_CCP_DD */
+#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
/***** AES engine *****/
@@ -231,6 +229,7 @@ enum ccp_xts_aes_unit_size {
* AES operation the new IV overwrites the old IV.
*/
struct ccp_xts_aes_engine {
+ enum ccp_aes_type type;
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index edf5b04b918a..b422170b791a 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -167,6 +167,8 @@ struct ceph_mon_request_header {
struct ceph_mon_statfs {
struct ceph_mon_request_header monhdr;
struct ceph_fsid fsid;
+ __u8 contains_data_pool;
+ __le64 data_pool;
} __attribute__ ((packed));
struct ceph_statfs {
@@ -669,7 +671,9 @@ enum {
extern const char *ceph_cap_op_name(int op);
/* flags field in client cap messages (version >= 10) */
-#define CEPH_CLIENT_CAPS_SYNC (0x1)
+#define CEPH_CLIENT_CAPS_SYNC (1<<0)
+#define CEPH_CLIENT_CAPS_NO_CAPSNAP (1<<1)
+#define CEPH_CLIENT_CAPS_PENDING_CAPSNAP	(1<<2)
/*
* caps message, used for capability callbacks, acks, requests, etc.
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 8a79587e1317..4c846aabd9f6 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -84,17 +84,6 @@ struct ceph_options {
#define CEPH_AUTH_NAME_DEFAULT "guest"
-/*
- * Delay telling the MDS we no longer want caps, in case we reopen
- * the file. Delay a minimum amount of time, even if we send a cap
- * message for some other reason.  Otherwise, take the opportunity to
- * update the mds to avoid sending another message later.
- */
-#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
-#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
-
-#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4)
-
/* mount state */
enum {
CEPH_MOUNT_MOUNTING,
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index d5a3ecea578d..0fa990bf867a 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -133,8 +133,8 @@ void ceph_monc_renew_subs(struct ceph_mon_client *monc);
extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
unsigned long timeout);
-extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
- struct ceph_statfs *buf);
+int ceph_monc_do_statfs(struct ceph_mon_client *monc, u64 data_pool,
+ struct ceph_statfs *buf);
int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
u64 *newest);
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index b8281feda9c7..01408841c9c4 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -230,7 +230,6 @@ extern const char *ceph_osd_state_name(int s);
\
/* fancy write */ \
f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \
- f(STARTSYNC, __CEPH_OSD_OP(WR, DATA, 7), "startsync") \
f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \
f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \
\
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 09f4c7df1478..ade4a78a54c2 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -74,6 +74,11 @@ enum {
* aren't writeable from inside the namespace.
*/
CGRP_ROOT_NS_DELEGATE = (1 << 3),
+
+ /*
+ * Enable cpuset controller in v1 cgroup to use v2 behavior.
+ */
+ CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
};
/* cftype->flags */
@@ -172,6 +177,14 @@ struct css_set {
/* reference count */
refcount_t refcount;
+ /*
+ * For a domain cgroup, the following points to self. If threaded,
+ * to the matching cset of the nearest domain ancestor. The
+ * dom_cset provides access to the domain cgroup and its csses to
+ * which domain level resource consumptions should be charged.
+ */
+ struct css_set *dom_cset;
+
/* the default cgroup associated with this css_set */
struct cgroup *dfl_cgrp;
@@ -200,6 +213,10 @@ struct css_set {
*/
struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
+ /* all threaded csets whose ->dom_cset points to this cset */
+ struct list_head threaded_csets;
+ struct list_head threaded_csets_node;
+
/*
* List running through all cgroup groups in the same hash
* slot. Protected by css_set_lock
@@ -261,13 +278,35 @@ struct cgroup {
*/
int level;
+	/* Maximum allowed descendant tree depth */
+ int max_depth;
+
+ /*
+	 * Keep track of total numbers of visible and dying descendant cgroups.
+	 * Dying cgroups are cgroups which were deleted by a user,
+	 * but still exist because someone else is holding a reference.
+	 * max_descendants is the maximum allowed number of descendant cgroups.
+ */
+ int nr_descendants;
+ int nr_dying_descendants;
+ int max_descendants;
+
/*
* Each non-empty css_set associated with this cgroup contributes
-	 * one to populated_cnt.  All children with non-zero populated_cnt
- * of their own contribute one. The count is zero iff there's no
- * task in this cgroup or its subtree.
+ * one to nr_populated_csets. The counter is zero iff this cgroup
+ * doesn't have any tasks.
+ *
+ * All children which have non-zero nr_populated_csets and/or
+ * nr_populated_children of their own contribute one to either
+ * nr_populated_domain_children or nr_populated_threaded_children
+ * depending on their type. Each counter is zero iff all cgroups
+ * of the type in the subtree proper don't have any tasks.
*/
- int populated_cnt;
+ int nr_populated_csets;
+ int nr_populated_domain_children;
+ int nr_populated_threaded_children;
+
+ int nr_threaded_children; /* # of live threaded child cgroups */
struct kernfs_node *kn; /* cgroup kernfs entry */
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
@@ -306,6 +345,15 @@ struct cgroup {
struct list_head e_csets[CGROUP_SUBSYS_COUNT];
/*
+ * If !threaded, self. If threaded, it points to the nearest
+ * domain ancestor. Inside a threaded subtree, cgroups are exempt
+ * from process granularity and no-internal-task constraint.
+ * Domain level resource consumptions which aren't tied to a
+ * specific task are charged to the dom_cgrp.
+ */
+ struct cgroup *dom_cgrp;
+
+ /*
* list of pidlists, up to two for each namespace (one for procs, one
* for tasks); created on demand.
*/
@@ -492,6 +540,18 @@ struct cgroup_subsys {
bool implicit_on_dfl:1;
/*
+	 * If %true, the controller supports threaded mode on the default
+	 * hierarchy.  In a threaded subtree, both process granularity and
+	 * the no-internal-process constraint are ignored and threaded
+	 * controllers should be able to handle that.
+ *
+ * Note that as an implicit controller is automatically enabled on
+ * all cgroups on the default hierarchy, it should also be
+ * threaded. implicit && !threaded is not supported.
+ */
+ bool threaded:1;
+
+ /*
* If %false, this subsystem is properly hierarchical -
* configuration, resource accounting and restriction on a parent
* cgroup cover those of its children. If %true, hierarchy support
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 710a005c6b7a..d023ac5e377f 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -36,18 +36,28 @@
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
+/* walk only threadgroup leaders */
+#define CSS_TASK_ITER_PROCS (1U << 0)
+/* walk all threaded css_sets in the domain */
+#define CSS_TASK_ITER_THREADED (1U << 1)
+
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
+ unsigned int flags;
struct list_head *cset_pos;
struct list_head *cset_head;
+ struct list_head *tcset_pos;
+ struct list_head *tcset_head;
+
struct list_head *task_pos;
struct list_head *tasks_head;
struct list_head *mg_tasks_head;
struct css_set *cur_cset;
+ struct css_set *cur_dcset;
struct task_struct *cur_task;
struct list_head iters_node; /* css_set->task_iters */
};
@@ -129,7 +139,7 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);
-void css_task_iter_start(struct cgroup_subsys_state *css,
+void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
@@ -388,6 +398,16 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
percpu_ref_put_many(&css->refcnt, n);
}
+static inline void cgroup_get(struct cgroup *cgrp)
+{
+ css_get(&cgrp->self);
+}
+
+static inline bool cgroup_tryget(struct cgroup *cgrp)
+{
+ return css_tryget(&cgrp->self);
+}
+
static inline void cgroup_put(struct cgroup *cgrp)
{
css_put(&cgrp->self);
@@ -500,6 +520,20 @@ static inline struct cgroup *task_cgroup(struct task_struct *task,
return task_css(task, subsys_id)->cgroup;
}
+static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
+{
+ return task_css_set(task)->dfl_cgrp;
+}
+
+static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
+{
+ struct cgroup_subsys_state *parent_css = cgrp->self.parent;
+
+ if (parent_css)
+ return container_of(parent_css, struct cgroup, self);
+ return NULL;
+}
+
/**
* cgroup_is_descendant - test ancestry
* @cgrp: the cgroup to be tested
@@ -537,13 +571,14 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
- return cgrp->populated_cnt;
+ return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
+ cgrp->nr_populated_threaded_children;
}
/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
- return cgrp->kn->ino;
+ return cgrp->kn->id.ino;
}
/* cft/css accessors for cftype->write() operation */
@@ -609,6 +644,13 @@ static inline void cgroup_kthread_ready(void)
current->no_cgroup_migration = 0;
}
+static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
+{
+ return &cgrp->kn->id;
+}
+
+void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
+ char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
@@ -631,12 +673,19 @@ static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
+static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
+{
+ return NULL;
+}
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
{
return true;
}
+
+static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
+ char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */
/*
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index c59c62571e4f..5100ec1b5d55 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -343,6 +343,7 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
u8 clk_gate_flags, spinlock_t *lock);
void clk_unregister_gate(struct clk *clk);
void clk_hw_unregister_gate(struct clk_hw *hw);
+int clk_gate_is_enabled(struct clk_hw *hw);
struct clk_div_table {
unsigned int val;
@@ -565,6 +566,9 @@ struct clk_fractional_divider {
u8 nwidth;
u32 nmask;
u8 flags;
+ void (*approximation)(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate,
+ unsigned long *m, unsigned long *n);
spinlock_t *lock;
};
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 17f413bbbedf..6aca5ce8a99a 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -185,4 +185,29 @@
#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
#define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */
+#define AT91_PMC_AUDIO_PLL0 0x14c
+#define AT91_PMC_AUDIO_PLL_PLLEN (1 << 0)
+#define AT91_PMC_AUDIO_PLL_PADEN (1 << 1)
+#define AT91_PMC_AUDIO_PLL_PMCEN (1 << 2)
+#define AT91_PMC_AUDIO_PLL_RESETN (1 << 3)
+#define AT91_PMC_AUDIO_PLL_ND_OFFSET 8
+#define AT91_PMC_AUDIO_PLL_ND_MASK (0x7f << AT91_PMC_AUDIO_PLL_ND_OFFSET)
+#define AT91_PMC_AUDIO_PLL_ND(n) ((n) << AT91_PMC_AUDIO_PLL_ND_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPMC_OFFSET 16
+#define AT91_PMC_AUDIO_PLL_QDPMC_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPMC(n) ((n) << AT91_PMC_AUDIO_PLL_QDPMC_OFFSET)
+
+#define AT91_PMC_AUDIO_PLL1 0x150
+#define AT91_PMC_AUDIO_PLL_FRACR_MASK 0x3fffff
+#define AT91_PMC_AUDIO_PLL_QDPAD_OFFSET 24
+#define AT91_PMC_AUDIO_PLL_QDPAD_MASK (0x7f << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET AT91_PMC_AUDIO_PLL_QDPAD_OFFSET
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV_MASK (0x3 << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_DIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_DIV_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET 26
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX 0x1f
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MASK (AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET)
+#define AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(n) ((n) << AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_OFFSET)
+
#endif
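
For readers new to the OFFSET/MASK/encode triplets used throughout this block, a minimal standalone read-modify-write of one such field follows; the register value and field width are invented for the demo.

	#include <stdint.h>
	#include <stdio.h>

	#define FIELD_OFFSET	8
	#define FIELD_MASK	(0x7f << FIELD_OFFSET)
	#define FIELD(n)	((n) << FIELD_OFFSET)

	int main(void)
	{
		uint32_t reg = 0xdeadbe00;	/* pretend register contents */

		/* clear the field, then merge in the new value */
		reg = (reg & ~FIELD_MASK) | (FIELD(0x2a) & FIELD_MASK);
		printf("field = 0x%02x\n",
		       (unsigned)((reg & FIELD_MASK) >> FIELD_OFFSET)); /* 0x2a */
		return 0;
	}
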
diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h
new file mode 100644
index 000000000000..990f760f70e5
--- /dev/null
+++ b/include/linux/clk/sunxi-ng.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CLK_SUNXI_NG_H_
+#define _LINUX_CLK_SUNXI_NG_H_
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_SUNXI_CCU
+int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
+int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
+#else
+static inline int sunxi_ccu_set_mmc_timing_mode(struct clk *clk,
+ bool new_mode)
+{
+ return -ENOTSUPP;
+}
+
+static inline int sunxi_ccu_get_mmc_timing_mode(struct clk *clk)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 5a6a109b4a50..a5619de3437d 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -27,7 +27,7 @@
#endif
#ifndef __SC_DELOUSE
-#define __SC_DELOUSE(t,v) ((t)(unsigned long)(v))
+#define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v))
#endif
#define COMPAT_SYSCALL_DEFINE0(name) \
@@ -171,15 +171,6 @@ extern int get_compat_itimerspec64(struct itimerspec64 *its,
extern int put_compat_itimerspec64(const struct itimerspec64 *its,
struct compat_itimerspec __user *uits);
-/*
- * This function convert a timespec if necessary and returns a *user
- * space* pointer. If no conversion is necessary, it returns the
- * initial pointer. NULL is a legitimate argument and will always
- * output NULL.
- */
-extern int compat_convert_timespec(struct timespec __user **,
- const void __user *);
-
struct compat_iovec {
compat_uptr_t iov_base;
compat_size_t iov_len;
@@ -365,10 +356,10 @@ asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
asmlinkage long compat_sys_preadv64(unsigned long fd,
@@ -382,6 +373,18 @@ asmlinkage long compat_sys_pwritev64(unsigned long fd,
unsigned long vlen, loff_t pos);
#endif
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
+asmlinkage long compat_sys_readv64v2(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 71b86a5d3061..16d41de92ee3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -203,17 +203,6 @@
#endif
#endif
-#ifdef CONFIG_STACK_VALIDATION
-#define annotate_unreachable() ({ \
- asm("%c0:\t\n" \
- ".pushsection .discard.unreachable\t\n" \
- ".long %c0b - .\t\n" \
- ".popsection\t\n" : : "i" (__LINE__)); \
-})
-#else
-#define annotate_unreachable()
-#endif
-
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 43cac547f773..e95a2631e545 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -185,8 +185,34 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
/* Unreachable code */
+#ifdef CONFIG_STACK_VALIDATION
+#define annotate_reachable() ({ \
+ asm("%c0:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__LINE__)); \
+})
+#define annotate_unreachable() ({ \
+ asm("%c0:\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__LINE__)); \
+})
+#define ASM_UNREACHABLE \
+ "999:\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long 999b - .\n\t" \
+ ".popsection\n\t"
+#else
+#define annotate_reachable()
+#define annotate_unreachable()
+#endif
+
+#ifndef ASM_UNREACHABLE
+# define ASM_UNREACHABLE
+#endif
#ifndef unreachable
-# define unreachable() do { } while (1)
+# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif
/*
@@ -521,7 +547,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __compiletime_error_fallback(condition) do { } while (0)
#endif
-#define __compiletime_assert(condition, msg, prefix, suffix) \
+#ifdef __OPTIMIZE__
+# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
bool __cond = !(condition); \
extern void prefix ## suffix(void) __compiletime_error(msg); \
@@ -529,6 +556,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
prefix ## suffix(); \
__compiletime_error_fallback(__cond); \
} while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+#endif
#define _compiletime_assert(condition, msg, prefix, suffix) \
__compiletime_assert(condition, msg, prefix, suffix)
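
The new __OPTIMIZE__ guard exists because the trick only works when the compiler folds the dead branch away. Below is a standalone sketch of the mechanism (GCC/Clang; build with -O1 or higher); the kernel's real macro additionally pastes prefix/suffix into a unique symbol per use site.

	#include <stdbool.h>

	#define compiletime_assert(condition, msg)			\
	do {								\
		extern void assert_failed(void)				\
			__attribute__((error(msg)));			\
		bool __cond = !(condition);				\
		if (__cond)	/* folded away when condition holds */	\
			assert_failed();				\
	} while (0)

	int main(void)
	{
		compiletime_assert(sizeof(int) >= 4, "int too small");	/* ok */
		/* compiletime_assert(1 == 2, "nope");	-- build error */
		return 0;
	}

	/* At -O0 the dead branch is not eliminated, so even the passing
	 * assert would trip the error attribute -- which is exactly why
	 * the kernel macro above is now a no-op unless __OPTIMIZE__. */
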
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 5d5aaae3af43..cae5400022a3 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -9,6 +9,9 @@
*/
#include <linux/wait.h>
+#ifdef CONFIG_LOCKDEP_COMPLETIONS
+#include <linux/lockdep.h>
+#endif
/*
* struct completion - structure used to maintain state for a "completion"
@@ -25,13 +28,53 @@
struct completion {
unsigned int done;
wait_queue_head_t wait;
+#ifdef CONFIG_LOCKDEP_COMPLETIONS
+ struct lockdep_map_cross map;
+#endif
};
+#ifdef CONFIG_LOCKDEP_COMPLETIONS
+static inline void complete_acquire(struct completion *x)
+{
+ lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
+}
+
+static inline void complete_release(struct completion *x)
+{
+ lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
+}
+
+static inline void complete_release_commit(struct completion *x)
+{
+ lock_commit_crosslock((struct lockdep_map *)&x->map);
+}
+
+#define init_completion(x) \
+do { \
+ static struct lock_class_key __key; \
+ lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
+ "(complete)" #x, \
+ &__key, 0); \
+ __init_completion(x); \
+} while (0)
+#else
+#define init_completion(x) __init_completion(x)
+static inline void complete_acquire(struct completion *x) {}
+static inline void complete_release(struct completion *x) {}
+static inline void complete_release_commit(struct completion *x) {}
+#endif
+
+#ifdef CONFIG_LOCKDEP_COMPLETIONS
+#define COMPLETION_INITIALIZER(work) \
+ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
+ STATIC_CROSS_LOCKDEP_MAP_INIT("(complete)" #work, &(work)) }
+#else
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+#endif
#define COMPLETION_INITIALIZER_ONSTACK(work) \
- ({ init_completion(&work); work; })
+ (*({ init_completion(&work); &work; }))
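
The ONSTACK initializer now dereferences a pointer produced inside the statement expression instead of yielding the struct by value, presumably to avoid an extra on-stack copy of the freshly initialized completion (which, with CONFIG_LOCKDEP_COMPLETIONS, embeds a lockdep_map_cross). A minimal standalone illustration of the difference between the two forms (GNU C statement expressions):

	#include <stdio.h>

	struct thing { int v; };

	#define INIT_COPY(t)	({ (t).v = 1; (t); })	   /* yields a copy */
	#define INIT_NOCOPY(t)	(*({ (t).v = 1; &(t); }))  /* yields t itself */

	int main(void)
	{
		struct thing a, b;
		struct thing c = INIT_COPY(a);	   /* c is distinct from a */
		struct thing *p = &INIT_NOCOPY(b); /* an lvalue, so & works */

		printf("%d %d %d\n", c.v, p->v, p == &b);	/* 1 1 1 */
		return 0;
	}
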
/**
* DECLARE_COMPLETION - declare and initialize a completion structure
@@ -70,7 +113,7 @@ struct completion {
* This inline function will initialize a dynamically created completion
* structure.
*/
-static inline void init_completion(struct completion *x)
+static inline void __init_completion(struct completion *x)
{
x->done = 0;
init_waitqueue_head(&x->wait);
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 7d410260661b..edfeaba95429 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -24,6 +24,12 @@
/* ETMv3.5/PTM's ETMCR config bit */
#define ETM_OPT_CYCACC 12
#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
+
+/* ETMv4 CONFIGR programming bits for the ETM OPTs */
+#define ETM4_CFG_BIT_CYCACC 4
+#define ETM4_CFG_BIT_TS 11
+#define ETM4_CFG_BIT_RETSTK 12
static inline int coresight_get_trace_id(int cpu)
{
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 4c671fc2081e..723e952fde0d 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -74,36 +74,36 @@ enum {
* Corrected Machine Check
*/
#define CPER_NOTIFY_CMC \
- UUID_LE(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
- 0xEB, 0xD4, 0xF8, 0x90)
+ GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
/* Corrected Platform Error */
#define CPER_NOTIFY_CPE \
- UUID_LE(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \
- 0xF2, 0x7E, 0xBE, 0xEE)
+ GUID_INIT(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \
+ 0xF2, 0x7E, 0xBE, 0xEE)
/* Machine Check Exception */
#define CPER_NOTIFY_MCE \
- UUID_LE(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
- 0xE1, 0x49, 0x13, 0xBB)
+ GUID_INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
/* PCI Express Error */
#define CPER_NOTIFY_PCIE \
- UUID_LE(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \
- 0xAF, 0x67, 0xC1, 0x04)
+ GUID_INIT(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \
+ 0xAF, 0x67, 0xC1, 0x04)
/* INIT Record (for IPF) */
#define CPER_NOTIFY_INIT \
- UUID_LE(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \
- 0xD3, 0x9B, 0xC9, 0x8E)
+ GUID_INIT(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \
+ 0xD3, 0x9B, 0xC9, 0x8E)
/* Non-Maskable Interrupt */
#define CPER_NOTIFY_NMI \
- UUID_LE(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \
- 0x85, 0xD6, 0xE9, 0x8A)
+ GUID_INIT(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \
+ 0x85, 0xD6, 0xE9, 0x8A)
/* BOOT Error Record */
#define CPER_NOTIFY_BOOT \
- UUID_LE(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
- 0xD4, 0x64, 0xB3, 0x8F)
+ GUID_INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
/* DMA Remapping Error */
#define CPER_NOTIFY_DMAR \
- UUID_LE(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
- 0x72, 0x2D, 0xEB, 0x41)
+ GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
+ 0x72, 0x2D, 0xEB, 0x41)
/*
* Flags bits definitions for flags in struct cper_record_header
@@ -170,50 +170,50 @@ enum {
* Processor Generic
*/
#define CPER_SEC_PROC_GENERIC \
- UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
- 0x93, 0xC4, 0xF3, 0xDB)
+ GUID_INIT(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
+ 0x93, 0xC4, 0xF3, 0xDB)
/* Processor Specific: X86/X86_64 */
#define CPER_SEC_PROC_IA \
- UUID_LE(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
- 0x24, 0x2B, 0x6E, 0x1D)
+ GUID_INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
/* Processor Specific: IA64 */
#define CPER_SEC_PROC_IPF \
- UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \
- 0x80, 0xC7, 0x3C, 0x88, 0x81)
+ GUID_INIT(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \
+ 0x80, 0xC7, 0x3C, 0x88, 0x81)
/* Processor Specific: ARM */
#define CPER_SEC_PROC_ARM \
- UUID_LE(0xE19E3D16, 0xBC11, 0x11E4, 0x9C, 0xAA, 0xC2, 0x05, \
- 0x1D, 0x5D, 0x46, 0xB0)
+ GUID_INIT(0xE19E3D16, 0xBC11, 0x11E4, 0x9C, 0xAA, 0xC2, 0x05, \
+ 0x1D, 0x5D, 0x46, 0xB0)
/* Platform Memory */
#define CPER_SEC_PLATFORM_MEM \
- UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
- 0xED, 0x7C, 0x83, 0xB1)
+ GUID_INIT(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
+ 0xED, 0x7C, 0x83, 0xB1)
#define CPER_SEC_PCIE \
- UUID_LE(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \
- 0xCB, 0x3C, 0x6F, 0x35)
+ GUID_INIT(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \
+ 0xCB, 0x3C, 0x6F, 0x35)
/* Firmware Error Record Reference */
#define CPER_SEC_FW_ERR_REC_REF \
- UUID_LE(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \
- 0x9C, 0x8E, 0x69, 0xED)
+ GUID_INIT(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \
+ 0x9C, 0x8E, 0x69, 0xED)
/* PCI/PCI-X Bus */
#define CPER_SEC_PCI_X_BUS \
- UUID_LE(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \
- 0xD3, 0xF9, 0xC9, 0xDD)
+ GUID_INIT(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \
+ 0xD3, 0xF9, 0xC9, 0xDD)
/* PCI Component/Device */
#define CPER_SEC_PCI_DEV \
- UUID_LE(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \
- 0x8B, 0x00, 0x13, 0x26)
+ GUID_INIT(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \
+ 0x8B, 0x00, 0x13, 0x26)
#define CPER_SEC_DMAR_GENERIC \
- UUID_LE(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \
- 0xDE, 0x3E, 0x2C, 0x64)
+ GUID_INIT(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \
+ 0xDE, 0x3E, 0x2C, 0x64)
/* Intel VT for Directed I/O specific DMAr */
#define CPER_SEC_DMAR_VT \
- UUID_LE(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \
- 0xDD, 0x93, 0xE8, 0xCF)
+ GUID_INIT(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \
+ 0xDD, 0x93, 0xE8, 0xCF)
/* IOMMU specific DMAr */
#define CPER_SEC_DMAR_IOMMU \
- UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
- 0xDF, 0xAA, 0x84, 0xEC)
+ GUID_INIT(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
+ 0xDF, 0xAA, 0x84, 0xEC)
#define CPER_PROC_VALID_TYPE 0x0001
#define CPER_PROC_VALID_ISA 0x0002
@@ -290,10 +290,10 @@ struct cper_record_header {
__u32 validation_bits;
__u32 record_length;
__u64 timestamp;
- uuid_le platform_id;
- uuid_le partition_id;
- uuid_le creator_id;
- uuid_le notification_type;
+ guid_t platform_id;
+ guid_t partition_id;
+ guid_t creator_id;
+ guid_t notification_type;
__u64 record_id;
__u32 flags;
__u64 persistence_information;
@@ -309,8 +309,8 @@ struct cper_section_descriptor {
__u8 validation_bits;
__u8 reserved; /* must be zero */
__u32 flags;
- uuid_le section_type;
- uuid_le fru_id;
+ guid_t section_type;
+ guid_t fru_id;
__u32 section_severity;
__u8 fru_text[20];
};
@@ -343,7 +343,7 @@ struct cper_sec_proc_ia {
/* IA32/X64 Processor Error Information Structure */
struct cper_ia_err_info {
- uuid_le err_type;
+ guid_t err_type;
__u64 validation_bits;
__u64 check_info;
__u64 target_id;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index f10a9b3761cd..537ff842ff73 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -127,6 +127,15 @@ struct cpufreq_policy {
*/
unsigned int transition_delay_us;
+ /*
+ * Remote DVFS flag (Not added to the driver structure as we don't want
+	 * Remote DVFS flag (not added to the driver structure as we don't want
+ *
+ * Should be set if CPUs can do DVFS on behalf of other CPUs from
+ * different cpufreq policies.
+ */
+ bool dvfs_possible_from_any_cpu;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
int cached_resolved_idx;
@@ -370,6 +379,12 @@ struct cpufreq_driver {
*/
#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5)
+/*
+ * Set by drivers to disallow use of governors with "dynamic_switching" flag
+ * set.
+ */
+#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING (1 << 6)
+
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
@@ -487,14 +502,8 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
* polling frequency is 1000 times the transition latency of the processor. The
* ondemand governor will work on any processor with transition latency <= 10ms,
* using appropriate sampling rate.
- *
- * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
- * the ondemand governor will not work. All times here are in us (microseconds).
*/
-#define MIN_SAMPLING_RATE_RATIO (2)
#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (20)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
@@ -507,9 +516,8 @@ struct cpufreq_governor {
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
- unsigned int max_transition_latency; /* HW must be able to switch to
- next freq faster than this value in nano secs or we
- will fallback to performance governor */
+ /* For governors which change frequency dynamically by themselves */
+ bool dynamic_switching;
struct list_head governor_list;
struct module *owner;
};
@@ -525,6 +533,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation);
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq);
+unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
@@ -562,6 +571,17 @@ struct governor_attr {
size_t count);
};
+static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
+{
+ /*
+ * Allow remote callbacks if:
+ * - dvfs_possible_from_any_cpu flag is set
+ * - the local and remote CPUs share cpufreq policy
+ */
+ return policy->dvfs_possible_from_any_cpu ||
+ cpumask_test_cpu(smp_processor_id(), policy->cpus);
+}
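
A standalone model of the gate this helper implements: a frequency update arriving on one CPU is acted upon only if the policy opts into any-CPU DVFS or the handling CPU belongs to the policy's own mask. The types and bitmask below are invented for the demo.

	#include <stdbool.h>
	#include <stdio.h>

	struct policy {
		unsigned long cpus;	/* bitmask of CPUs in this policy */
		bool dvfs_possible_from_any_cpu;
	};

	static bool can_do_remote_dvfs(const struct policy *p, int local_cpu)
	{
		return p->dvfs_possible_from_any_cpu ||
		       (p->cpus & (1UL << local_cpu));
	}

	int main(void)
	{
		struct policy big = { .cpus = 0xF0 };	/* CPUs 4-7 */

		printf("%d\n", can_do_remote_dvfs(&big, 1));	/* 0: foreign CPU */
		printf("%d\n", can_do_remote_dvfs(&big, 4));	/* 1: in policy */
		big.dvfs_possible_from_any_cpu = true;
		printf("%d\n", can_do_remote_dvfs(&big, 1));	/* 1: now allowed */
		return 0;
	}
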
+
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 82b30e638430..6d508767e144 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -3,8 +3,27 @@
#include <linux/types.h>
+/*
+ * CPU-up                       CPU-down
+ *
+ * BP           AP              BP              AP
+ *
+ * OFFLINE                      OFFLINE
+ *   |                            ^
+ *   v                            |
+ * BRINGUP_CPU->AP_OFFLINE      BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead)
+ *              |                               AP_OFFLINE
+ *              v (IRQ-off)       ,---------------^
+ *              AP_ONLINE         | (stop_machine)
+ *              |               TEARDOWN_CPU <- AP_ONLINE_IDLE
+ *              |                                 ^
+ *              v                                 |
+ *              AP_ACTIVE                       AP_ACTIVE
+ */
+
enum cpuhp_state {
- CPUHP_OFFLINE,
+ CPUHP_INVALID = -1,
+ CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
@@ -137,6 +156,9 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
+ CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index fc1e5d7fc1c7..8f7788d23b57 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -52,17 +52,18 @@ struct cpuidle_state {
int (*enter_dead) (struct cpuidle_device *dev, int index);
/*
- * CPUs execute ->enter_freeze with the local tick or entire timekeeping
+ * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
* suspended, so it must not re-enable interrupts at any point (even
* temporarily) or attempt to change states of clock event devices.
*/
- void (*enter_freeze) (struct cpuidle_device *dev,
+ void (*enter_s2idle) (struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index);
};
/* Idle State Flags */
#define CPUIDLE_FLAG_NONE (0x00)
+#define CPUIDLE_FLAG_POLLING (0x01) /* polling state */
#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
@@ -197,14 +198,14 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
-extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(bool enable);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
-static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(bool enable)
@@ -224,6 +225,12 @@ static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev,
}
#endif
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+void cpuidle_poll_state_init(struct cpuidle_driver *drv);
+#else
+static inline void cpuidle_poll_state_init(struct cpuidle_driver *drv) {}
+#endif
+
/******************************
* CPUIDLE GOVERNOR INTERFACE *
******************************/
@@ -250,12 +257,6 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-#define CPUIDLE_DRIVER_STATE_START 1
-#else
-#define CPUIDLE_DRIVER_STATE_START 0
-#endif
-
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
({ \
int __ret; \
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 4bf4479a3a80..cd415b733c2a 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -32,15 +32,15 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
#if NR_CPUS == 1
-#define nr_cpu_ids 1
+#define nr_cpu_ids 1U
#else
-extern int nr_cpu_ids;
+extern unsigned int nr_cpu_ids;
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
* not all bits may be allocated. */
-#define nr_cpumask_bits ((unsigned int)nr_cpu_ids)
+#define nr_cpumask_bits nr_cpu_ids
#else
#define nr_cpumask_bits ((unsigned int)NR_CPUS)
#endif
@@ -178,20 +178,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
}
-/**
- * cpumask_next - get the next cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
- * @srcp: the cpumask pointer
- *
- * Returns >= nr_cpu_ids if no further cpus set.
- */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
-{
- /* -1 is a legal arg here. */
- if (n != -1)
- cpumask_check(n);
- return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
-}
+unsigned int cpumask_next(int n, const struct cpumask *srcp);
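
cpumask_next() moves out of line here, but its contract is unchanged: @n may be -1 (meaning "start at CPU 0") and a return >= nr_cpu_ids means the mask is exhausted, which is what the for_each_cpu()-style iterators rely on. A small userspace model of that contract:

	#include <stdio.h>

	#define NBITS 8	/* stands in for nr_cpu_ids */

	static unsigned int next_set_bit(unsigned long mask, int n)
	{
		for (unsigned int i = n + 1; i < NBITS; i++)
			if (mask & (1UL << i))
				return i;
		return NBITS;	/* ">= nr_cpu_ids": iteration is done */
	}

	int main(void)
	{
		unsigned long online = 0x15;	/* CPUs 0, 2 and 4 */

		/* the shape of for_each_cpu(): start at -1, stop at NBITS */
		for (int cpu = next_set_bit(online, -1); cpu < NBITS;
		     cpu = next_set_bit(online, cpu))
			printf("cpu %d\n", cpu);	/* prints 0, 2, 4 */
		return 0;
	}
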
/**
* cpumask_next_zero - get the next unset cpu in a cpumask
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 898cfe2eeb42..a1e6a33a4b03 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -37,12 +37,6 @@ static inline bool cpusets_enabled(void)
return static_branch_unlikely(&cpusets_enabled_key);
}
-static inline int nr_cpusets(void)
-{
- /* jump label reference count + the top-level cpuset */
- return static_key_count(&cpusets_enabled_key.key) + 1;
-}
-
static inline void cpuset_inc(void)
{
static_branch_inc(&cpusets_pre_enable_key);
@@ -57,7 +51,9 @@ static inline void cpuset_dec(void)
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
+extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -170,11 +166,15 @@ static inline bool cpusets_enabled(void) { return false; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
+static inline void cpuset_force_rebuild(void) { }
+
static inline void cpuset_update_active_cpus(void)
{
partition_sched_domains(1, NULL, NULL);
}
+static inline void cpuset_wait_for_hotplug(void) { }
+
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
diff --git a/include/linux/dax.h b/include/linux/dax.h
index df97b7af7e2c..122197124b9d 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -19,8 +19,6 @@ struct dax_operations {
/* copy_from_iter: required operation for fs-dax direct-i/o */
size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
struct iov_iter *);
- /* flush: optional driver-specific cache management after writes */
- void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
};
extern struct attribute_group dax_attribute_group;
@@ -57,6 +55,7 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
put_dax(dax_dev);
}
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
#else
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
@@ -71,6 +70,11 @@ static inline struct dax_device *fs_dax_get_by_host(const char *host)
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
+
+static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
+{
+ return NULL;
+}
#endif
int dax_read_lock(void);
@@ -84,39 +88,10 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
-void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
- size_t size);
+void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
-/*
- * We use lowest available bit in exceptional entry for locking, one bit for
- * the entry size (PMD) and two more to tell us if the entry is a huge zero
- * page (HZP) or an empty entry that is just used for locking. In total four
- * special bits.
- *
- * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
- * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
- * block allocation.
- */
-#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
-#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
-#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
-#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
-#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
-
-static inline unsigned long dax_radix_sector(void *entry)
-{
- return (unsigned long)entry >> RADIX_DAX_SHIFT;
-}
-
-static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
-{
- return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
- ((unsigned long)sector << RADIX_DAX_SHIFT) |
- RADIX_DAX_ENTRY_LOCK);
-}
-
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
@@ -124,8 +99,6 @@ int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
-void dax_wake_mapping_entry_waiter(struct address_space *mapping,
- pgoff_t index, void *entry, bool wake_all);
#ifdef CONFIG_FS_DAX
int __dax_zero_page_range(struct block_device *bdev,
@@ -140,21 +113,6 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
}
#endif
-#ifdef CONFIG_FS_DAX_PMD
-static inline unsigned int dax_radix_order(void *entry)
-{
- if ((unsigned long)entry & RADIX_DAX_PMD)
- return PMD_SHIFT - PAGE_SHIFT;
- return 0;
-}
-#else
-static inline unsigned int dax_radix_order(void *entry)
-{
- return 0;
-}
-#endif
-int dax_pfn_mkwrite(struct vm_fault *vmf);
-
static inline bool dax_mapping(struct address_space *mapping)
{
return mapping->host && IS_DAX(mapping->host);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index aae1cdb76851..ed1a7cf6923a 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -147,7 +147,7 @@ struct dentry_operations {
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, const struct inode *,
- unsigned int);
+ unsigned int, unsigned int);
} ____cacheline_aligned;
/*
@@ -562,11 +562,15 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
return upper;
}
+/* d_real() flags */
+#define D_REAL_UPPER 0x2 /* return upper dentry or NULL if non-upper */
+
/**
* d_real - Return the real dentry
* @dentry: the dentry to query
* @inode: inode to select the dentry from multiple layers (can be NULL)
- * @flags: open flags to control copy-up behavior
+ * @open_flags: open flags to control copy-up behavior
+ * @flags: flags to control what is returned by this function
*
* If dentry is on a union/overlay, then return the underlying, real dentry.
* Otherwise return the dentry itself.
@@ -575,10 +579,10 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
*/
static inline struct dentry *d_real(struct dentry *dentry,
const struct inode *inode,
- unsigned int flags)
+ unsigned int open_flags, unsigned int flags)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
- return dentry->d_op->d_real(dentry, inode, flags);
+ return dentry->d_op->d_real(dentry, inode, open_flags, flags);
else
return dentry;
}
@@ -593,7 +597,7 @@ static inline struct dentry *d_real(struct dentry *dentry,
static inline struct inode *d_real_inode(const struct dentry *dentry)
{
/* This usage of d_real() results in const dentry */
- return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
+ return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0, 0));
}
struct name_snapshot {
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index aa86e6d8c1aa..b93efc8feecd 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -196,6 +196,14 @@ static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_file_unsafe(const char *name,
+ umode_t mode, struct dentry *parent,
+ void *data,
+ const struct file_operations *fops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops,
@@ -289,6 +297,14 @@ static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_ulong(const char *name,
+ umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent,
u8 *value)
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 6c220e4ebb6b..597294e0cc40 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -214,19 +214,6 @@ extern void devm_devfreq_unregister_notifier(struct device *dev,
extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
int index);
-/**
- * devfreq_update_stats() - update the last_status pointer in struct devfreq
- * @df: the devfreq instance whose status needs updating
- *
- * Governors are recommended to use this function along with last_status,
- * which allows other entities to reuse the last_status without affecting
- * the values fetched later by governors.
- */
-static inline int devfreq_update_stats(struct devfreq *df)
-{
- return df->profile->get_dev_status(df->dev.parent, &df->last_status);
-}
-
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
* struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 1473455d0341..a5538433c927 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -134,8 +134,6 @@ typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
-typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
- size_t size);
#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
@@ -186,7 +184,6 @@ struct target_type {
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
dm_dax_copy_from_iter_fn dax_copy_from_iter;
- dm_dax_flush_fn dax_flush;
/* For internal device-mapper use. */
struct list_head list;
@@ -387,7 +384,7 @@ struct dm_arg {
* Validate the next argument, either returning it as *value or, if invalid,
* returning -EINVAL and setting *error.
*/
-int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
unsigned *value, char **error);
/*
@@ -395,7 +392,7 @@ int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
* arg->min and arg->max further arguments. Either return the size as
* *num_args or, if invalid, return -EINVAL and set *error.
*/
-int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
+int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
unsigned *num_args, char **error);
/*
@@ -549,46 +546,29 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
*---------------------------------------------------------------*/
#define DM_NAME "device-mapper"
-#ifdef CONFIG_PRINTK
-extern struct ratelimit_state dm_ratelimit_state;
-
-#define dm_ratelimit() __ratelimit(&dm_ratelimit_state)
-#else
-#define dm_ratelimit() 0
-#endif
+#define DM_RATELIMIT(pr_func, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ \
+ if (__ratelimit(&rs)) \
+ pr_func(DM_FMT(fmt), ##__VA_ARGS__); \
+} while (0)
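
The rework gives every DM*_LIMIT() call site its own static ratelimit state instead of the single shared dm_ratelimit_state, so one noisy site can no longer exhaust the budget of every other. A standalone model of the per-expansion-site static (a simple counter stands in for the real interval/burst logic):

	#include <stdio.h>

	#define LIMIT(fmt, ...)						\
	do {								\
		static int budget = 2;	/* one per call site */		\
		if (budget-- > 0)					\
			printf(fmt "\n", ##__VA_ARGS__);		\
	} while (0)

	static void site_a(void) { LIMIT("a: %d", 1); }
	static void site_b(void) { LIMIT("b: %d", 2); }

	int main(void)
	{
		for (int i = 0; i < 5; i++) {
			site_a();	/* prints twice, then suppressed */
			site_b();	/* separate budget: also prints twice */
		}
		return 0;
	}
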
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMERR_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMERR(fmt, ##__VA_ARGS__); \
-} while (0)
-
+#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMWARN_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMWARN(fmt, ##__VA_ARGS__); \
-} while (0)
-
+#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMINFO_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMINFO(fmt, ##__VA_ARGS__); \
-} while (0)
+#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMDEBUG(fmt, ##__VA_ARGS__); \
-} while (0)
+#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
diff --git a/include/linux/device.h b/include/linux/device.h
index beabdbc08420..66fe271c2544 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -307,8 +307,6 @@ struct driver_attribute {
size_t count);
};
-#define DRIVER_ATTR(_name, _mode, _show, _store) \
- struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
#define DRIVER_ATTR_RW(_name) \
struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
#define DRIVER_ATTR_RO(_name) \
@@ -375,7 +373,7 @@ int subsys_virtual_register(struct bus_type *subsys,
* @suspend: Used to put the device to sleep mode, usually to a low power
* state.
* @resume: Used to bring the device from the sleep mode.
- * @shutdown: Called at shut-down time to quiesce the device.
+ * @shutdown_pre: Called at shut-down time before driver shutdown.
 * @ns_type:	Callbacks so sysfs can determine namespaces.
* @namespace: Namespace of the device belongs to this class.
* @pm: The default device power management operations of this class.
@@ -404,7 +402,7 @@ struct class {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
- int (*shutdown)(struct device *dev);
+ int (*shutdown_pre)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
@@ -838,7 +836,7 @@ struct dev_links_info {
* @driver_data: Private pointer for driver specific info.
* @links: Links to suppliers and consumers of this device.
* @power: For device power management.
- * See Documentation/power/admin-guide/devices.rst for details.
+ * See Documentation/driver-api/pm/devices.rst for details.
* @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
@@ -847,6 +845,7 @@ struct dev_links_info {
* @msi_list: Hosts MSI descriptors
* @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
+ * @dma_ops: DMA mapping operations for this device.
* @dma_mask: Dma mask (if dma'ble device).
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
* hardware supports 64-bit addresses for consistent allocations
@@ -1200,6 +1199,36 @@ struct device *device_create_with_groups(struct class *cls,
const char *fmt, ...);
extern void device_destroy(struct class *cls, dev_t devt);
+extern int __must_check device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+extern void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
+
+static inline int __must_check device_add_group(struct device *dev,
+ const struct attribute_group *grp)
+{
+ const struct attribute_group *groups[] = { grp, NULL };
+
+ return device_add_groups(dev, groups);
+}
+
+static inline void device_remove_group(struct device *dev,
+ const struct attribute_group *grp)
+{
+ const struct attribute_group *groups[] = { grp, NULL };
+
+ return device_remove_groups(dev, groups);
+}
+
+extern int __must_check devm_device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+extern void devm_device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
+extern int __must_check devm_device_add_group(struct device *dev,
+ const struct attribute_group *grp);
+extern void devm_device_remove_group(struct device *dev,
+ const struct attribute_group *grp);
+
/*
* Platform "fixup" functions - allow the platform to have their say
* about devices and actions that the general device layer doesn't
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 0a186c4f3981..171895072435 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -338,6 +338,19 @@ dma_fence_is_signaled(struct dma_fence *fence)
}
/**
+ * __dma_fence_is_later - return if f1 is chronologically later than f2
+ * @f1: [in] the first fence's seqno
+ * @f2: [in] the second fence's seqno from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not common across contexts.
+ */
+static inline bool __dma_fence_is_later(u32 f1, u32 f2)
+{
+ return (int)(f1 - f2) > 0;
+}
+
+/**
* dma_fence_is_later - return if f1 is chronologically later than f2
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
@@ -351,7 +364,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
if (WARN_ON(f1->context != f2->context))
return false;
- return (int)(f1->seqno - f2->seqno) > 0;
+ return __dma_fence_is_later(f1->seqno, f2->seqno);
}
/**
@@ -418,8 +431,8 @@ int dma_fence_get_status(struct dma_fence *fence);
static inline void dma_fence_set_error(struct dma_fence *fence,
int error)
{
- BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
- BUG_ON(error >= 0 || error < -MAX_ERRNO);
+ WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+ WARN_ON(error >= 0 || error < -MAX_ERRNO);
fence->error = error;
}
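
The comparison that __dma_fence_is_later() now centralizes relies on well-defined unsigned subtraction followed by a signed cast; a sketch of the resulting ordering (values purely illustrative):

	/* 2 is "later" than 0xfffffffe: (int)(2 - 0xfffffffe) == 4 > 0 */
	WARN_ON(!__dma_fence_is_later(2, 0xfffffffeU));
	/* the reverse comparison yields -4 < 0, i.e. not later */
	WARN_ON(__dma_fence_is_later(0xfffffffeU, 2));

so ordering stays correct across seqno wraparound as long as two live fences on a context are less than 2^31 apart.
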
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 2189c79cde5d..29ce9815da87 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -550,26 +550,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- return dma_alloc_attrs(dev, size, dma_handle, gfp,
- DMA_ATTR_NON_CONSISTENT);
-}
-
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- dma_free_attrs(dev, size, cpu_addr, dma_handle,
- DMA_ATTR_NON_CONSISTENT);
-}
-
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- debug_dma_mapping_error(dev, dma_addr);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
- if (get_dma_ops(dev)->mapping_error)
- return get_dma_ops(dev)->mapping_error(dev, dma_addr);
+ debug_dma_mapping_error(dev, dma_addr);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
return 0;
}
@@ -720,10 +707,7 @@ static inline int dma_get_cache_alignment(void)
#endif
/* flags for the coherent memory api */
-#define DMA_MEMORY_MAP 0x01
-#define DMA_MEMORY_IO 0x02
-#define DMA_MEMORY_INCLUDES_CHILDREN 0x04
-#define DMA_MEMORY_EXCLUSIVE 0x08
+#define DMA_MEMORY_EXCLUSIVE 0x01
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
@@ -736,7 +720,7 @@ static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
{
- return 0;
+ return -ENOSYS;
}
static inline void
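
The caller-side contract of dma_mapping_error() is unchanged by the refactor; a minimal sketch, with dev, buf and len assumed to be set up elsewhere:

	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* mapping failed; addr must not be used */
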
diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h
new file mode 100644
index 000000000000..077d43a358e5
--- /dev/null
+++ b/include/linux/dma/qcom_bam_dma.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_BAM_DMA_H
+#define _QCOM_BAM_DMA_H
+
+#include <asm/byteorder.h>
+
+/*
+ * This data type corresponds to the native Command Element
+ * supported by BAM DMA Engine.
+ *
+ * @cmd_and_addr - upper 8 bits command and lower 24 bits register address.
+ * @data - for a write command: content to be written into the peripheral
+ * register; for a read command: destination address for the register value.
+ * @mask - register mask.
+ * @reserved - for future usage.
+ *
+ */
+struct bam_cmd_element {
+ __le32 cmd_and_addr;
+ __le32 data;
+ __le32 mask;
+ __le32 reserved;
+};
+
+/*
+ * This enum indicates the command type in a command element
+ */
+enum bam_command_type {
+ BAM_WRITE_COMMAND = 0,
+ BAM_READ_COMMAND,
+};
+
+/*
+ * bam_prep_ce_le32 - Wrapper function to prepare a single BAM command
+ * element with the data already in le32 format.
+ *
+ * @bam_ce: bam command element
+ * @addr: target address
+ * @cmd: BAM command
+ * @data: actual data for write and dest addr for read in le32
+ */
+static inline void
+bam_prep_ce_le32(struct bam_cmd_element *bam_ce, u32 addr,
+ enum bam_command_type cmd, __le32 data)
+{
+ bam_ce->cmd_and_addr =
+ cpu_to_le32((addr & 0xffffff) | ((cmd & 0xff) << 24));
+ bam_ce->data = data;
+ bam_ce->mask = cpu_to_le32(0xffffffff);
+}
+
+/*
+ * bam_prep_ce - Wrapper function to prepare a single BAM command element
+ * with the data.
+ *
+ * @bam_ce: BAM command element
+ * @addr: target address
+ * @cmd: BAM command
+ * @data: actual data for write and dest addr for read
+ */
+static inline void
+bam_prep_ce(struct bam_cmd_element *bam_ce, u32 addr,
+ enum bam_command_type cmd, u32 data)
+{
+ bam_prep_ce_le32(bam_ce, addr, cmd, cpu_to_le32(data));
+}
+#endif
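
A short sketch of building one command element with this new header; the register offset and value are illustrative:

	struct bam_cmd_element ce;

	/* queue a write of 0x1 to a peripheral register at offset 0x100;
	 * bam_prep_ce() does the cpu_to_le32 conversion internally */
	bam_prep_ce(&ce, 0x100, BAM_WRITE_COMMAND, 0x1);
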
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 533680860865..8319101170fc 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -68,7 +68,6 @@ enum dma_transaction_type {
DMA_MEMSET,
DMA_MEMSET_SG,
DMA_INTERRUPT,
- DMA_SG,
DMA_PRIVATE,
DMA_ASYNC_TX,
DMA_SLAVE,
@@ -186,6 +185,9 @@ struct dma_interleaved_template {
* on the result of this operation
* @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
* cleared or freed
+ * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command
+ * data and the descriptor should be in a different format from normal
+ * data descriptors.
*/
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
@@ -195,6 +197,7 @@ enum dma_ctrl_flags {
DMA_PREP_CONTINUE = (1 << 4),
DMA_PREP_FENCE = (1 << 5),
DMA_CTRL_REUSE = (1 << 6),
+ DMA_PREP_CMD = (1 << 7),
};
/**
@@ -771,11 +774,6 @@ struct dma_device {
unsigned int nents, int value, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
- struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
@@ -905,19 +903,6 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
-static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
- struct dma_chan *chan,
- struct scatterlist *dst_sg, unsigned int dst_nents,
- struct scatterlist *src_sg, unsigned int src_nents,
- unsigned long flags)
-{
- if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
- return NULL;
-
- return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
- src_sg, src_nents, flags);
-}
-
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
* @chan: The channel for which to terminate the transfers
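
A hedged sketch of passing the new DMA_PREP_CMD flag through an existing prep call; chan, sgl and nents are assumed to have been set up by the caller:

	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				      DMA_PREP_CMD | DMA_PREP_INTERRUPT);
	if (!txd)
		return -EIO;
	dmaengine_submit(txd);
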
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 002611c85318..2d0259327721 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -51,7 +51,7 @@
#endif
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.7"
+#define REL_VERSION "8.4.10"
#define API_VERSION 1
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 101
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index 2896f93808ae..4e6d4d4c7056 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -132,7 +132,8 @@ GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf,
__flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF)
__flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF)
__flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF)
- __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED)
+ __flg_field_def(24, 0 /* OPTIONAL */, discard_zeroes_if_aligned, DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF)
+ __flg_field_def(26, 0 /* OPTIONAL */, disable_write_same, DRBD_DISABLE_WRITE_SAME_DEF)
)
GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index ddac68422a96..24ae1b9b76c7 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -209,12 +209,18 @@
#define DRBD_MD_FLUSHES_DEF 1
#define DRBD_TCP_CORK_DEF 1
#define DRBD_AL_UPDATES_DEF 1
+
/* We used to ignore the discard_zeroes_data setting.
* To not change established (and expected) behaviour,
* by default assume that, for discard_zeroes_data=0,
* we can make that an effective discard_zeroes_data=1,
* if we only explicitly zero-out unaligned partial chunks. */
-#define DRBD_DISCARD_ZEROES_IF_ALIGNED 1
+#define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1
+
+/* Some backends pretend to support WRITE SAME,
+ * but fail such requests when they are actually submitted.
+ * This is to tell DRBD to not even try. */
+#define DRBD_DISABLE_WRITE_SAME_DEF 0
#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
#define DRBD_ALWAYS_ASBP_DEF 0
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 8ae0f45fafd6..cd75c173fd00 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -619,7 +619,6 @@ struct mem_ctl_info {
*/
struct device *pdev;
const char *mod_name;
- const char *mod_ver;
const char *ctl_name;
const char *dev_name;
void *pvt_info;
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
index 885f587a3555..915898759280 100644
--- a/include/linux/eeprom_93xx46.h
+++ b/include/linux/eeprom_93xx46.h
@@ -2,8 +2,7 @@
* Module: eeprom_93xx46
* platform description for 93xx46 EEPROMs.
*/
-
-struct gpio_desc;
+#include <linux/gpio/consumer.h>
struct eeprom_93xx46_platform_data {
unsigned char flags;
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 4e47f78430be..66f4a4e79f4b 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -47,10 +47,10 @@ typedef u16 efi_char16_t; /* UNICODE character */
typedef u64 efi_physical_addr_t;
typedef void *efi_handle_t;
-typedef uuid_le efi_guid_t;
+typedef guid_t efi_guid_t;
#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
- UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
+ GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
/*
* Generic EFI table header
@@ -1020,6 +1020,28 @@ extern int efi_memattr_init(void);
extern int efi_memattr_apply_permissions(struct mm_struct *mm,
efi_memattr_perm_setter fn);
+/*
+ * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor
+ * @map: the start of efi memmap
+ * @desc_size: the size of space for each EFI memmap descriptor
+ * @n: the index of efi memmap descriptor
+ *
+ * The EFI boot services provide the GetMemoryMap() function, which returns a
+ * copy of the current memory map: an array of memory descriptors, each of
+ * which describes a contiguous block of memory. It also returns the size of
+ * the map and the size of each descriptor.
+ *
+ * Note that per section 6.2 of UEFI Spec 2.6 Errata A, the returned size of
+ * each descriptor might not be equal to sizeof(efi_memory_desc_t),
+ * since efi_memory_desc_t may be extended in the future. Thus the OS
+ * MUST use the returned size of the descriptor to find the start of each
+ * efi_memory_desc_t in the memory map array. This should only be used
+ * during bootup since for_each_efi_memory_desc_xxx() is available after the
+ * kernel initializes the EFI subsystem to set up struct efi_memory_map.
+ */
+#define efi_early_memdesc_ptr(map, desc_size, n) \
+ (efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size)))
+
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
@@ -1504,6 +1526,13 @@ enum efi_secureboot_mode {
};
enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
+void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg);
+#else
+static inline void
+efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
+#endif
+
/*
* Arch code can implement the following three template macros, avoiding
 * repetition for the void/non-void return cases of {__,}efi_call_virt():
@@ -1564,6 +1593,8 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
void *priv,
efi_exit_boot_map_processing priv_func);
+#define EFI_RANDOM_SEED_SIZE 64U
+
struct linux_efi_random_seed {
u32 size;
u8 bits[];
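
A boot-stub-style sketch of walking the map with efi_early_memdesc_ptr(), honouring the firmware-reported descriptor size; map, desc_size and nr_desc are assumed to come from GetMemoryMap():

	u64 total_pages = 0;
	int i;

	for (i = 0; i < nr_desc; i++) {
		efi_memory_desc_t *md = efi_early_memdesc_ptr(map, desc_size, i);

		if (md->type == EFI_CONVENTIONAL_MEMORY)
			total_pages += md->num_pages;
	}
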
diff --git a/include/linux/errseq.h b/include/linux/errseq.h
index 9e0d444ac88d..f746bd8fe4d0 100644
--- a/include/linux/errseq.h
+++ b/include/linux/errseq.h
@@ -1,18 +1,12 @@
+/*
+ * See Documentation/errseq.rst and lib/errseq.c
+ */
#ifndef _LINUX_ERRSEQ_H
#define _LINUX_ERRSEQ_H
-/* See lib/errseq.c for more info */
-
typedef u32 errseq_t;
-errseq_t __errseq_set(errseq_t *eseq, int err);
-static inline void errseq_set(errseq_t *eseq, int err)
-{
- /* Optimize for the common case of no error */
- if (unlikely(err))
- __errseq_set(eseq, err);
-}
-
+errseq_t errseq_set(errseq_t *eseq, int err);
errseq_t errseq_sample(errseq_t *eseq);
int errseq_check(errseq_t *eseq, errseq_t since);
int errseq_check_and_advance(errseq_t *eseq, errseq_t *since);
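
A minimal sketch of the sample/check pattern this simplified API serves; mapping is assumed to be a struct address_space with the wb_err field added in the fs.h hunk below:

	errseq_t since = errseq_sample(&mapping->wb_err);

	/* ... writeback runs; errors are recorded via errseq_set() ... */

	if (errseq_check(&mapping->wb_err, since))
		pr_warn("writeback error since sample point\n");
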
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 83cc9863444b..4587a4c36923 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -137,6 +137,17 @@ struct ethtool_link_ksettings {
__set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
/**
+ * ethtool_link_ksettings_del_link_mode - clear bit in link_ksettings
+ * link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+ * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
+ * (not atomic, no bound checking)
+ */
+#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \
+ __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+
+/**
* ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask
* @ptr : pointer to struct ethtool_link_ksettings
* @name : one of supported/advertising/lp_advertising
@@ -374,5 +385,9 @@ struct ethtool_ops {
struct ethtool_link_ksettings *);
int (*set_link_ksettings)(struct net_device *,
const struct ethtool_link_ksettings *);
+ int (*get_fecparam)(struct net_device *,
+ struct ethtool_fecparam *);
+ int (*set_fecparam)(struct net_device *,
+ struct ethtool_fecparam *);
};
#endif /* _LINUX_ETHTOOL_H */
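
A small sketch of the new clear-bit macro next to its existing add counterpart; ks is an assumed pointer to a driver's struct ethtool_link_ksettings:

	/* advertise 10G, but drop a mode this board cannot support */
	ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full);
	ethtool_link_ksettings_del_link_mode(ks, advertising, 1000baseKX_Full);
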
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 7e206a9f88db..744d60ca80c3 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -1,5 +1,5 @@
/*
- * External connector (extcon) class driver
+ * External Connector (extcon) framework
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -20,8 +20,7 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
-*/
+ */
#ifndef __LINUX_EXTCON_H__
#define __LINUX_EXTCON_H__
@@ -93,7 +92,7 @@
#define EXTCON_NUM 63
/*
- * Define the property of supported external connectors.
+ * Define the properties of supported external connectors.
*
* When adding the new extcon property, they *must* have
* the type/value/default information. Also, you *have to*
@@ -176,44 +175,42 @@ struct extcon_dev;
#if IS_ENABLED(CONFIG_EXTCON)
-/*
- * Following APIs are for notifiers or configurations.
- * Notifiers are the external port and connection devices.
- */
+/* Following APIs register/unregister the extcon device. */
extern int extcon_dev_register(struct extcon_dev *edev);
extern void extcon_dev_unregister(struct extcon_dev *edev);
extern int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev);
+ struct extcon_dev *edev);
extern void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev);
-extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
+ struct extcon_dev *edev);
-/*
- * Following APIs control the memory of extcon device.
- */
+/* Following APIs allocate/free the memory of the extcon device. */
extern struct extcon_dev *extcon_dev_allocate(const unsigned int *cable);
extern void extcon_dev_free(struct extcon_dev *edev);
extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable);
+ const unsigned int *cable);
extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
+/* Synchronize the state and property value for each external connector. */
+extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
+
/*
- * get/set_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable id.
+ * Following APIs get/set the connected state of each external connector.
+ * The 'id' argument indicates the defined external connector.
*/
extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
extern int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state);
+ bool state);
extern int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state);
-/*
- * Synchronize the state and property data for a specific external connector.
- */
-extern int extcon_sync(struct extcon_dev *edev, unsigned int id);
+ bool state);
/*
- * get/set_property access the property value of each external connector.
- * They are used to access the property of each cable based on the property id.
+ * Following APIs get/set the property of each external connector.
+ * The 'id' argument indicates the defined external connector
+ * and the 'prop' indicates the extcon property.
+ *
+ * And extcon_get/set_property_capability() get/set the capability of a given
+ * property for each external connector, based on the id and property.
*/
extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -224,28 +221,24 @@ extern int extcon_set_property(struct extcon_dev *edev, unsigned int id,
extern int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value prop_val);
-
-/*
- * get/set_property_capability set the capability of the property for each
- * external connector. They are used to set the capability of the property
- * of each external connector based on the id and property.
- */
extern int extcon_get_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
extern int extcon_set_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
/*
- * Following APIs are to monitor the status change of the external connectors.
+ * Following APIs register the notifier block in order to detect
+ * the change of both state and property value for each external connector.
+ *
* extcon_register_notifier(*edev, id, *nb) : Register a notifier block
* for specific external connector of the extcon.
* extcon_register_notifier_all(*edev, *nb) : Register a notifier block
* for all supported external connectors of the extcon.
*/
extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
- struct notifier_block *nb);
+ struct notifier_block *nb);
extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
- struct notifier_block *nb);
+ struct notifier_block *nb);
extern int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
@@ -265,16 +258,15 @@ extern void devm_extcon_unregister_notifier_all(struct device *dev,
struct notifier_block *nb);
/*
- * Following API get the extcon device from devicetree.
- * This function use phandle of devicetree to get extcon device directly.
+ * Following APIs get the extcon_dev from devicetree or through the extcon name.
*/
+extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
int index);
-/* Following API to get information of extcon device */
+/* Following API gets the name of the extcon device. */
extern const char *extcon_get_edev_name(struct extcon_dev *edev);
-
#else /* CONFIG_EXTCON */
static inline int extcon_dev_register(struct extcon_dev *edev)
{
@@ -284,13 +276,13 @@ static inline int extcon_dev_register(struct extcon_dev *edev)
static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
static inline int devm_extcon_dev_register(struct device *dev,
- struct extcon_dev *edev)
+ struct extcon_dev *edev)
{
return -EINVAL;
}
static inline void devm_extcon_dev_unregister(struct device *dev,
- struct extcon_dev *edev) { }
+ struct extcon_dev *edev) { }
static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
{
@@ -300,7 +292,7 @@ static inline struct extcon_dev *extcon_dev_allocate(const unsigned int *cable)
static inline void extcon_dev_free(struct extcon_dev *edev) { }
static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
- const unsigned int *cable)
+ const unsigned int *cable)
{
return ERR_PTR(-ENOSYS);
}
@@ -314,13 +306,13 @@ static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
}
static inline int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+ bool state)
{
return 0;
}
static inline int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+ bool state)
{
return 0;
}
@@ -331,52 +323,45 @@ static inline int extcon_sync(struct extcon_dev *edev, unsigned int id)
}
static inline int extcon_get_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value *prop_val)
+ unsigned int prop,
+ union extcon_property_value *prop_val)
{
return 0;
}
static inline int extcon_set_property(struct extcon_dev *edev, unsigned int id,
- unsigned int prop,
- union extcon_property_value prop_val)
+ unsigned int prop,
+ union extcon_property_value prop_val)
{
return 0;
}
static inline int extcon_set_property_sync(struct extcon_dev *edev,
- unsigned int id, unsigned int prop,
- union extcon_property_value prop_val)
+ unsigned int id, unsigned int prop,
+ union extcon_property_value prop_val)
{
return 0;
}
static inline int extcon_get_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop)
+ unsigned int id, unsigned int prop)
{
return 0;
}
static inline int extcon_set_property_capability(struct extcon_dev *edev,
- unsigned int id, unsigned int prop)
+ unsigned int id, unsigned int prop)
{
return 0;
}
-static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
-{
- return NULL;
-}
-
static inline int extcon_register_notifier(struct extcon_dev *edev,
- unsigned int id,
- struct notifier_block *nb)
+ unsigned int id, struct notifier_block *nb)
{
return 0;
}
static inline int extcon_unregister_notifier(struct extcon_dev *edev,
- unsigned int id,
- struct notifier_block *nb)
+ unsigned int id, struct notifier_block *nb)
{
return 0;
}
@@ -392,8 +377,13 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb) { }
+static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
- int index)
+ int index)
{
return ERR_PTR(-ENODEV);
}
@@ -411,26 +401,14 @@ struct extcon_specific_cable_nb {
};
static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
+ const char *extcon_name, const char *cable_name,
+ struct notifier_block *nb)
{
return -EINVAL;
}
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
- *obj)
+static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
{
return -EINVAL;
}
-
-static inline int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id)
-{
- return extcon_get_state(edev, id);
-}
-
-static inline int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
-{
- return extcon_set_state_sync(edev, id, cable_state);
-}
#endif /* __LINUX_EXTCON_H__ */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index b6feed6547ce..2a0c453d7235 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -186,6 +186,8 @@ struct f2fs_extent {
#define F2FS_NAME_LEN 255
#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
+ get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
@@ -205,9 +207,7 @@ struct f2fs_extent {
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
-
-#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
- F2FS_INLINE_XATTR_ADDRS - 1))
+#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -235,8 +235,16 @@ struct f2fs_inode {
struct f2fs_extent i_ext; /* caching a largest extent */
- __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
-
+ union {
+ struct {
+ __le16 i_extra_isize; /* extra inode attribute size */
+ __le16 i_padding; /* padding */
+ __le32 i_projid; /* project id */
+ __le32 i_inode_checksum;/* inode meta checksum */
+ __le32 i_extra_end[0]; /* for attribute size calculation */
+ };
+ __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
+ };
__le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
double_indirect(1) node id */
} __packed;
@@ -465,7 +473,7 @@ typedef __le32 f2fs_hash_t;
#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
/*
- * space utilization of regular dentry and inline dentry
+ * space utilization of regular dentry and inline dentry (w/o extra reservation)
* regular dentry inline dentry
* bitmap 1 * 27 = 27 1 * 23 = 23
* reserved 1 * 3 = 3 1 * 7 = 7
@@ -501,24 +509,6 @@ struct f2fs_dentry_block {
__u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
} __packed;
-/* for inline dir */
-#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \
- ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
- BITS_PER_BYTE + 1))
-#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \
- BITS_PER_BYTE - 1) / BITS_PER_BYTE)
-#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \
- ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
- NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE))
-
-/* inline directory entry structure */
-struct f2fs_inline_dentry {
- __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE];
- __u8 reserved[INLINE_RESERVED_SIZE];
- struct f2fs_dir_entry dentry[NR_INLINE_DENTRY];
- __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN];
-} __packed;
-
/* file types used in inode_info->flags */
enum {
F2FS_FT_UNKNOWN,
@@ -534,4 +524,6 @@ enum {
#define S_SHIFT 12
+#define F2FS_DEF_PROJID 0 /* default project ID */
+
#endif /* _LINUX_F2FS_FS_H */
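
A hypothetical helper showing how the new i_extra_isize field shifts the usable i_addr slots; this is only a sketch of the layout, not code from this patch:

	static inline int extra_isize_words(const struct f2fs_inode *ri)
	{
		/* i_extra_isize is in bytes; i_addr slots are __le32 words */
		return le16_to_cpu(ri->i_extra_isize) / sizeof(__le32);
	}
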
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a964d076b4dc..f4386b0ccf40 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -400,7 +400,7 @@ struct fb_tile_ops {
#endif /* CONFIG_FB_TILEBLITTING */
/* FBINFO_* = fb_info.flags bit flags */
-#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */
+#define FBINFO_DEFAULT 0
#define FBINFO_HWACCEL_DISABLED 0x0002
/* When FBINFO_HWACCEL_DISABLED is set:
* Hardware acceleration is turned off. Software implementations
@@ -533,14 +533,6 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
return a;
}
-#ifdef MODULE
-#define FBINFO_DEFAULT FBINFO_MODULE
-#else
-#define FBINFO_DEFAULT 0
-#endif
-
-// This will go away
-#define FBINFO_FLAG_MODULE FBINFO_MODULE
#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT
/* This will go away
diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h
new file mode 100644
index 000000000000..f68a7db14165
--- /dev/null
+++ b/include/linux/fbcon.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_FBCON_H
+#define _LINUX_FBCON_H
+
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
+void __init fb_console_init(void);
+void __exit fb_console_exit(void);
+#else
+static inline void fb_console_init(void) {}
+static inline void fb_console_exit(void) {}
+#endif
+
+#endif /* _LINUX_FBCON_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index bfef1e5734f8..d29e58fde364 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -711,7 +711,24 @@ bool bpf_helper_changes_pkt_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+
+/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
+ * same CPU context. Further, for best results, no more than a single map
+ * should be used for the do_redirect/do_flush pair. This limitation exists
+ * because we only track one map and force a flush when the map changes.
+ * This does not appear to be a real limitation for existing software.
+ */
+int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
+ struct bpf_prog *prog);
+int xdp_do_redirect(struct net_device *dev,
+ struct xdp_buff *xdp,
+ struct bpf_prog *prog);
+void xdp_do_flush_map(void);
+
void bpf_warn_invalid_xdp_action(u32 act);
+void bpf_warn_invalid_xdp_redirect(u32 ifindex);
+
+struct sock *do_sk_redirect_map(void);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
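
A hedged driver-side sketch of the pairing rule above: redirect from a single map during the poll and flush once at the end, all on the same CPU (act, xdp and xdp_prog are assumed from the surrounding XDP hook):

	bool redirect_pending = false;

	switch (act) {
	case XDP_REDIRECT:
		if (xdp_do_redirect(dev, &xdp, xdp_prog) == 0)
			redirect_pending = true;
		break;
	default:
		break;
	}

	/* at the end of the NAPI poll, on the CPU that redirected */
	if (redirect_pending)
		xdp_do_flush_map();
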
diff --git a/include/linux/fmc.h b/include/linux/fmc.h
index a5f0aa5c2a8d..3dc8a1b2db7b 100644
--- a/include/linux/fmc.h
+++ b/include/linux/fmc.h
@@ -132,6 +132,8 @@ struct fmc_operations {
uint32_t (*read32)(struct fmc_device *fmc, int offset);
void (*write32)(struct fmc_device *fmc, uint32_t value, int offset);
int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv);
+ int (*reprogram_raw)(struct fmc_device *f, struct fmc_driver *d,
+ void *gw, unsigned long len);
int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw);
int (*irq_request)(struct fmc_device *fmc, irq_handler_t h,
char *name, int flags);
@@ -144,6 +146,8 @@ struct fmc_operations {
};
/* Prefer this helper rather than calling fmc->reprogram directly */
+int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d,
+ void *gw, unsigned long len, int sdb_entry);
extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw,
int sdb_entry);
@@ -180,6 +184,9 @@ struct fmc_device {
uint32_t device_id; /* Filled by the device */
char *mezzanine_name; /* Defaults to ``fmc'' */
void *mezzanine_data;
+
+ struct dentry *dbg_dir;
+ struct dentry *dbg_sdb_dump;
};
#define to_fmc_device(x) container_of((x), struct fmc_device, dev)
@@ -217,14 +224,23 @@ static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data)
dev_set_drvdata(&fmc->dev, data);
}
-/* The 4 access points */
+struct fmc_gateware {
+ void *bitstream;
+ unsigned long len;
+};
+
+/* The 5 access points */
extern int fmc_driver_register(struct fmc_driver *drv);
extern void fmc_driver_unregister(struct fmc_driver *drv);
extern int fmc_device_register(struct fmc_device *tdev);
+extern int fmc_device_register_gw(struct fmc_device *tdev,
+ struct fmc_gateware *gw);
extern void fmc_device_unregister(struct fmc_device *tdev);
-/* Two more for device sets, all driven by the same FPGA */
+/* Three more for device sets, all driven by the same FPGA */
extern int fmc_device_register_n(struct fmc_device **devs, int n);
+extern int fmc_device_register_n_gw(struct fmc_device **devs, int n,
+ struct fmc_gateware *gw);
extern void fmc_device_unregister_n(struct fmc_device **devs, int n);
/* Internal cross-calls between files; not exported to other modules */
@@ -232,6 +248,23 @@ extern int fmc_match(struct device *dev, struct device_driver *drv);
extern int fmc_fill_id_info(struct fmc_device *fmc);
extern void fmc_free_id_info(struct fmc_device *fmc);
extern void fmc_dump_eeprom(const struct fmc_device *fmc);
-extern void fmc_dump_sdb(const struct fmc_device *fmc);
+
+/* helpers for FMC operations */
+extern int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
+ char *name, int flags);
+extern void fmc_irq_free(struct fmc_device *fmc);
+extern void fmc_irq_ack(struct fmc_device *fmc);
+extern int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv);
+extern int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio,
+ int ngpio);
+extern int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l);
+extern int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l);
+
#endif /* __LINUX_FMC_H__ */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index b4ac24c4411d..bfa14bc023fb 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -67,10 +67,14 @@ enum fpga_mgr_states {
* FPGA Manager flags
* FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
* FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
+#define FPGA_MGR_BITSTREAM_LSB_FIRST BIT(3)
+#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
/**
* struct fpga_image_info - information specific to a FPGA image
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6e1fd5d21248..13dab191a23e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -72,6 +72,8 @@ extern int leases_enable, lease_break_time;
extern int sysctl_protected_symlinks;
extern int sysctl_protected_hardlinks;
+typedef __kernel_rwf_t rwf_t;
+
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
@@ -146,8 +148,8 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
-/* File is capable of returning -EAGAIN if AIO will block */
-#define FMODE_AIO_NOWAIT ((__force fmode_t)0x8000000)
+/* File is capable of returning -EAGAIN if I/O will block */
+#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
/*
* Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
@@ -390,7 +392,7 @@ struct address_space {
struct radix_tree_root page_tree; /* radix tree of all pages */
spinlock_t tree_lock; /* and lock protecting it */
atomic_t i_mmap_writable;/* count VM_SHARED mappings */
- struct rb_root i_mmap; /* tree of private and shared mappings */
+ struct rb_root_cached i_mmap; /* tree of private and shared mappings */
struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
/* Protected by tree_lock together with the radix tree */
unsigned long nrpages; /* number of total pages */
@@ -401,7 +403,7 @@ struct address_space {
unsigned long flags; /* error bits */
spinlock_t private_lock; /* for use by the address_space */
gfp_t gfp_mask; /* implicit gfp mask for allocations */
- struct list_head private_list; /* ditto */
+ struct list_head private_list; /* for use by the address_space */
void *private_data; /* ditto */
errseq_t wb_err;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
@@ -427,6 +429,7 @@ struct block_device {
#endif
struct block_device * bd_contains;
unsigned bd_block_size;
+ u8 bd_partno;
struct hd_struct * bd_part;
/* number of times partitions within this device have been opened. */
unsigned bd_part_count;
@@ -484,7 +487,7 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
*/
static inline int mapping_mapped(struct address_space *mapping)
{
- return !RB_EMPTY_ROOT(&mapping->i_mmap);
+ return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
}
/*
@@ -907,9 +910,9 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
+#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif
#define FL_POSIX 1
@@ -1000,7 +1003,6 @@ struct file_lock {
unsigned char fl_type;
unsigned int fl_pid;
int fl_link_cpu; /* what cpu's list is this on? */
- struct pid *fl_nspid;
wait_queue_head_t fl_wait;
struct file *fl_file;
loff_t fl_start;
@@ -1233,7 +1235,7 @@ static inline struct inode *file_inode(const struct file *f)
static inline struct dentry *file_dentry(const struct file *file)
{
- return d_real(file->f_path.dentry, file_inode(file), 0);
+ return d_real(file->f_path.dentry, file_inode(file), 0, 0);
}
static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
@@ -1268,7 +1270,32 @@ extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
-struct mm_struct;
+/*
+ * sb->s_flags. Note that these mirror the equivalent MS_* flags where
+ * represented in both.
+ */
+#define SB_RDONLY 1 /* Mount read-only */
+#define SB_NOSUID 2 /* Ignore suid and sgid bits */
+#define SB_NODEV 4 /* Disallow access to device special files */
+#define SB_NOEXEC 8 /* Disallow program execution */
+#define SB_SYNCHRONOUS 16 /* Writes are synced at once */
+#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define SB_DIRSYNC 128 /* Directory modifications are synchronous */
+#define SB_NOATIME 1024 /* Do not update access times. */
+#define SB_NODIRATIME 2048 /* Do not update directory access times */
+#define SB_SILENT 32768
+#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */
+#define SB_I_VERSION (1<<23) /* Update inode I_version field */
+#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
+
+/* These sb flags are internal to the kernel */
+#define SB_SUBMOUNT (1<<26)
+#define SB_NOREMOTELOCK (1<<27)
+#define SB_NOSEC (1<<28)
+#define SB_BORN (1<<29)
+#define SB_ACTIVE (1<<30)
+#define SB_NOUSER (1<<31)
/*
* Umount options
@@ -1754,13 +1781,10 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
struct iovec **ret_pointer);
extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
-extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, int);
-extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, int);
+ unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
@@ -1836,7 +1860,7 @@ struct super_operations {
* possible to override it selectively if you really wanted to with some
* ioctl() that is not currently implemented.
*
- * Exception: MS_RDONLY is always applied to the entire file system.
+ * Exception: SB_RDONLY is always applied to the entire file system.
*
 * Unfortunately, it is possible to change a filesystem's flags with it mounted
* with files in use. This means that all of the inodes will not have their
@@ -1845,19 +1869,20 @@ struct super_operations {
*/
#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
-#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
-#define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
+static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
+#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb)
+#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
-#define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
+#define IS_DIRSYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS|SB_DIRSYNC) || \
((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
-#define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
-#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
-#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
+#define IS_MANDLOCK(inode) __IS_FLG(inode, SB_MANDLOCK)
+#define IS_NOATIME(inode) __IS_FLG(inode, SB_RDONLY|SB_NOATIME)
+#define IS_I_VERSION(inode) __IS_FLG(inode, SB_I_VERSION)
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
-#define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
+#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
@@ -2178,7 +2203,7 @@ static inline int __mandatory_lock(struct inode *ino)
}
/*
- * ... and these candidates should be on MS_MANDLOCK mounted fs,
+ * ... and these candidates should be on SB_MANDLOCK mounted fs,
* otherwise these will be advisory locks
*/
@@ -2471,9 +2496,13 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
#endif
/* fs/char_dev.c */
-#define CHRDEV_MAJOR_HASH_SIZE 255
+#define CHRDEV_MAJOR_MAX 512
/* Marks the bottom of the first segment of free char majors */
#define CHRDEV_MAJOR_DYN_END 234
+/* Marks the top and bottom of the second segment of free char majors */
+#define CHRDEV_MAJOR_DYN_EXT_START 511
+#define CHRDEV_MAJOR_DYN_EXT_END 384
+
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int __register_chrdev(unsigned int major, unsigned int baseminor,
@@ -2500,14 +2529,14 @@ static inline void unregister_chrdev(unsigned int major, const char *name)
#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
-#define BLKDEV_MAJOR_HASH_SIZE 255
+#define BLKDEV_MAJOR_MAX 512
extern const char *__bdevname(dev_t, char *buffer);
extern const char *bdevname(struct block_device *bdev, char *buffer);
extern struct block_device *lookup_bdev(const char *);
extern void blkdev_show(struct seq_file *,off_t);
#else
-#define BLKDEV_MAJOR_HASH_SIZE 0
+#define BLKDEV_MAJOR_MAX 0
#endif
extern void init_special_inode(struct inode *, umode_t, dev_t);
@@ -2539,12 +2568,19 @@ extern int invalidate_inode_pages2_range(struct address_space *mapping,
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
-extern int filemap_fdatawait(struct address_space *);
extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
loff_t lend);
+
+static inline int filemap_fdatawait(struct address_space *mapping)
+{
+ return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
+}
+
extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
loff_t lend);
+extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
+ loff_t lend);
extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
@@ -2553,12 +2589,19 @@ extern int __filemap_fdatawrite_range(struct address_space *mapping,
extern int filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end);
extern int filemap_check_errors(struct address_space *mapping);
-
extern void __filemap_set_wb_err(struct address_space *mapping, int err);
+
extern int __must_check file_check_and_advance_wb_err(struct file *file);
extern int __must_check file_write_and_wait_range(struct file *file,
loff_t start, loff_t end);
+static inline int file_write_and_wait(struct file *file)
+{
+ return file_write_and_wait_range(file, 0, LLONG_MAX);
+}
+
/**
* filemap_set_wb_err - set a writeback error on an address_space
* @mapping: mapping in which to set writeback error
@@ -2572,8 +2615,6 @@ extern int __must_check file_write_and_wait_range(struct file *file,
* When a writeback error occurs, most filesystems will want to call
* filemap_set_wb_err to record the error in the mapping so that it will be
* automatically reported whenever fsync is called on the file.
- *
- * FIXME: mention FS_* flag here?
*/
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
@@ -2772,15 +2813,15 @@ static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
return kernel_read_file_str[id];
}
-extern int kernel_read(struct file *, loff_t, char *, unsigned long);
extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
enum kernel_read_file_id);
-extern int kernel_read_file_from_path(char *, void **, loff_t *, loff_t,
+extern int kernel_read_file_from_path(const char *, void **, loff_t *, loff_t,
enum kernel_read_file_id);
extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
enum kernel_read_file_id);
-extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
-extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
+extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
+extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
+extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
@@ -2831,6 +2872,7 @@ static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
+extern void evict_inodes(struct super_block *sb);
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
@@ -2874,9 +2916,9 @@ extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
- int flags);
+ rwf_t flags);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
- int flags);
+ rwf_t flags);
/* fs/block_dev.c */
extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
@@ -2998,6 +3040,10 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
+static inline loff_t __inode_get_bytes(struct inode *inode)
+{
+ return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
+}
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
const char *simple_get_link(struct dentry *, struct inode *,
@@ -3022,8 +3068,7 @@ static inline int vfs_lstat(const char __user *name, struct kstat *stat)
static inline int vfs_fstatat(int dfd, const char __user *filename,
struct kstat *stat, int flags)
{
- return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
- stat, STATX_BASIC_STATS);
+ return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
}
static inline int vfs_fstat(int fd, struct kstat *stat)
{
@@ -3143,13 +3188,13 @@ static inline int iocb_flags(struct file *file)
return res;
}
-static inline int kiocb_set_rw_flags(struct kiocb *ki, int flags)
+static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
{
if (unlikely(flags & ~RWF_SUPPORTED))
return -EOPNOTSUPP;
if (flags & RWF_NOWAIT) {
- if (!(ki->ki_filp->f_mode & FMODE_AIO_NOWAIT))
+ if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
ki->ki_flags |= IOCB_NOWAIT;
}
@@ -3274,7 +3319,7 @@ static inline int check_sticky(struct inode *dir, struct inode *inode)
static inline void inode_has_no_xattr(struct inode *inode)
{
- if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
+ if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & SB_NOSEC))
inode->i_flags |= S_NOSEC;
}
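
A minimal ->fsync() sketch using the new helper; under the assumption that file_write_and_wait_range() both writes back and waits on the range and reports any errseq error recorded on the file, nothing more is needed for a filesystem with no private state ("foo" is hypothetical):

	static int foo_fsync(struct file *file, loff_t start, loff_t end,
			     int datasync)
	{
		return file_write_and_wait_range(file, start, end);
	}
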
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 115bb81912cc..f4ff47d4a893 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -143,15 +143,6 @@ struct fscache_cookie_def {
void (*mark_page_cached)(void *cookie_netfs_data,
struct address_space *mapping,
struct page *page);
-
- /* indicate the cookie is no longer cached
- * - this function is called when the backing store currently caching
- * a cookie is removed
- * - the netfs should use this to clean up any markers indicating
- * cached pages
- * - this is mandatory for any object that may have data
- */
- void (*now_uncached)(void *cookie_netfs_data);
};
/*
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6383115e9d2c..2e028854bac7 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -307,7 +307,7 @@ DECLARE_PER_CPU(int, disable_stack_tracer);
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
- if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
this_cpu_inc(disable_stack_tracer);
}
@@ -320,7 +320,7 @@ static inline void stack_tracer_disable(void)
*/
static inline void stack_tracer_enable(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_DEBUG))
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
this_cpu_dec(disable_stack_tracer);
}
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 7c5b694864cd..f36bfd26f998 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -54,7 +54,6 @@ union futex_key {
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
-extern void exit_pi_state_list(struct task_struct *curr);
#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
@@ -64,8 +63,14 @@ extern int futex_cmpxchg_enabled;
static inline void exit_robust_list(struct task_struct *curr)
{
}
+#endif
+
+#ifdef CONFIG_FUTEX_PI
+extern void exit_pi_state_list(struct task_struct *curr);
+#else
static inline void exit_pi_state_list(struct task_struct *curr)
{
}
#endif
+
#endif
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 50893a1646cf..0c35b6caf0f6 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -14,20 +14,9 @@
#include <linux/types.h>
-enum fwnode_type {
- FWNODE_INVALID = 0,
- FWNODE_OF,
- FWNODE_ACPI,
- FWNODE_ACPI_DATA,
- FWNODE_ACPI_STATIC,
- FWNODE_PDATA,
- FWNODE_IRQCHIP
-};
-
struct fwnode_operations;
struct fwnode_handle {
- enum fwnode_type type;
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
};
@@ -44,6 +33,20 @@ struct fwnode_endpoint {
const struct fwnode_handle *local_fwnode;
};
+#define NR_FWNODE_REFERENCE_ARGS 8
+
+/**
+ * struct fwnode_reference_args - Fwnode reference with additional arguments
+ * @fwnode: A reference to the base fwnode
+ * @nargs: Number of elements in @args array
+ * @args: Integer arguments on the fwnode
+ */
+struct fwnode_reference_args {
+ struct fwnode_handle *fwnode;
+ unsigned int nargs;
+ unsigned int args[NR_FWNODE_REFERENCE_ARGS];
+};
+
/**
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
@@ -57,6 +60,7 @@ struct fwnode_endpoint {
* @get_parent: Return the parent of an fwnode.
* @get_next_child_node: Return the next child node in an iteration.
* @get_named_child_node: Return a child node with a given name.
+ * @get_reference_args: Return a reference pointed to by a property, with args
* @graph_get_next_endpoint: Return an endpoint node in an iteration.
* @graph_get_remote_endpoint: Return the remote endpoint node of a local
* endpoint node.
@@ -66,30 +70,36 @@ struct fwnode_endpoint {
struct fwnode_operations {
void (*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
- bool (*device_is_available)(struct fwnode_handle *fwnode);
- bool (*property_present)(struct fwnode_handle *fwnode,
+ bool (*device_is_available)(const struct fwnode_handle *fwnode);
+ bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
- int (*property_read_int_array)(struct fwnode_handle *fwnode,
+ int (*property_read_int_array)(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval);
- int (*property_read_string_array)(struct fwnode_handle *fwnode_handle,
- const char *propname,
- const char **val, size_t nval);
- struct fwnode_handle *(*get_parent)(struct fwnode_handle *fwnode);
+ int
+ (*property_read_string_array)(const struct fwnode_handle *fwnode_handle,
+ const char *propname, const char **val,
+ size_t nval);
+ struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
- (*get_next_child_node)(struct fwnode_handle *fwnode,
+ (*get_next_child_node)(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
struct fwnode_handle *
- (*get_named_child_node)(struct fwnode_handle *fwnode, const char *name);
+ (*get_named_child_node)(const struct fwnode_handle *fwnode,
+ const char *name);
+ int (*get_reference_args)(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args);
struct fwnode_handle *
- (*graph_get_next_endpoint)(struct fwnode_handle *fwnode,
+ (*graph_get_next_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev);
struct fwnode_handle *
- (*graph_get_remote_endpoint)(struct fwnode_handle *fwnode);
+ (*graph_get_remote_endpoint)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
- int (*graph_parse_endpoint)(struct fwnode_handle *fwnode,
+ int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
};
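
A consumer-side sketch of the new reference-with-arguments lookup, assuming the fwnode_property_get_reference_args() wrapper from <linux/property.h>; the "pwms"/"#pwm-cells" property names are illustrative:

	struct fwnode_reference_args args;
	int ret;

	ret = fwnode_property_get_reference_args(fwnode, "pwms", "#pwm-cells",
						 0, 0, &args);
	if (!ret && args.nargs)
		pr_info("first arg of reference: %u\n", args.args[0]);
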
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 29d4385903d4..6dfec4d638df 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -38,12 +38,13 @@ struct device_node;
struct gen_pool;
/**
- * Allocation callback function type definition
+ * typedef genpool_algo_t: Allocation callback function type definition
* @map: Pointer to bitmap
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
- * @data: optional additional data used by @genpool_algo_t
+ * @data: optional additional data used by the callback
+ * @pool: the pool being allocated from
*/
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
unsigned long size,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e619fae2f037..ea652bfcd675 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -362,24 +362,12 @@ static inline void free_part_stats(struct hd_struct *part)
#define part_stat_sub(cpu, gendiskp, field, subnd) \
part_stat_add(cpu, gendiskp, field, -subnd)
-static inline void part_inc_in_flight(struct hd_struct *part, int rw)
-{
- atomic_inc(&part->in_flight[rw]);
- if (part->partno)
- atomic_inc(&part_to_disk(part)->part0.in_flight[rw]);
-}
-
-static inline void part_dec_in_flight(struct hd_struct *part, int rw)
-{
- atomic_dec(&part->in_flight[rw]);
- if (part->partno)
- atomic_dec(&part_to_disk(part)->part0.in_flight[rw]);
-}
-
-static inline int part_in_flight(struct hd_struct *part)
-{
- return atomic_read(&part->in_flight[0]) + atomic_read(&part->in_flight[1]);
-}
+void part_in_flight(struct request_queue *q, struct hd_struct *part,
+ unsigned int inflight[2]);
+void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
+ int rw);
+void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
+ int rw);
static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk)
{
@@ -395,7 +383,7 @@ static inline void free_part_info(struct hd_struct *part)
}
/* block/blk-core.c */
-extern void part_round_stats(int cpu, struct hd_struct *part);
+extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
/* block/genhd.c */
extern void device_add_disk(struct device *parent, struct gendisk *disk);
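
Callers of the reworked in-flight accounting now pass the request queue and
receive both counters at once; a hedged sketch (the exact meaning of each slot
is up to the implementation in block/):

	unsigned int inflight[2];

	part_in_flight(q, part, inflight);
	if (inflight[0] || inflight[1])
		pr_debug("requests still outstanding on this partition\n");
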
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bcfb9f7c46f5..f780718b7391 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -288,8 +288,6 @@ struct vm_area_struct;
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO (__GFP_RECLAIM)
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
-#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
- __GFP_RECLAIMABLE)
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA __GFP_DMA
#define GFP_DMA32 __GFP_DMA32
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index af20369ec8e7..c97f8325e8bf 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -180,8 +180,27 @@ struct gpio_chip {
* If CONFIG_OF is enabled, then all GPIO controllers described in the
* device tree automatically may have an OF translation
*/
+
+ /**
+ * @of_node:
+ *
+ * Pointer to a device tree node representing this GPIO controller.
+ */
struct device_node *of_node;
- int of_gpio_n_cells;
+
+ /**
+ * @of_gpio_n_cells:
+ *
+ * Number of cells used to form the GPIO specifier.
+ */
+ unsigned int of_gpio_n_cells;
+
+ /**
+ * @of_xlate:
+ *
+ * Callback to translate a device tree GPIO specifier into a chip-
+ * relative GPIO number and flags.
+ */
int (*of_xlate)(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags);
#endif
@@ -327,11 +346,10 @@ int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset,
/**
* struct gpio_pin_range - pin range controlled by a gpio chip
- * @head: list for maintaining set of pin ranges, used internally
+ * @node: list for maintaining set of pin ranges, used internally
* @pctldev: pinctrl device which handles corresponding pins
* @range: actual range of pins controlled by a gpio controller
*/
-
struct gpio_pin_range {
struct list_head node;
struct pinctrl_dev *pctldev;
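
For the newly documented @of_xlate hook, a minimal two-cell translate callback
(line offset in cell 0, flags in cell 1, per the common GPIO binding) might
look like this sketch:

	static int my_gpio_of_xlate(struct gpio_chip *gc,
				    const struct of_phandle_args *gpiospec,
				    u32 *flags)
	{
		if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
			return -EINVAL;
		if (gpiospec->args[0] >= gc->ngpio)
			return -EINVAL;
		if (flags)
			*flags = gpiospec->args[1];
		return gpiospec->args[0];	/* chip-relative line number */
	}
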
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 6e76b16fcade..ba4ccfd900f9 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -60,11 +60,14 @@ struct gpiod_lookup_table {
#ifdef CONFIG_GPIOLIB
void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
#else
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
static inline
+void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
+static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
#endif
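
A usage sketch for the new batch registration helper (the table names are
hypothetical):

	static struct gpiod_lookup_table *board_gpio_tables[] = {
		&backlight_gpio_table,
		&codec_gpio_table,
	};

	gpiod_add_lookup_tables(board_gpio_tables, ARRAY_SIZE(board_gpio_tables));
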
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 5006f9b5d837..ab05a86269dc 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -173,6 +173,7 @@ struct hid_item {
#define HID_UP_LOGIVENDOR3 0xff430000
#define HID_UP_LNVENDOR 0xffa00000
#define HID_UP_SENSOR 0x00200000
+#define HID_UP_ASUSVENDOR 0xff310000
#define HID_USAGE 0x0000ffff
@@ -292,6 +293,7 @@ struct hid_item {
#define HID_DG_BARRELSWITCH2 0x000d005a
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
+#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
/*
* HID report types --- Ouch! HID spec says 1 2 3!
*/
@@ -363,6 +365,12 @@ struct hid_item {
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
/*
+ * HID protocol status
+ */
+#define HID_REPORT_PROTOCOL 1
+#define HID_BOOT_PROTOCOL 0
+
+/*
* This is the global environment of the parser. This information is
* persistent for main-items. The global environment can be saved and
* restored with PUSH/POP statements.
@@ -526,7 +534,6 @@ struct hid_device { /* device report descriptor */
struct hid_report_enum report_enum[HID_REPORT_TYPES];
struct work_struct led_work; /* delayed LED worker */
- struct semaphore driver_lock; /* protects the current driver, except during input */
struct semaphore driver_input_lock; /* protects the current driver */
struct device dev; /* device */
struct hid_driver *driver;
@@ -542,16 +549,18 @@ struct hid_device { /* device report descriptor */
* battery is non-NULL.
*/
struct power_supply *battery;
+ __s32 battery_capacity;
__s32 battery_min;
__s32 battery_max;
__s32 battery_report_type;
__s32 battery_report_id;
+ bool battery_reported;
#endif
unsigned int status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
- bool io_started; /* Protected by driver_lock. If IO has started */
+ bool io_started; /* If IO has started */
struct list_head inputs; /* The list of inputs */
void *hiddev; /* The hiddev structure */
@@ -777,6 +786,17 @@ struct hid_ll_driver {
int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
};
+extern struct hid_ll_driver i2c_hid_ll_driver;
+extern struct hid_ll_driver hidp_hid_driver;
+extern struct hid_ll_driver uhid_hid_driver;
+extern struct hid_ll_driver usb_hid_driver;
+
+static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
+ struct hid_ll_driver *driver)
+{
+ return hdev->ll_driver == driver;
+}
+
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
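
The exported ll_driver symbols together with hid_is_using_ll_driver() let a
driver branch on the underlying transport, for example:

	if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
		/* USB transport: USB-specific quirks are safe here */
	} else if (hid_is_using_ll_driver(hdev, &i2c_hid_ll_driver)) {
		/* I2C-HID transport */
	}
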
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
new file mode 100644
index 000000000000..96e69979f84d
--- /dev/null
+++ b/include/linux/hmm.h
@@ -0,0 +1,520 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Authors: Jérôme Glisse <jglisse@redhat.com>
+ */
+/*
+ * Heterogeneous Memory Management (HMM)
+ *
+ * See Documentation/vm/hmm.txt for reasons and an overview of what HMM is and
+ * what it is for. Here we focus on the HMM API description, with some
+ * explanation of the underlying implementation.
+ *
+ * Short description: HMM provides a set of helpers to share a virtual address
+ * space between CPU and a device, so that the device can access any valid
+ * address of the process (while still obeying memory protection). HMM also
+ * provides helpers to migrate process memory to device memory, and back. Each
+ * set of functionality (address space mirroring, and migration to and from
+ * device memory) can be used independently of the other.
+ *
+ *
+ * HMM address space mirroring API:
+ *
+ * Use HMM address space mirroring if you want to mirror a range of the CPU page
+ * table of a process into a device page table. Here, "mirror" means "keep
+ * synchronized". Prerequisites: the device must provide the ability to write-
+ * protect its page tables (at PAGE_SIZE granularity), and must be able to
+ * recover from the resulting potential page faults.
+ *
+ * HMM guarantees that at any point in time, a given virtual address points to
+ * either the same memory in both CPU and device page tables (that is: CPU and
+ * device page tables each point to the same pages), or that one page table (CPU
+ * or device) points to no entry, while the other still points to the old page
+ * for the address. The latter case happens when the CPU page table update
+ * happens first, and then the update is mirrored over to the device page table.
+ * This does not cause any issue, because the CPU page table cannot start
+ * pointing to a new page until the device page table is invalidated.
+ *
+ * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
+ * updates to each device driver that has registered a mirror. It also provides
+ * some API calls to help with taking a snapshot of the CPU page table, and to
+ * synchronize with any updates that might happen concurrently.
+ *
+ *
+ * HMM migration to and from device memory:
+ *
+ * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
+ * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
+ * of the device memory, and allows the device driver to manage its memory
+ * using those struct pages. Having struct pages for device memory makes
+ * migration easier. Because that memory is not addressable by the CPU it must
+ * never be pinned to the device; in other words, any CPU page fault can always
+ * cause the device memory to be migrated (copied/moved) back to regular memory.
+ *
+ * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
+ * allows use of a device DMA engine to perform the copy operation between
+ * regular system memory and device memory.
+ */
+#ifndef LINUX_HMM_H
+#define LINUX_HMM_H
+
+#include <linux/kconfig.h>
+
+#if IS_ENABLED(CONFIG_HMM)
+
+#include <linux/device.h>
+#include <linux/migrate.h>
+#include <linux/memremap.h>
+#include <linux/completion.h>
+
+struct hmm;
+
+/*
+ * hmm_pfn_t - HMM uses its own pfn type to keep several flags per page
+ *
+ * Flags:
+ * HMM_PFN_VALID: pfn is valid
+ * HMM_PFN_READ: CPU page table has read permission set
+ * HMM_PFN_WRITE: CPU page table has write permission set
+ * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
+ * HMM_PFN_EMPTY: corresponding CPU page table entry is pte_none()
+ * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
+ * result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not
+ * be mirrored by a device, because the entry will never have HMM_PFN_VALID
+ * set and the pfn value is undefined.
+ * HMM_PFN_DEVICE_UNADDRESSABLE: unaddressable device memory (ZONE_DEVICE)
+ */
+typedef unsigned long hmm_pfn_t;
+
+#define HMM_PFN_VALID (1 << 0)
+#define HMM_PFN_READ (1 << 1)
+#define HMM_PFN_WRITE (1 << 2)
+#define HMM_PFN_ERROR (1 << 3)
+#define HMM_PFN_EMPTY (1 << 4)
+#define HMM_PFN_SPECIAL (1 << 5)
+#define HMM_PFN_DEVICE_UNADDRESSABLE (1 << 6)
+#define HMM_PFN_SHIFT 7
+
+/*
+ * hmm_pfn_t_to_page() - return struct page pointed to by a valid hmm_pfn_t
+ * @pfn: hmm_pfn_t to convert to struct page
+ * Returns: struct page pointer if pfn is a valid hmm_pfn_t, NULL otherwise
+ *
+ * If the hmm_pfn_t is valid (i.e., the valid flag is set) then return the struct
+ * matching the pfn value stored in the hmm_pfn_t. Otherwise return NULL.
+ */
+static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn)
+{
+ if (!(pfn & HMM_PFN_VALID))
+ return NULL;
+ return pfn_to_page(pfn >> HMM_PFN_SHIFT);
+}
+
+/*
+ * hmm_pfn_t_to_pfn() - return the pfn value stored in an hmm_pfn_t
+ * @pfn: hmm_pfn_t to extract pfn from
+ * Returns: pfn value if hmm_pfn_t is valid, -1UL otherwise
+ */
+static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn)
+{
+ if (!(pfn & HMM_PFN_VALID))
+ return -1UL;
+ return (pfn >> HMM_PFN_SHIFT);
+}
+
+/*
+ * hmm_pfn_t_from_page() - create a valid hmm_pfn_t value from struct page
+ * @page: struct page pointer for which to create the hmm_pfn_t
+ * Returns: valid hmm_pfn_t for the page
+ */
+static inline hmm_pfn_t hmm_pfn_t_from_page(struct page *page)
+{
+ return (page_to_pfn(page) << HMM_PFN_SHIFT) | HMM_PFN_VALID;
+}
+
+/*
+ * hmm_pfn_t_from_pfn() - create a valid hmm_pfn_t value from pfn
+ * @pfn: pfn value for which to create the hmm_pfn_t
+ * Returns: valid hmm_pfn_t for the pfn
+ */
+static inline hmm_pfn_t hmm_pfn_t_from_pfn(unsigned long pfn)
+{
+ return (pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID;
+}
+
+
+#if IS_ENABLED(CONFIG_HMM_MIRROR)
+/*
+ * Mirroring: how to synchronize device page table with CPU page table.
+ *
+ * A device driver that is participating in HMM mirroring must always
+ * synchronize with CPU page table updates. For this, device drivers can either
+ * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
+ * drivers can decide to register one mirror per device per process, or just
+ * one mirror per process for a group of devices. The pattern is:
+ *
+ * int device_bind_address_space(..., struct mm_struct *mm, ...)
+ * {
+ * struct device_address_space *das;
+ *
+ * // Device driver specific initialization, and allocation of das
+ * // which contains an hmm_mirror struct as one of its fields.
+ * ...
+ *
+ * ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops);
+ * if (ret) {
+ * // Cleanup on error
+ * return ret;
+ * }
+ *
+ * // Other device driver specific initialization
+ * ...
+ * }
+ *
+ * Once an hmm_mirror is registered for an address space, the device driver
+ * will get callbacks through the sync_cpu_device_pagetables() operation (see
+ * the hmm_mirror_ops struct).
+ *
+ * The device driver must not free the struct containing the hmm_mirror struct
+ * before calling hmm_mirror_unregister(). The expected usage is to do that when
+ * the device driver is unbinding from an address space.
+ *
+ *
+ * void device_unbind_address_space(struct device_address_space *das)
+ * {
+ * // Device driver specific cleanup
+ * ...
+ *
+ * hmm_mirror_unregister(&das->mirror);
+ *
+ * // Other device driver specific cleanup, and now das can be freed
+ * ...
+ * }
+ */
+
+struct hmm_mirror;
+
+/*
+ * enum hmm_update_type - type of update
+ * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
+ */
+enum hmm_update_type {
+ HMM_UPDATE_INVALIDATE,
+};
+
+/*
+ * struct hmm_mirror_ops - HMM mirror device operations callback
+ *
+ * @update: callback to update range on a device
+ */
+struct hmm_mirror_ops {
+ /* sync_cpu_device_pagetables() - synchronize page tables
+ *
+ * @mirror: pointer to struct hmm_mirror
+ * @update_type: type of update that occurred to the CPU page table
+ * @start: virtual start address of the range to update
+ * @end: virtual end address of the range to update
+ *
+ * This callback ultimately originates from mmu_notifiers when the CPU
+ * page table is updated. The device driver must update its page table
+	 * in response to this callback. The @update_type argument tells what
+	 * action to perform.
+ *
+ * The device driver must not return from this callback until the device
+ * page tables are completely updated (TLBs flushed, etc); this is a
+ * synchronous call.
+ */
+ void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
+ enum hmm_update_type update_type,
+ unsigned long start,
+ unsigned long end);
+};
+
+/*
+ * struct hmm_mirror - mirror struct for a device driver
+ *
+ * @hmm: pointer to struct hmm (which is unique per mm_struct)
+ * @ops: device driver callback for HMM mirror operations
+ * @list: for list of mirrors of a given mm
+ *
+ * Each address space (mm_struct) being mirrored by a device must register one
+ * instance of an hmm_mirror struct with HMM. HMM will track the list of all
+ * mirrors for each mm_struct.
+ */
+struct hmm_mirror {
+ struct hmm *hmm;
+ const struct hmm_mirror_ops *ops;
+ struct list_head list;
+};
+
+int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
+void hmm_mirror_unregister(struct hmm_mirror *mirror);
+
+
+/*
+ * struct hmm_range - track invalidation lock on virtual address range
+ *
+ * @list: all range locks are kept on a list
+ * @start: range virtual start address (inclusive)
+ * @end: range virtual end address (exclusive)
+ * @pfns: array of pfns (big enough for the range)
+ * @valid: pfns array did not change since it was filled by an HMM function
+ */
+struct hmm_range {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+ hmm_pfn_t *pfns;
+ bool valid;
+};
+
+/*
+ * To snapshot the CPU page table, call hmm_vma_get_pfns(), then take a device
+ * driver lock that serializes device page table updates, then call
+ * hmm_vma_range_done(), to check if the snapshot is still valid. The same
+ * device driver page table update lock must also be used in the
+ * hmm_mirror_ops.sync_cpu_device_pagetables() callback, so that CPU page
+ * table invalidation serializes on it.
+ *
+ * YOU MUST CALL hmm_vma_range_done() ONCE AND ONLY ONCE EACH TIME YOU CALL
+ * hmm_vma_get_pfns() WITHOUT ERROR!
+ *
+ * IF YOU DO NOT FOLLOW THE ABOVE RULE THE SNAPSHOT CONTENT MIGHT BE INVALID!
+ */
+int hmm_vma_get_pfns(struct vm_area_struct *vma,
+ struct hmm_range *range,
+ unsigned long start,
+ unsigned long end,
+ hmm_pfn_t *pfns);
+bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range);
+
+
+/*
+ * Fault memory on behalf of device driver. Unlike handle_mm_fault(), this will
+ * not migrate any device memory back to system memory. The hmm_pfn_t array will
+ * be updated with the fault result and current snapshot of the CPU page table
+ * for the range.
+ *
+ * The mmap_sem must be taken in read mode before entering and it might be
+ * dropped by the function if the block argument is false. In that case, the
+ * function returns -EAGAIN.
+ *
+ * Return value does not reflect if the fault was successful for every single
+ * address or not. Therefore, the caller must inspect the hmm_pfn_t array to
+ * determine fault status for each address.
+ *
+ * Trying to fault inside an invalid vma will result in -EINVAL.
+ *
+ * See the function description in mm/hmm.c for further documentation.
+ */
+int hmm_vma_fault(struct vm_area_struct *vma,
+ struct hmm_range *range,
+ unsigned long start,
+ unsigned long end,
+ hmm_pfn_t *pfns,
+ bool write,
+ bool block);
+#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
+
+
+#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
+struct hmm_devmem;
+
+struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
+ unsigned long addr);
+
+/*
+ * struct hmm_devmem_ops - callback for ZONE_DEVICE memory events
+ *
+ * @free: called when the page refcount reaches 1, i.e. the page is no longer in use
+ * @fault: called when there is a CPU page fault on unaddressable memory
+ *
+ * These callbacks happen from the page_free() and page_fault() callbacks of
+ * struct dev_pagemap, respectively. See include/linux/memremap.h for more
+ * details on those.
+ *
+ * The hmm_devmem_ops callbacks are here to provide a coherent and unified
+ * API for device drivers; a driver should not register its own page_free()
+ * or page_fault() handlers, but rely on the hmm_devmem_ops callbacks.
+ */
+struct hmm_devmem_ops {
+ /*
+ * free() - free a device page
+ * @devmem: device memory structure (see struct hmm_devmem)
+ * @page: pointer to struct page being freed
+ *
+	 * This callback occurs whenever a device page's refcount reaches 1,
+	 * which means that no one is holding any reference on the page
+	 * anymore (ZONE_DEVICE pages have a default elevated refcount of 1
+	 * so that they are not released to the general page allocator).
+	 *
+	 * Note that the callback has exclusive ownership of the page (as no
+	 * one else is holding any reference).
+ */
+ void (*free)(struct hmm_devmem *devmem, struct page *page);
+ /*
+ * fault() - CPU page fault or get user page (GUP)
+ * @devmem: device memory structure (see struct hmm_devmem)
+ * @vma: virtual memory area containing the virtual address
+ * @addr: virtual address that faulted or for which there is a GUP
+ * @page: pointer to struct page backing virtual address (unreliable)
+ * @flags: FAULT_FLAG_* (see include/linux/mm.h)
+ * @pmdp: page middle directory
+ * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
+ * on error
+ *
+ * The callback occurs whenever there is a CPU page fault or GUP on a
+ * virtual address. This means that the device driver must migrate the
+ * page back to regular memory (CPU accessible).
+ *
+ * The device driver is free to migrate more than one page from the
+	 * fault() callback as an optimization. However, if the device decides
+	 * to migrate more than one page, it must always prioritize the
+	 * faulting address over the others.
+	 *
+	 * The struct page pointer is only given as a hint to allow quick
+	 * lookup of internal device driver data. A concurrent migration
+	 * might have already freed that page, and the virtual address might
+	 * no longer be backed by it, so the page must not be modified by the
+	 * callback.
+ *
+ * Note that mmap semaphore is held in read mode at least when this
+ * callback occurs, hence the vma is valid upon callback entry.
+ */
+ int (*fault)(struct hmm_devmem *devmem,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ const struct page *page,
+ unsigned int flags,
+ pmd_t *pmdp);
+};
+
+/*
+ * struct hmm_devmem - track device memory
+ *
+ * @completion: completion object for device memory
+ * @pfn_first: first pfn for this resource (set by hmm_devmem_add())
+ * @pfn_last: last pfn for this resource (set by hmm_devmem_add())
+ * @resource: IO resource reserved for this chunk of memory
+ * @pagemap: device page map for that chunk
+ * @device: device to bind resource to
+ * @ops: memory operations callback
+ * @ref: per CPU refcount
+ *
+ * This is a helper structure for device drivers that do not wish to implement
+ * the gory details of hotplugging new memory and allocating struct
+ * pages.
+ *
+ * Device drivers can directly use ZONE_DEVICE memory on their own if they
+ * wish to do so.
+ */
+struct hmm_devmem {
+ struct completion completion;
+ unsigned long pfn_first;
+ unsigned long pfn_last;
+ struct resource *resource;
+ struct device *device;
+ struct dev_pagemap pagemap;
+ const struct hmm_devmem_ops *ops;
+ struct percpu_ref ref;
+};
+
+/*
+ * To add (hotplug) device memory, HMM assumes that there is no real resource
+ * that reserves a range in the physical address space (this is intended to be
+ * used for unaddressable device memory). It will reserve a physical range big
+ * enough and allocate struct pages for it.
+ *
+ * The device driver can wrap the hmm_devmem struct inside a private device
+ * driver struct. The device driver must call hmm_devmem_remove() before the
+ * device goes away and before freeing the hmm_devmem struct memory.
+ */
+struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
+ struct device *device,
+ unsigned long size);
+struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
+ struct device *device,
+ struct resource *res);
+void hmm_devmem_remove(struct hmm_devmem *devmem);
+
+/*
+ * hmm_devmem_page_set_drvdata - set per-page driver data field
+ *
+ * @page: pointer to struct page
+ * @data: driver data value to set
+ *
+ * Because the page cannot be on the LRU, we have an unsigned long that the
+ * driver can use to store a per-page field. This is just a simple helper for that.
+ */
+static inline void hmm_devmem_page_set_drvdata(struct page *page,
+ unsigned long data)
+{
+ unsigned long *drvdata = (unsigned long *)&page->pgmap;
+
+ drvdata[1] = data;
+}
+
+/*
+ * hmm_devmem_page_get_drvdata - get per-page driver data field
+ *
+ * @page: pointer to struct page
+ * Return: driver data value
+ */
+static inline unsigned long hmm_devmem_page_get_drvdata(struct page *page)
+{
+ unsigned long *drvdata = (unsigned long *)&page->pgmap;
+
+ return drvdata[1];
+}
+
+
+/*
+ * struct hmm_device - fake device to hang device memory onto
+ *
+ * @device: device struct
+ * @minor: device minor number
+ */
+struct hmm_device {
+ struct device device;
+ unsigned int minor;
+};
+
+/*
+ * A device driver that wants to handle the memory of multiple devices through
+ * a single fake device can use hmm_device to do so. This is purely a helper;
+ * it is not strictly needed in order to make use of any HMM functionality.
+ */
+struct hmm_device *hmm_device_new(void *drvdata);
+void hmm_device_put(struct hmm_device *hmm_device);
+#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+#endif /* IS_ENABLED(CONFIG_HMM) */
+
+/* Below are for HMM internal use only! Not to be used by device driver! */
+#if IS_ENABLED(CONFIG_HMM_MIRROR)
+void hmm_mm_destroy(struct mm_struct *mm);
+
+static inline void hmm_mm_init(struct mm_struct *mm)
+{
+ mm->hmm = NULL;
+}
+#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
+static inline void hmm_mm_destroy(struct mm_struct *mm) {}
+static inline void hmm_mm_init(struct mm_struct *mm) {}
+#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
+
+
+#else /* IS_ENABLED(CONFIG_HMM) */
+static inline void hmm_mm_destroy(struct mm_struct *mm) {}
+static inline void hmm_mm_init(struct mm_struct *mm) {}
+#endif /* LINUX_HMM_H */
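
Pulling the snapshot rules above together, a driver-side sketch of the
hmm_vma_get_pfns()/hmm_vma_range_done() protocol; dev_pt_lock is a
hypothetical driver mutex, the same one taken in the driver's
sync_cpu_device_pagetables() callback:

again:
	ret = hmm_vma_get_pfns(vma, &range, start, end, pfns);
	if (ret)
		return ret;

	mutex_lock(&dev_pt_lock);
	if (!hmm_vma_range_done(vma, &range)) {
		/* The snapshot raced with a CPU page table update; retry. */
		mutex_unlock(&dev_pt_lock);
		goto again;
	}
	/* ... program the device page table from range.pfns[] ... */
	mutex_unlock(&dev_pt_lock);
	return 0;
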
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ee696347f928..14bc21c2ee7f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -147,7 +147,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
pmd_t *____pmd = (__pmd); \
- if (pmd_trans_huge(*____pmd) \
+ if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \
|| pmd_devmap(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address, \
false, NULL); \
@@ -178,12 +178,18 @@ extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma);
+
+static inline int is_swap_pmd(pmd_t pmd)
+{
+ return !pmd_none(pmd) && !pmd_present(pmd);
+}
+
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
- if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+ if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
else
return NULL;
@@ -233,6 +239,11 @@ void mm_put_huge_zero_page(struct mm_struct *mm);
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
+static inline bool thp_migration_supported(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
+}
+
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -294,6 +305,10 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
long adjust_next)
{
}
+static inline int is_swap_pmd(pmd_t pmd)
+{
+ return 0;
+}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
@@ -336,6 +351,11 @@ static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
{
return NULL;
}
+
+static inline bool thp_migration_supported(void)
+{
+ return false;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_HUGE_MM_H */
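
With is_swap_pmd() folded into the lock helper, a page table walker that may
now encounter THP migration entries looks roughly like this sketch:

	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

	if (ptl) {
		if (is_swap_pmd(*pmd)) {
			/* migration entry: wait for it or skip (caller policy) */
		} else {
			/* genuine huge or devmap pmd */
		}
		spin_unlock(ptl);
	}
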
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 0ed8e41aaf11..8bbbd37ab105 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -358,6 +358,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
/* arch callback */
+int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);
void __init hugetlb_bad_size(void);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b7d7bbec74e0..6431087816ba 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -124,10 +124,7 @@ struct hv_ring_buffer_info {
spinlock_t ring_lock;
u32 ring_datasize; /* < ring_size */
- u32 ring_data_startoffset;
- u32 priv_write_index;
u32 priv_read_index;
- u32 cached_read_index;
};
/*
@@ -180,19 +177,6 @@ static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
return write;
}
-static inline u32 hv_get_cached_bytes_to_write(
- const struct hv_ring_buffer_info *rbi)
-{
- u32 read_loc, write_loc, dsize, write;
-
- dsize = rbi->ring_datasize;
- read_loc = rbi->cached_read_index;
- write_loc = rbi->ring_buffer->write_index;
-
- write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
- read_loc - write_loc;
- return write;
-}
/*
* VMBUS version is 32 bit entity broken up into
* two 16 bit quantities: major_number. minor_number.
@@ -677,18 +661,6 @@ union hv_connection_id {
} u;
};
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
- union hv_connection_id connectionid;
- u16 flag_number;
- u16 rsvdz;
-};
-
-struct hv_input_signal_event_buffer {
- u64 align8;
- struct hv_input_signal_event event;
-};
-
enum hv_numa_policy {
HV_BALANCED = 0,
HV_LOCALIZED,
@@ -770,8 +742,7 @@ struct vmbus_channel {
} callback_mode;
bool is_dedicated_interrupt;
- struct hv_input_signal_event_buffer sig_buf;
- struct hv_input_signal_event *sig_event;
+ u64 sig_event;
/*
* Starting with win8, this field will be used to specify
@@ -895,6 +866,8 @@ struct vmbus_channel {
*/
enum hv_numa_policy affinity_policy;
+ bool probe_done;
+
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -1030,13 +1003,6 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
enum vmbus_packet_type type,
u32 flags);
-extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
- void *buffer,
- u32 bufferLen,
- u64 requestid,
- enum vmbus_packet_type type,
- u32 flags);
-
extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
struct hv_page_buffer pagebuffers[],
u32 pagecount,
@@ -1044,20 +1010,6 @@ extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
u32 bufferlen,
u64 requestid);
-extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount,
- void *buffer,
- u32 bufferlen,
- u64 requestid,
- u32 flags);
-
-extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
- struct hv_multipage_buffer *mpb,
- void *buffer,
- u32 bufferlen,
- u64 requestid);
-
extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *mpb,
u32 desc_size,
@@ -1186,8 +1138,6 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t size, resource_size_t align,
bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
-int vmbus_cpu_number_to_vp_number(int cpu_number);
-u64 hv_do_hypercall(u64 control, void *input, void *output);
/*
* GUID definitions of various offer types - services offered to the guest.
@@ -1453,7 +1403,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
-void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+void hv_process_channel_removal(u32 relid);
void vmbus_setevent(struct vmbus_channel *channel);
/*
@@ -1474,55 +1424,6 @@ hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
}
/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- * producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- * the consumer of the ring will signal when the ring
- * state transitions from being full to a state where
- * there is room for the producer to send the pending packet.
- */
-
-static inline void hv_signal_on_read(struct vmbus_channel *channel)
-{
- u32 cur_write_sz, cached_write_sz;
- u32 pending_sz;
- struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- /*
- * Issue a full memory barrier before making the signaling decision.
- * Here is the reason for having this barrier:
- * If the reading of the pend_sz (in this function)
- * were to be reordered and read before we commit the new read
- * index (in the calling function) we could
- * have a problem. If the host were to set the pending_sz after we
- * have sampled pending_sz and go to sleep before we commit the
- * read index, we could miss sending the interrupt. Issue a full
- * memory barrier to address this.
- */
- virt_mb();
-
- pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
- /* If the other end is not blocked on write don't bother. */
- if (pending_sz == 0)
- return;
-
- cur_write_sz = hv_get_bytes_to_write(rbi);
-
- if (cur_write_sz < pending_sz)
- return;
-
- cached_write_sz = hv_get_cached_bytes_to_write(rbi);
- if (cached_write_sz < pending_sz)
- vmbus_setevent(channel);
-}
-
-/*
* Mask off host interrupt callback notifications
*/
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h
deleted file mode 100644
index a65c86429e84..000000000000
--- a/include/linux/i2c-mux-pinctrl.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * i2c-mux-pinctrl platform data
- *
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _LINUX_I2C_MUX_PINCTRL_H
-#define _LINUX_I2C_MUX_PINCTRL_H
-
-/**
- * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl
- * @parent_bus_num: Parent I2C bus number
- * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic.
- * @bus_count: Number of child busses. Also the number of elements in
- * @pinctrl_states
- * @pinctrl_states: The names of the pinctrl state to select for each child bus
- * @pinctrl_state_idle: The pinctrl state to select when no child bus is being
- * accessed. If NULL, the most recently used pinctrl state will be left
- * selected.
- */
-struct i2c_mux_pinctrl_platform_data {
- int parent_bus_num;
- int base_bus_num;
- int bus_count;
- const char **pinctrl_states;
- const char *pinctrl_state_idle;
-};
-
-#endif
diff --git a/include/linux/i2c/bfin_twi.h b/include/linux/i2c/bfin_twi.h
deleted file mode 100644
index 135a4e0876ae..000000000000
--- a/include/linux/i2c/bfin_twi.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * i2c-bfin-twi.h - interface to ADI TWI controller
- *
- * Copyright 2005-2014 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef __I2C_BFIN_TWI_H__
-#define __I2C_BFIN_TWI_H__
-
-#include <linux/types.h>
-#include <linux/i2c.h>
-
-/*
- * ADI twi registers layout
- */
-struct bfin_twi_regs {
- u16 clkdiv;
- u16 dummy1;
- u16 control;
- u16 dummy2;
- u16 slave_ctl;
- u16 dummy3;
- u16 slave_stat;
- u16 dummy4;
- u16 slave_addr;
- u16 dummy5;
- u16 master_ctl;
- u16 dummy6;
- u16 master_stat;
- u16 dummy7;
- u16 master_addr;
- u16 dummy8;
- u16 int_stat;
- u16 dummy9;
- u16 int_mask;
- u16 dummy10;
- u16 fifo_ctl;
- u16 dummy11;
- u16 fifo_stat;
- u16 dummy12;
- u32 __pad[20];
- u16 xmt_data8;
- u16 dummy13;
- u16 xmt_data16;
- u16 dummy14;
- u16 rcv_data8;
- u16 dummy15;
- u16 rcv_data16;
- u16 dummy16;
-};
-
-struct bfin_twi_iface {
- int irq;
- spinlock_t lock;
- char read_write;
- u8 command;
- u8 *transPtr;
- int readNum;
- int writeNum;
- int cur_mode;
- int manual_stop;
- int result;
- struct i2c_adapter adap;
- struct completion complete;
- struct i2c_msg *pmsg;
- int msg_num;
- int cur_msg;
- u16 saved_clkdiv;
- u16 saved_control;
- struct bfin_twi_regs __iomem *regs_base;
-};
-
-/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
-#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
-#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
-
-/* TWI_PRESCALE Masks */
-#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
-#define TWI_ENA 0x0080 /* TWI Enable */
-#define SCCB 0x0200 /* SCCB Compatibility Enable */
-
-/* TWI_SLAVE_CTL Masks */
-#define SEN 0x0001 /* Slave Enable */
-#define SADD_LEN 0x0002 /* Slave Address Length */
-#define STDVAL 0x0004 /* Slave Transmit Data Valid */
-#define NAK 0x0008 /* NAK Generated At Conclusion Of Transfer */
-#define GEN 0x0010 /* General Call Address Matching Enabled */
-
-/* TWI_SLAVE_STAT Masks */
-#define SDIR 0x0001 /* Slave Transfer Direction (RX/TX*) */
-#define GCALL 0x0002 /* General Call Indicator */
-
-/* TWI_MASTER_CTL Masks */
-#define MEN 0x0001 /* Master Mode Enable */
-#define MADD_LEN 0x0002 /* Master Address Length */
-#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
-#define FAST 0x0008 /* Use Fast Mode Timing Specs */
-#define STOP 0x0010 /* Issue Stop Condition */
-#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
-#define DCNT 0x3FC0 /* Data Bytes To Transfer */
-#define SDAOVR 0x4000 /* Serial Data Override */
-#define SCLOVR 0x8000 /* Serial Clock Override */
-
-/* TWI_MASTER_STAT Masks */
-#define MPROG 0x0001 /* Master Transfer In Progress */
-#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
-#define ANAK 0x0004 /* Address Not Acknowledged */
-#define DNAK 0x0008 /* Data Not Acknowledged */
-#define BUFRDERR 0x0010 /* Buffer Read Error */
-#define BUFWRERR 0x0020 /* Buffer Write Error */
-#define SDASEN 0x0040 /* Serial Data Sense */
-#define SCLSEN 0x0080 /* Serial Clock Sense */
-#define BUSBUSY 0x0100 /* Bus Busy Indicator */
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
-#define SINIT 0x0001 /* Slave Transfer Initiated */
-#define SCOMP 0x0002 /* Slave Transfer Complete */
-#define SERR 0x0004 /* Slave Transfer Error */
-#define SOVF 0x0008 /* Slave Overflow */
-#define MCOMP 0x0010 /* Master Transfer Complete */
-#define MERR 0x0020 /* Master Transfer Error */
-#define XMTSERV 0x0040 /* Transmit FIFO Service */
-#define RCVSERV 0x0080 /* Receive FIFO Service */
-
-/* TWI_FIFO_CTRL Masks */
-#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
-#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
-#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
-#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
-
-/* TWI_FIFO_STAT Masks */
-#define XMTSTAT 0x0003 /* Transmit FIFO Status */
-#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
-#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
-#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
-
-#define RCVSTAT 0x000C /* Receive FIFO Status */
-#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
-#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
-#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
-
-#endif
diff --git a/include/linux/idr.h b/include/linux/idr.h
index bf70b3ef0a07..7c3a365f7e12 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -80,19 +80,75 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
*/
void idr_preload(gfp_t gfp_mask);
-int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t);
+
+int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
+ unsigned long start, unsigned long end, gfp_t gfp,
+ bool ext);
+
+/**
+ * idr_alloc - allocate an id
+ * @idr: idr handle
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive)
+ * @gfp: memory allocation flags
+ *
+ * Allocates an unused ID in the range [start, end). Returns -ENOSPC
+ * if there are no unused IDs in that range.
+ *
+ * Note that @end is treated as max when <= 0. This allows callers to always
+ * use @start + N as @end, as long as N is inside the integer range.
+ *
+ * Simultaneous modifications to the @idr are not allowed and should be
+ * prevented by the user, usually with a lock. idr_alloc() may be called
+ * concurrently with read-only accesses to the @idr, such as idr_find() and
+ * idr_for_each_entry().
+ */
+static inline int idr_alloc(struct idr *idr, void *ptr,
+ int start, int end, gfp_t gfp)
+{
+ unsigned long id;
+ int ret;
+
+ if (WARN_ON_ONCE(start < 0))
+ return -EINVAL;
+
+ ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false);
+
+ if (ret)
+ return ret;
+
+ return id;
+}
+
+static inline int idr_alloc_ext(struct idr *idr, void *ptr,
+ unsigned long *index,
+ unsigned long start,
+ unsigned long end,
+ gfp_t gfp)
+{
+ return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true);
+}
+
int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
+void *idr_get_next_ext(struct idr *idr, unsigned long *nextid);
void *idr_replace(struct idr *, void *, int id);
+void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id);
void idr_destroy(struct idr *);
-static inline void *idr_remove(struct idr *idr, int id)
+static inline void *idr_remove_ext(struct idr *idr, unsigned long id)
{
return radix_tree_delete_item(&idr->idr_rt, id, NULL);
}
+static inline void *idr_remove(struct idr *idr, int id)
+{
+ return idr_remove_ext(idr, id);
+}
+
static inline void idr_init(struct idr *idr)
{
INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
@@ -128,11 +184,16 @@ static inline void idr_preload_end(void)
* This function can be called under rcu_read_lock(), given that the leaf
* pointers lifetimes are correctly managed.
*/
-static inline void *idr_find(const struct idr *idr, int id)
+static inline void *idr_find_ext(const struct idr *idr, unsigned long id)
{
return radix_tree_lookup(&idr->idr_rt, id);
}
+static inline void *idr_find(const struct idr *idr, int id)
+{
+ return idr_find_ext(idr, id);
+}
+
/**
* idr_for_each_entry - iterate over an idr's elements of a given type
* @idr: idr handle
@@ -145,6 +206,8 @@ static inline void *idr_find(const struct idr *idr, int id)
*/
#define idr_for_each_entry(idr, entry, id) \
for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
+#define idr_for_each_entry_ext(idr, entry, id) \
+ for (id = 0; ((entry) = idr_get_next_ext(idr, &(id))) != NULL; ++id)
/**
* idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
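
Typical call sites, following the semantics documented above (my_idr and obj
are placeholders):

	int id, ret;
	unsigned long index;

	/* Classic interface: IDs constrained to the int range. */
	id = idr_alloc(&my_idr, obj, 1, 100, GFP_KERNEL);	/* [1, 100) */
	if (id < 0)
		return id;		/* -ENOMEM, -ENOSPC or -EINVAL */

	/* _ext variant: full unsigned long space, ID returned via @index. */
	ret = idr_alloc_ext(&my_idr, obj, &index, 1, 100, GFP_KERNEL);
	if (ret)
		return ret;
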
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 97caf1821de8..f8231854b5d6 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -118,7 +118,8 @@ extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
struct ip_msfilter __user *optval, int __user *optlen);
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
struct group_filter __user *optval, int __user *optlen);
-extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif);
+extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt,
+ int dif, int sdif);
extern void ip_mc_init_dev(struct in_device *);
extern void ip_mc_destroy_dev(struct in_device *);
extern void ip_mc_up(struct in_device *);
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 5ba430cc9a87..1fc7abd28b0b 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -111,6 +111,9 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int *val);
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+ unsigned int reset_length);
+
int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val);
int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 97f1b465d04f..7b0fa8b5c120 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -332,4 +332,16 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf);
+#ifdef CONFIG_OF
+void st_sensors_of_name_probe(struct device *dev,
+ const struct of_device_id *match,
+ char *name, int len);
+#else
+static inline void st_sensors_of_name_probe(struct device *dev,
+ const struct of_device_id *match,
+ char *name, int len)
+{
+}
+#endif
+
#endif /* ST_SENSORS_H */
diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h
index 254de3c7dde8..0a2c25e06d1f 100644
--- a/include/linux/iio/common/st_sensors_i2c.h
+++ b/include/linux/iio/common/st_sensors_i2c.h
@@ -18,16 +18,6 @@
void st_sensors_i2c_configure(struct iio_dev *indio_dev,
struct i2c_client *client, struct st_sensor_data *sdata);
-#ifdef CONFIG_OF
-void st_sensors_of_i2c_probe(struct i2c_client *client,
- const struct of_device_id *match);
-#else
-static inline void st_sensors_of_i2c_probe(struct i2c_client *client,
- const struct of_device_id *match)
-{
-}
-#endif
-
#ifdef CONFIG_ACPI
int st_sensors_match_acpi_device(struct device *dev);
#else
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index d68bec297a45..c380daa40c0e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -535,7 +535,7 @@ struct iio_buffer_setup_ops {
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
* @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
- * @trig_readonly [INTERN] mark the current trigger immutable
+ * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h
new file mode 100644
index 000000000000..34d59bfdce2d
--- /dev/null
+++ b/include/linux/iio/timer/stm32-lptim-trigger.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) STMicroelectronics 2017
+ *
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STM32_LPTIM_TRIGGER_H_
+#define _STM32_LPTIM_TRIGGER_H_
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+
+#define LPTIM1_OUT "lptim1_out"
+#define LPTIM2_OUT "lptim2_out"
+#define LPTIM3_OUT "lptim3_out"
+
+#if IS_ENABLED(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
+bool is_stm32_lptim_trigger(struct iio_trigger *trig);
+#else
+static inline bool is_stm32_lptim_trigger(struct iio_trigger *trig)
+{
+ return false;
+}
+#endif
+#endif
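
An IIO device driver can use the predicate in its validate_trigger callback to
accept only LPTIM triggers; when trigger support is compiled out, the stub's
false return rejects everything:

	static int my_validate_trigger(struct iio_dev *indio_dev,
				       struct iio_trigger *trig)
	{
		return is_stm32_lptim_trigger(trig) ? 0 : -EINVAL;
	}
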
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
index fa7d786ed99e..d68add80ab86 100644
--- a/include/linux/iio/timer/stm32-timer-trigger.h
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -55,10 +55,24 @@
#define TIM9_CH1 "tim9_ch1"
#define TIM9_CH2 "tim9_ch2"
+#define TIM10_OC1 "tim10_oc1"
+
+#define TIM11_OC1 "tim11_oc1"
+
#define TIM12_TRGO "tim12_trgo"
#define TIM12_CH1 "tim12_ch1"
#define TIM12_CH2 "tim12_ch2"
+#define TIM13_OC1 "tim13_oc1"
+
+#define TIM14_OC1 "tim14_oc1"
+
+#define TIM15_TRGO "tim15_trgo"
+
+#define TIM16_OC1 "tim16_oc1"
+
+#define TIM17_OC1 "tim17_oc1"
+
bool is_stm32_timer_trigger(struct iio_trigger *trig);
#endif
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index ea08302f2d7b..7142d8d6e470 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -144,8 +144,8 @@ void devm_iio_trigger_unregister(struct device *dev,
/**
* iio_trigger_set_immutable() - set an immutable trigger on destination
*
- * @indio_dev - IIO device structure containing the device
- * @trig - trigger to assign to device
+ * @indio_dev: IIO device structure containing the device
+ * @trig: trigger to assign to device
*
**/
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 65da430e260f..ee251c585854 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -25,6 +25,13 @@ struct inet_diag_handler {
struct inet_diag_msg *r,
void *info);
+ int (*idiag_get_aux)(struct sock *sk,
+ bool net_admin,
+ struct sk_buff *skb);
+
+ size_t (*idiag_get_aux_size)(struct sock *sk,
+ bool net_admin);
+
int (*destroy)(struct sk_buff *in_skb,
const struct inet_diag_req_v2 *req);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a2f6707e9fc0..3c07ace5b431 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -126,17 +126,11 @@ extern struct group_info init_groups;
#endif
#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_TREE_PREEMPT() \
- .rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_TREE_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
.rcu_read_unlock_special.s = 0, \
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
- INIT_TASK_RCU_TREE_PREEMPT()
+ .rcu_blocked_node = NULL,
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
@@ -181,9 +175,8 @@ extern struct cred init_cred;
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = RB_ROOT, \
- .pi_top_task = NULL, \
- .pi_waiters_leftmost = NULL,
+ .pi_waiters = RB_ROOT_CACHED, \
+ .pi_top_task = NULL,
#else
# define INIT_RT_MUTEXES(tsk)
#endif
diff --git a/include/linux/input.h b/include/linux/input.h
index a65e3b24fb18..fb5e23c7ed98 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -529,6 +529,7 @@ int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code,
int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file);
int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file);
+int input_ff_flush(struct input_dev *dev, struct file *file);
int input_ff_create_memless(struct input_dev *dev, void *data,
int (*play_effect)(struct input_dev *, void *, struct ff_effect *));
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a2fddddb0d60..59ba11661b6e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -18,6 +18,7 @@
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
+#include <asm/sections.h>
/*
* These correspond to the IORESOURCE_IRQ_* defines in
@@ -726,7 +727,6 @@ extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
/*
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
@@ -734,16 +734,4 @@ extern int arch_early_irq_init(void);
#define __softirq_entry \
__attribute__((__section__(".softirqentry.text")))
-/* Limits of hardirq entrypoints */
-extern char __irqentry_text_start[];
-extern char __irqentry_text_end[];
-/* Limits of softirq entrypoints */
-extern char __softirqentry_text_start[];
-extern char __softirqentry_text_end[];
-
-#else
-#define __irq_entry
-#define __softirq_entry
-#endif
-
#endif
diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h
index 724556aa3c95..202ee1283f4b 100644
--- a/include/linux/interval_tree.h
+++ b/include/linux/interval_tree.h
@@ -11,13 +11,15 @@ struct interval_tree_node {
};
extern void
-interval_tree_insert(struct interval_tree_node *node, struct rb_root *root);
+interval_tree_insert(struct interval_tree_node *node,
+ struct rb_root_cached *root);
extern void
-interval_tree_remove(struct interval_tree_node *node, struct rb_root *root);
+interval_tree_remove(struct interval_tree_node *node,
+ struct rb_root_cached *root);
extern struct interval_tree_node *
-interval_tree_iter_first(struct rb_root *root,
+interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
extern struct interval_tree_node *
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index 58370e1862ad..1f97ce26cccc 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -33,7 +33,7 @@
* ITSTATIC: 'static' or empty
* ITPREFIX: prefix to use for the inline tree definitions
*
- * Note - before using this, please consider if non-generic version
+ * Note - before using this, please consider if the generic version
* (interval_tree.h) would work for you...
*/
@@ -65,11 +65,13 @@ RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
\
/* Insert / remove interval nodes from the tree */ \
\
-ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
+ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \
+ struct rb_root_cached *root) \
{ \
- struct rb_node **link = &root->rb_node, *rb_parent = NULL; \
+ struct rb_node **link = &root->rb_root.rb_node, *rb_parent = NULL; \
ITTYPE start = ITSTART(node), last = ITLAST(node); \
ITSTRUCT *parent; \
+ bool leftmost = true; \
\
while (*link) { \
rb_parent = *link; \
@@ -78,18 +80,22 @@ ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
parent->ITSUBTREE = last; \
if (start < ITSTART(parent)) \
link = &parent->ITRB.rb_left; \
- else \
+ else { \
link = &parent->ITRB.rb_right; \
+ leftmost = false; \
+ } \
} \
\
node->ITSUBTREE = last; \
rb_link_node(&node->ITRB, rb_parent, link); \
- rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
+ rb_insert_augmented_cached(&node->ITRB, root, \
+ leftmost, &ITPREFIX ## _augment); \
} \
\
-ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \
+ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \
+ struct rb_root_cached *root) \
{ \
- rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
+ rb_erase_augmented_cached(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
/* \
@@ -140,15 +146,35 @@ ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
} \
\
ITSTATIC ITSTRUCT * \
-ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \
+ITPREFIX ## _iter_first(struct rb_root_cached *root, \
+ ITTYPE start, ITTYPE last) \
{ \
- ITSTRUCT *node; \
+ ITSTRUCT *node, *leftmost; \
\
- if (!root->rb_node) \
+ if (!root->rb_root.rb_node) \
return NULL; \
- node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
+ \
+ /* \
+ * Fastpath range intersection/overlap between A: [a0, a1] and \
+ * B: [b0, b1] is given by: \
+ * \
+ * a0 <= b1 && b0 <= a1 \
+ * \
+	 * ... where A holds the lock range and B holds the smallest \
+	 * 'start' and largest 'last' in the tree. For the latter, we \
+	 * rely on the root node, which by the augmented interval tree \
+	 * property holds the largest value in its last-in-subtree. \
+	 * This allows mitigating some of the tree walk overhead \
+	 * for non-intersecting ranges, maintained and consulted in O(1). \
+ */ \
+ node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \
if (node->ITSUBTREE < start) \
return NULL; \
+ \
+ leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \
+ if (ITSTART(leftmost) > last) \
+ return NULL; \
+ \
return ITPREFIX ## _subtree_search(node, start, last); \
} \
\
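
For reference, instantiating the template over the new cached root might look
like this sketch (names hypothetical; parameter order per INTERVAL_TREE_DEFINE):

	struct my_node {
		struct rb_node rb;
		unsigned long start, last;	/* interval endpoints, inclusive */
		unsigned long subtree_last;	/* augmented value */
	};

	#define NODE_START(n)	((n)->start)
	#define NODE_LAST(n)	((n)->last)

	INTERVAL_TREE_DEFINE(struct my_node, rb, unsigned long, subtree_last,
			     NODE_START, NODE_LAST, static, my_it)

	/* Trees are now rooted in a cached root: */
	struct rb_root_cached root = RB_ROOT_CACHED;
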
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2cb54adc4a33..41b8c5757859 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -167,7 +167,11 @@ struct iommu_resv_region {
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
* @map_sg: map a scatter-gather list of physically contiguous memory chunks
- * to an iommu domain
+ * to an iommu domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
+ * @iotlb_range_add: Add a given iova range to the flush queue for this domain
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty the
+ * flush queue
* @iova_to_phys: translate iova to physical address
* @add_device: add device to iommu grouping
* @remove_device: remove device from iommu grouping
@@ -199,6 +203,10 @@ struct iommu_ops {
size_t size);
size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot);
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
+ void (*iotlb_range_add)(struct iommu_domain *domain,
+ unsigned long iova, size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
@@ -225,6 +233,7 @@ struct iommu_ops {
u32 (*domain_get_windows)(struct iommu_domain *domain);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+ bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
unsigned long pgsize_bitmap;
};
@@ -240,7 +249,7 @@ struct iommu_device {
struct list_head list;
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
- struct device dev;
+ struct device *dev;
};
int iommu_device_register(struct iommu_device *iommu);
@@ -265,6 +274,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
iommu->fwnode = fwnode;
}
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
+{
+ return (struct iommu_device *)dev_get_drvdata(dev);
+}
+
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@@ -286,7 +300,9 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size);
+ size_t size);
+extern size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, size_t size);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents,
int prot);
@@ -343,6 +359,25 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+{
+ if (domain->ops->flush_iotlb_all)
+ domain->ops->flush_iotlb_all(domain);
+}
+
+static inline void iommu_tlb_range_add(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ if (domain->ops->iotlb_range_add)
+ domain->ops->iotlb_range_add(domain, iova, size);
+}
+
+static inline void iommu_tlb_sync(struct iommu_domain *domain)
+{
+ if (domain->ops->iotlb_sync)
+ domain->ops->iotlb_sync(domain);
+}
+
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
@@ -425,13 +460,19 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
}
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int prot)
+ phys_addr_t paddr, size_t size, int prot)
{
return -ENODEV;
}
static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- int gfp_order)
+ size_t size)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova,
+ size_t size)
{
return -ENODEV;
}
@@ -443,6 +484,19 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return -ENODEV;
}
+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_tlb_range_add(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+}
+
+static inline void iommu_tlb_sync(struct iommu_domain *domain)
+{
+}
+
static inline int iommu_domain_window_enable(struct iommu_domain *domain,
u32 wnd_nr, phys_addr_t paddr,
u64 size, int prot)
@@ -589,6 +643,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
{
}
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
+{
+ return NULL;
+}
+
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
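Taken together, the additions give callers a split unmap/invalidate flow. A hedged sketch of batching with the new helpers (iommu_unmap_fast(), iommu_tlb_range_add(), iommu_tlb_sync()); the surrounding function and arrays are illustrative:

	/* Sketch: tear down several mappings, then flush the IOTLB once. */
	static void unmap_batch(struct iommu_domain *domain,
				unsigned long *iovas, size_t *sizes, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			/* Unmap without an implicit IOTLB flush. */
			iommu_unmap_fast(domain, iovas[i], sizes[i]);
			/* Queue the range for deferred invalidation. */
			iommu_tlb_range_add(domain, iovas[i], sizes[i]);
		}
		/* Single synchronous flush covers the whole batch. */
		iommu_tlb_sync(domain);
	}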
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 6230064d7f95..f5cf32e80041 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -130,6 +130,8 @@ enum {
IORES_DESC_ACPI_NV_STORAGE = 3,
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
+ IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
+ IORES_DESC_DEVICE_PUBLIC_MEMORY = 7,
};
/* helpers to define resources */
diff --git a/include/linux/iova.h b/include/linux/iova.h
index e0a892ae45c0..d179b9bf7814 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
+#include <linux/atomic.h>
#include <linux/dma-mapping.h>
/* iova structure */
@@ -36,6 +37,35 @@ struct iova_rcache {
struct iova_cpu_rcache __percpu *cpu_rcaches;
};
+struct iova_domain;
+
+/* Call-Back from IOVA code into IOMMU drivers */
+typedef void (* iova_flush_cb)(struct iova_domain *domain);
+
+/* Destructor for per-entry data */
+typedef void (* iova_entry_dtor)(unsigned long data);
+
+/* Number of entries per Flush Queue */
+#define IOVA_FQ_SIZE 256
+
+/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
+#define IOVA_FQ_TIMEOUT 10
+
+/* Flush Queue entry for deferred flushing */
+struct iova_fq_entry {
+ unsigned long iova_pfn;
+ unsigned long pages;
+ unsigned long data;
+ u64 counter; /* Flush counter when this entry was added */
+};
+
+/* Per-CPU Flush Queue structure */
+struct iova_fq {
+ struct iova_fq_entry entries[IOVA_FQ_SIZE];
+ unsigned head, tail;
+ spinlock_t lock;
+};
+
/* holds all the iova translations for a domain */
struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
@@ -45,6 +75,25 @@ struct iova_domain {
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
+
+ iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
+ TLBs */
+
+ iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for
+ iova entry */
+
+ struct iova_fq __percpu *fq; /* Flush Queue */
+
+ atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that
+ have been started */
+
+ atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that
+ have been finished */
+
+ struct timer_list fq_timer; /* Timer to regularly empty the
+ flush-queues */
+ atomic_t fq_timer_on; /* 1 when timer is active, 0
+ when not */
};
static inline unsigned long iova_size(struct iova *iova)
@@ -95,6 +144,9 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
+void queue_iova(struct iova_domain *iovad,
+ unsigned long pfn, unsigned long pages,
+ unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
@@ -102,6 +154,8 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn, unsigned long pfn_32bit);
+int init_iova_flush_queue(struct iova_domain *iovad,
+ iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
@@ -148,6 +202,12 @@ static inline void free_iova_fast(struct iova_domain *iovad,
{
}
+static inline void queue_iova(struct iova_domain *iovad,
+ unsigned long pfn, unsigned long pages,
+ unsigned long data)
+{
+}
+
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn)
@@ -174,6 +234,13 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}
+static inline int init_iova_flush_queue(struct iova_domain *iovad,
+ iova_flush_cb flush_cb,
+ iova_entry_dtor entry_dtor)
+{
+ return -ENODEV;
+}
+
static inline struct iova *find_iova(struct iova_domain *iovad,
unsigned long pfn)
{
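A hedged sketch of how an IOMMU driver might wire up the flush queue declared above; the callback bodies and the init function are placeholders:

	/* Illustration: driver-side hookup of the deferred-flush queue. */
	static void demo_flush_cb(struct iova_domain *iovad)
	{
		/* The driver would issue a domain-wide IOTLB flush here. */
	}

	static void demo_entry_dtor(unsigned long data)
	{
		/* Release per-entry driver data once the flush completed. */
	}

	static int demo_iovad_init(struct iova_domain *iovad)
	{
		/* Unmap paths would then call queue_iova() instead of
		 * free_iova_fast(), deferring the free to the next flush. */
		return init_iova_flush_queue(iovad, demo_flush_cb,
					     demo_entry_dtor);
	}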
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index fadd579d577d..92a2ccff80c5 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -3,7 +3,9 @@
#include <linux/spinlock.h>
#include <linux/uidgid.h>
+#include <linux/rhashtable.h>
#include <uapi/linux/ipc.h>
+#include <linux/refcount.h>
#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
@@ -21,8 +23,10 @@ struct kern_ipc_perm {
unsigned long seq;
void *security;
+ struct rhash_head khtnode;
+
struct rcu_head rcu;
- atomic_t refcount;
+ refcount_t refcount;
} ____cacheline_aligned_in_smp __randomize_layout;
#endif /* _LINUX_IPC_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 65327ee0936b..83f0bf7a587d 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -7,19 +7,23 @@
#include <linux/notifier.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
struct user_namespace;
struct ipc_ids {
int in_use;
unsigned short seq;
+ bool tables_initialized;
struct rw_semaphore rwsem;
struct idr ipcs_idr;
int next_id;
+ struct rhashtable key_ht;
};
struct ipc_namespace {
- atomic_t count;
+ refcount_t count;
struct ipc_ids ids[3];
int sem_ctls[4];
@@ -118,7 +122,7 @@ extern struct ipc_namespace *copy_ipcs(unsigned long flags,
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
- atomic_inc(&ns->count);
+ refcount_inc(&ns->count);
return ns;
}
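The atomic_t to refcount_t conversion changes the put side as well, since refcount_t saturates rather than wrapping on over/underflow. A hedged sketch of the matching release path (the real destructor does more; only the shape is shown):

	static void put_ipc_ns_sketch(struct ipc_namespace *ns)
	{
		/* refcount_dec_and_test() WARNs and saturates on misuse. */
		if (refcount_dec_and_test(&ns->count)) {
			/* Last reference dropped: tear the namespace down. */
		}
	}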
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 474d6bbc158c..ac2da4e11d5e 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -159,6 +159,16 @@ static inline bool inet6_is_jumbogram(const struct sk_buff *skb)
}
/* can not be used in TCP layer after tcp_v6_fill_cb */
+static inline int inet6_sdif(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ return IP6CB(skb)->iif;
+#endif
+ return 0;
+}
+
+/* can not be used in TCP layer after tcp_v6_fill_cb */
static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if defined(CONFIG_NET_L3_MASTER_DEV)
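Like its IPv4 counterpart, inet6_sdif() lets early demux treat an L3 master device as the lookup domain. A hedged sketch of a caller; the helper name is illustrative:

	/* Sketch: prefer the slave device index when an l3mdev is in play. */
	static int rx_lookup_dev(const struct sk_buff *skb)
	{
		int sdif = inet6_sdif(skb);	/* 0 without an l3mdev */

		return sdif ? : skb->skb_iif;
	}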
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d2d543794093..d4728bf6a537 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -568,6 +568,8 @@ extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
+extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
new file mode 100644
index 000000000000..0380d899b955
--- /dev/null
+++ b/include/linux/irq_sim.h
@@ -0,0 +1,44 @@
+#ifndef _LINUX_IRQ_SIM_H
+#define _LINUX_IRQ_SIM_H
+/*
+ * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/irq_work.h>
+#include <linux/device.h>
+
+/*
+ * Provides a framework for allocating simulated interrupts which can be
+ * requested like normal irqs and enqueued from process context.
+ */
+
+struct irq_sim_work_ctx {
+ struct irq_work work;
+ int irq;
+};
+
+struct irq_sim_irq_ctx {
+ int irqnum;
+ bool enabled;
+};
+
+struct irq_sim {
+ struct irq_sim_work_ctx work_ctx;
+ int irq_base;
+ unsigned int irq_count;
+ struct irq_sim_irq_ctx *irqs;
+};
+
+int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs);
+int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
+ unsigned int num_irqs);
+void irq_sim_fini(struct irq_sim *sim);
+void irq_sim_fire(struct irq_sim *sim, unsigned int offset);
+int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset);
+
+#endif /* _LINUX_IRQ_SIM_H */
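A hedged usage sketch of the simulator API; the probe function and line count are illustrative:

	/* Sketch: allocate two simulated lines and fire the first one. */
	static int demo_probe(struct device *dev)
	{
		static struct irq_sim sim;
		int ret;

		ret = devm_irq_sim_init(dev, &sim, 2);
		if (ret)
			return ret;

		/* Translate offset 0 to a virq, request it as usual... */
		dev_info(dev, "line 0 -> virq %d\n", irq_sim_irqnum(&sim, 0));

		/* ...then enqueue the interrupt from process context. */
		irq_sim_fire(&sim, 0);
		return 0;
	}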
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index c647b0547bcd..0a83b4379f34 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -27,6 +27,8 @@ struct gic_kvm_info {
unsigned int maint_irq;
/* Virtual control interface */
struct resource vctrl;
+ /* vlpi support */
+ bool has_v4;
};
const struct gic_kvm_info *gic_get_kvm_info(void);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 6a1f87ff94e2..1ea576c8126f 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -204,6 +204,7 @@
#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
+#define GICR_TYPER_DirectLPIS (1U << 3)
#define GICR_TYPER_LAST (1U << 4)
#define GIC_V3_REDIST_SIZE 0x20000
@@ -212,6 +213,69 @@
#define LPI_PROP_ENABLED (1 << 0)
/*
+ * Re-Distributor registers, offsets from VLPI_base
+ */
+#define GICR_VPROPBASER 0x0070
+
+#define GICR_VPROPBASER_IDBITS_MASK 0x1f
+
+#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10)
+#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7)
+#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56)
+
+#define GICR_VPROPBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK)
+#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK)
+#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK)
+#define GICR_VPROPBASER_CACHEABILITY_MASK \
+ GICR_VPROPBASER_INNER_CACHEABILITY_MASK
+
+#define GICR_VPROPBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable)
+
+#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB)
+#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC)
+#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
+#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb)
+#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt)
+#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb)
+#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
+#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb)
+
+#define GICR_VPENDBASER 0x0078
+
+#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10)
+#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7)
+#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56)
+#define GICR_VPENDBASER_SHAREABILITY_MASK \
+ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK)
+#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK)
+#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \
+ GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK)
+#define GICR_VPENDBASER_CACHEABILITY_MASK \
+ GICR_VPENDBASER_INNER_CACHEABILITY_MASK
+
+#define GICR_VPENDBASER_NonShareable \
+ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
+
+#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
+#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
+#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
+#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb)
+#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt)
+#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb)
+#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt)
+#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb)
+
+#define GICR_VPENDBASER_Dirty (1ULL << 60)
+#define GICR_VPENDBASER_PendingLast (1ULL << 61)
+#define GICR_VPENDBASER_IDAI (1ULL << 62)
+#define GICR_VPENDBASER_Valid (1ULL << 63)
+
+/*
* ITS registers, offsets from ITS_base
*/
#define GITS_CTLR 0x0000
@@ -234,15 +298,21 @@
#define GITS_TRANSLATER 0x10040
#define GITS_CTLR_ENABLE (1U << 0)
+#define GITS_CTLR_ImDe (1U << 1)
+#define GITS_CTLR_ITS_NUMBER_SHIFT 4
+#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT)
#define GITS_CTLR_QUIESCENT (1U << 31)
#define GITS_TYPER_PLPIS (1UL << 0)
+#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
+#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HWCOLLCNT_SHIFT 24
+#define GITS_TYPER_VMOVP (1ULL << 37)
#define GITS_IIDR_REV_SHIFT 12
#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT)
@@ -342,6 +412,18 @@
#define GITS_CMD_SYNC 0x05
/*
+ * GICv4 ITS specific commands
+ */
+#define GITS_CMD_GICv4(x) ((x) | 0x20)
+#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL)
+#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC)
+#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
+#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
+#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
+/* VMOVP is the odd one, as it doesn't have a physical counterpart */
+#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
+
+/*
* ITS error numbers
*/
#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107
@@ -487,6 +569,8 @@ struct rdists {
struct page *prop_page;
int id_bits;
u64 flags;
+ bool has_vlpis;
+ bool has_direct_lpi;
};
struct irq_domain;
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
new file mode 100644
index 000000000000..58a4d89aa82c
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H
+#define __LINUX_IRQCHIP_ARM_GIC_V4_H
+
+struct its_vpe;
+
+/* Embedded in kvm.arch */
+struct its_vm {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *domain;
+ struct page *vprop_page;
+ struct its_vpe **vpes;
+ int nr_vpes;
+ irq_hw_number_t db_lpi_base;
+ unsigned long *db_bitmap;
+ int nr_db_lpis;
+};
+
+/* Embedded in kvm_vcpu.arch */
+struct its_vpe {
+ struct page *vpt_page;
+ struct its_vm *its_vm;
+ /* Doorbell interrupt */
+ int irq;
+ irq_hw_number_t vpe_db_lpi;
+ /* VPE proxy mapping */
+ int vpe_proxy_event;
+ /*
+ * This collection ID is used to indirect the target
+ * redistributor for this VPE. The ID itself isn't involved in
+ * programming of the ITS.
+ */
+ u16 col_idx;
+ /* Unique (system-wide) VPE identifier */
+ u16 vpe_id;
+ /* Implementation Defined Area Invalid */
+ bool idai;
+ /* Pending VLPIs on schedule out? */
+ bool pending_last;
+};
+
+/*
+ * struct its_vlpi_map: structure describing the mapping of a
+ * VLPI. Only to be interpreted in the context of a physical interrupt
+ * it complements. To be used as the vcpu_info passed to
+ * irq_set_vcpu_affinity().
+ *
+ * @vm: Pointer to the GICv4 notion of a VM
+ * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE)
+ * @vintid: Virtual LPI number
+ * @db_enabled: Is the VPE doorbell to be generated?
+ */
+struct its_vlpi_map {
+ struct its_vm *vm;
+ struct its_vpe *vpe;
+ u32 vintid;
+ bool db_enabled;
+};
+
+enum its_vcpu_info_cmd_type {
+ MAP_VLPI,
+ GET_VLPI,
+ PROP_UPDATE_VLPI,
+ PROP_UPDATE_AND_INV_VLPI,
+ SCHEDULE_VPE,
+ DESCHEDULE_VPE,
+ INVALL_VPE,
+};
+
+struct its_cmd_info {
+ enum its_vcpu_info_cmd_type cmd_type;
+ union {
+ struct its_vlpi_map *map;
+ u8 config;
+ };
+};
+
+int its_alloc_vcpu_irqs(struct its_vm *vm);
+void its_free_vcpu_irqs(struct its_vm *vm);
+int its_schedule_vpe(struct its_vpe *vpe, bool on);
+int its_invall_vpe(struct its_vpe *vpe);
+int its_map_vlpi(int irq, struct its_vlpi_map *map);
+int its_get_vlpi(int irq, struct its_vlpi_map *map);
+int its_unmap_vlpi(int irq);
+int its_prop_update_vlpi(int irq, u8 config, bool inv);
+
+int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
+
+#endif
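A hedged sketch of how hypervisor-side code might forward a host interrupt to a guest as a VLPI with the API above; the wrapper and its arguments are illustrative:

	/* Sketch: map host_irq to a guest VLPI, doorbell enabled. */
	static int forward_to_guest(struct its_vm *vm, struct its_vpe *vpe,
				    int host_irq, u32 vintid)
	{
		struct its_vlpi_map map = {
			.vm		= vm,
			.vpe		= vpe,
			.vintid		= vintid,
			.db_enabled	= true,
		};

		/* Plumbed through irq_set_vcpu_affinity(), per the comment. */
		return its_map_vlpi(host_irq, &map);
	}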
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
deleted file mode 100644
index 2b0e56619e53..000000000000
--- a/include/linux/irqchip/mips-gic.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000, 07 MIPS Technologies, Inc.
- */
-#ifndef __LINUX_IRQCHIP_MIPS_GIC_H
-#define __LINUX_IRQCHIP_MIPS_GIC_H
-
-#include <linux/clocksource.h>
-#include <linux/ioport.h>
-
-#define GIC_MAX_INTRS 256
-
-/* Constants */
-#define GIC_POL_POS 1
-#define GIC_POL_NEG 0
-#define GIC_TRIG_EDGE 1
-#define GIC_TRIG_LEVEL 0
-#define GIC_TRIG_DUAL_ENABLE 1
-#define GIC_TRIG_DUAL_DISABLE 0
-
-#define MSK(n) ((1 << (n)) - 1)
-
-/* Accessors */
-#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS)
-
-/* GIC Address Space */
-#define SHARED_SECTION_OFS 0x0000
-#define SHARED_SECTION_SIZE 0x8000
-#define VPE_LOCAL_SECTION_OFS 0x8000
-#define VPE_LOCAL_SECTION_SIZE 0x4000
-#define VPE_OTHER_SECTION_OFS 0xc000
-#define VPE_OTHER_SECTION_SIZE 0x4000
-#define USM_VISIBLE_SECTION_OFS 0x10000
-#define USM_VISIBLE_SECTION_SIZE 0x10000
-
-/* Register Map for Shared Section */
-
-#define GIC_SH_CONFIG_OFS 0x0000
-
-/* Shared Global Counter */
-#define GIC_SH_COUNTER_31_00_OFS 0x0010
-/* 64-bit counter register for CM3 */
-#define GIC_SH_COUNTER_OFS GIC_SH_COUNTER_31_00_OFS
-#define GIC_SH_COUNTER_63_32_OFS 0x0014
-#define GIC_SH_REVISIONID_OFS 0x0020
-
-/* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr) ({ \
- unsigned bits = mips_cm_is64 ? 64 : 32; \
- unsigned reg_idx = (intr) / bits; \
- unsigned reg_width = bits / 8; \
- \
- reg_idx * reg_width; \
-})
-#define GIC_INTR_BIT(intr) ((intr) % (mips_cm_is64 ? 64 : 32))
-
-/* Polarity : Reset Value is always 0 */
-#define GIC_SH_SET_POLARITY_OFS 0x0100
-
-/* Triggering : Reset Value is always 0 */
-#define GIC_SH_SET_TRIGGER_OFS 0x0180
-
-/* Dual edge triggering : Reset Value is always 0 */
-#define GIC_SH_SET_DUAL_OFS 0x0200
-
-/* Set/Clear corresponding bit in Edge Detect Register */
-#define GIC_SH_WEDGE_OFS 0x0280
-
-/* Mask manipulation */
-#define GIC_SH_RMASK_OFS 0x0300
-#define GIC_SH_SMASK_OFS 0x0380
-
-/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
-#define GIC_SH_MASK_OFS 0x0400
-
-/* Pending Global Interrupts (RO) */
-#define GIC_SH_PEND_OFS 0x0480
-
-/* Maps Interrupt X to a Pin */
-#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
-#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr))
-
-/* Maps Interrupt X to a VPE */
-#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
-#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
- ((32 * (intr)) + (((vpe) / 32) * 4))
-#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
-
-/* Register Map for Local Section */
-#define GIC_VPE_CTL_OFS 0x0000
-#define GIC_VPE_PEND_OFS 0x0004
-#define GIC_VPE_MASK_OFS 0x0008
-#define GIC_VPE_RMASK_OFS 0x000c
-#define GIC_VPE_SMASK_OFS 0x0010
-#define GIC_VPE_WD_MAP_OFS 0x0040
-#define GIC_VPE_COMPARE_MAP_OFS 0x0044
-#define GIC_VPE_TIMER_MAP_OFS 0x0048
-#define GIC_VPE_FDC_MAP_OFS 0x004c
-#define GIC_VPE_PERFCTR_MAP_OFS 0x0050
-#define GIC_VPE_SWINT0_MAP_OFS 0x0054
-#define GIC_VPE_SWINT1_MAP_OFS 0x0058
-#define GIC_VPE_OTHER_ADDR_OFS 0x0080
-#define GIC_VP_IDENT_OFS 0x0088
-#define GIC_VPE_WD_CONFIG0_OFS 0x0090
-#define GIC_VPE_WD_COUNT0_OFS 0x0094
-#define GIC_VPE_WD_INITIAL0_OFS 0x0098
-#define GIC_VPE_COMPARE_LO_OFS 0x00a0
-/* 64-bit Compare register on CM3 */
-#define GIC_VPE_COMPARE_OFS GIC_VPE_COMPARE_LO_OFS
-#define GIC_VPE_COMPARE_HI_OFS 0x00a4
-
-#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
-#define GIC_VPE_EIC_SS(intr) (4 * (intr))
-
-#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800
-#define GIC_VPE_EIC_VEC(intr) (4 * (intr))
-
-#define GIC_VPE_TENABLE_NMI_OFS 0x1000
-#define GIC_VPE_TENABLE_YQ_OFS 0x1004
-#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080
-#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084
-
-/* User Mode Visible Section Register Map */
-#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000
-#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004
-
-/* Masks */
-#define GIC_SH_CONFIG_COUNTSTOP_SHF 28
-#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF)
-
-#define GIC_SH_CONFIG_COUNTBITS_SHF 24
-#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF)
-
-#define GIC_SH_CONFIG_NUMINTRS_SHF 16
-#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF)
-
-#define GIC_SH_CONFIG_NUMVPES_SHF 0
-#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
-
-#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31))
-#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31))
-
-#define GIC_MAP_TO_PIN_SHF 31
-#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF)
-#define GIC_MAP_TO_NMI_SHF 30
-#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF)
-#define GIC_MAP_TO_YQ_SHF 29
-#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF)
-#define GIC_MAP_SHF 0
-#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF)
-
-/* GIC_VPE_CTL Masks */
-#define GIC_VPE_CTL_FDC_RTBL_SHF 4
-#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF)
-#define GIC_VPE_CTL_SWINT_RTBL_SHF 3
-#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF)
-#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2
-#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
-#define GIC_VPE_CTL_TIMER_RTBL_SHF 1
-#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF)
-#define GIC_VPE_CTL_EIC_MODE_SHF 0
-#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF)
-
-/* GIC_VPE_PEND Masks */
-#define GIC_VPE_PEND_WD_SHF 0
-#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF)
-#define GIC_VPE_PEND_CMP_SHF 1
-#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF)
-#define GIC_VPE_PEND_TIMER_SHF 2
-#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF)
-#define GIC_VPE_PEND_PERFCOUNT_SHF 3
-#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF)
-#define GIC_VPE_PEND_SWINT0_SHF 4
-#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
-#define GIC_VPE_PEND_SWINT1_SHF 5
-#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
-#define GIC_VPE_PEND_FDC_SHF 6
-#define GIC_VPE_PEND_FDC_MSK (MSK(1) << GIC_VPE_PEND_FDC_SHF)
-
-/* GIC_VPE_RMASK Masks */
-#define GIC_VPE_RMASK_WD_SHF 0
-#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF)
-#define GIC_VPE_RMASK_CMP_SHF 1
-#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF)
-#define GIC_VPE_RMASK_TIMER_SHF 2
-#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF)
-#define GIC_VPE_RMASK_PERFCNT_SHF 3
-#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF)
-#define GIC_VPE_RMASK_SWINT0_SHF 4
-#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
-#define GIC_VPE_RMASK_SWINT1_SHF 5
-#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
-#define GIC_VPE_RMASK_FDC_SHF 6
-#define GIC_VPE_RMASK_FDC_MSK (MSK(1) << GIC_VPE_RMASK_FDC_SHF)
-
-/* GIC_VPE_SMASK Masks */
-#define GIC_VPE_SMASK_WD_SHF 0
-#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF)
-#define GIC_VPE_SMASK_CMP_SHF 1
-#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF)
-#define GIC_VPE_SMASK_TIMER_SHF 2
-#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF)
-#define GIC_VPE_SMASK_PERFCNT_SHF 3
-#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF)
-#define GIC_VPE_SMASK_SWINT0_SHF 4
-#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
-#define GIC_VPE_SMASK_SWINT1_SHF 5
-#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
-#define GIC_VPE_SMASK_FDC_SHF 6
-#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF)
-
-/* GIC_VP_IDENT fields */
-#define GIC_VP_IDENT_VCNUM_SHF 0
-#define GIC_VP_IDENT_VCNUM_MSK (MSK(6) << GIC_VP_IDENT_VCNUM_SHF)
-
-/* GIC nomenclature for Core Interrupt Pins. */
-#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
-#define GIC_CPU_INT1 1 /* . */
-#define GIC_CPU_INT2 2 /* . */
-#define GIC_CPU_INT3 3 /* . */
-#define GIC_CPU_INT4 4 /* . */
-#define GIC_CPU_INT5 5 /* Core Interrupt 7 */
-
-/* Add 2 to convert GIC CPU pin to core interrupt */
-#define GIC_CPU_PIN_OFFSET 2
-
-/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
-#define GIC_CPU_TO_VEC_OFFSET 2
-
-/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
-#define GIC_PIN_TO_VEC_OFFSET 1
-
-/* Local GIC interrupts. */
-#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */
-#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */
-#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */
-#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */
-#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */
-#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */
-#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */
-#define GIC_NUM_LOCAL_INTRS 7
-
-/* Convert between local/shared IRQ number and GIC HW IRQ number. */
-#define GIC_LOCAL_HWIRQ_BASE 0
-#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
-#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
-#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
-#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
-#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
-
-#ifdef CONFIG_MIPS_GIC
-
-extern unsigned int gic_present;
-
-extern void gic_init(unsigned long gic_base_addr,
- unsigned long gic_addrspace_size, unsigned int cpu_vec,
- unsigned int irqbase);
-extern u64 gic_read_count(void);
-extern unsigned int gic_get_count_width(void);
-extern u64 gic_read_compare(void);
-extern void gic_write_compare(u64 cnt);
-extern void gic_write_cpu_compare(u64 cnt, int cpu);
-extern void gic_start_count(void);
-extern void gic_stop_count(void);
-extern int gic_get_c0_compare_int(void);
-extern int gic_get_c0_perfcount_int(void);
-extern int gic_get_c0_fdc_int(void);
-extern int gic_get_usm_range(struct resource *gic_usm_res);
-
-#else /* CONFIG_MIPS_GIC */
-
-#define gic_present 0
-
-static inline int gic_get_usm_range(struct resource *gic_usm_res)
-{
- /* Shouldn't be called. */
- return -1;
-}
-
-#endif /* CONFIG_MIPS_GIC */
-
-/**
- * gic_read_local_vp_id() - read the local VPs VCNUM
- *
- * Read the VCNUM of the local VP from the GIC_VP_IDENT register and
- * return it to the caller. This ID should be used to refer to the VP
- * via the GICs VP-other region, or when calculating an offset to a
- * bit representing the VP in interrupt masks.
- *
- * Return: The VCNUM value for the local VP.
- */
-extern unsigned gic_read_local_vp_id(void);
-
-#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index cac77a5c5555..81e4889ca6dd 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -265,9 +265,11 @@ static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
return node ? &node->fwnode : NULL;
}
+extern const struct fwnode_operations irqchip_fwnode_ops;
+
static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
{
- return fwnode && fwnode->type == FWNODE_IRQCHIP;
+ return fwnode && fwnode->ops == &irqchip_fwnode_ops;
}
extern void irq_domain_update_bus_token(struct irq_domain *domain,
@@ -460,6 +462,9 @@ extern void irq_domain_free_irqs_common(struct irq_domain *domain,
extern void irq_domain_free_irqs_top(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs);
+extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
+extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
+
extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
unsigned int irq_base,
unsigned int nr_irqs, void *arg);
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5dd1272d1ab2..5fdd93bb9300 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -23,10 +23,26 @@
# define trace_softirq_context(p) ((p)->softirq_context)
# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
-# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
-# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
+# define trace_hardirq_enter() \
+do { \
+ current->hardirq_context++; \
+ crossrelease_hist_start(XHLOCK_HARD); \
+} while (0)
+# define trace_hardirq_exit() \
+do { \
+ current->hardirq_context--; \
+ crossrelease_hist_end(XHLOCK_HARD); \
+} while (0)
+# define lockdep_softirq_enter() \
+do { \
+ current->softirq_context++; \
+ crossrelease_hist_start(XHLOCK_SOFT); \
+} while (0)
+# define lockdep_softirq_exit() \
+do { \
+ current->softirq_context--; \
+ crossrelease_hist_end(XHLOCK_SOFT); \
+} while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 2afd74b9d844..cd5861651b17 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -163,6 +163,8 @@ extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
+extern void static_key_enable_cpuslocked(struct static_key *key);
+extern void static_key_disable_cpuslocked(struct static_key *key);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -234,24 +236,29 @@ static inline int jump_label_apply_nops(struct module *mod)
static inline void static_key_enable(struct static_key *key)
{
- int count = static_key_count(key);
-
- WARN_ON_ONCE(count < 0 || count > 1);
+ STATIC_KEY_CHECK_USE();
- if (!count)
- static_key_slow_inc(key);
+ if (atomic_read(&key->enabled) != 0) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
+ return;
+ }
+ atomic_set(&key->enabled, 1);
}
static inline void static_key_disable(struct static_key *key)
{
- int count = static_key_count(key);
-
- WARN_ON_ONCE(count < 0 || count > 1);
+ STATIC_KEY_CHECK_USE();
- if (count)
- static_key_slow_dec(key);
+ if (atomic_read(&key->enabled) != 1) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
+ return;
+ }
+ atomic_set(&key->enabled, 0);
}
+#define static_key_enable_cpuslocked(k) static_key_enable((k))
+#define static_key_disable_cpuslocked(k) static_key_disable((k))
+
#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
@@ -413,8 +420,10 @@ extern bool ____wrong_branch_error(void);
* Normal usage; boolean enable/disable.
*/
-#define static_branch_enable(x) static_key_enable(&(x)->key)
-#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable(x) static_key_enable(&(x)->key)
+#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key)
+#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key)
#endif /* __ASSEMBLY__ */
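The new *_cpuslocked variants exist for callers that already hold the CPU hotplug lock, where plain static_key_enable() taking it again would deadlock. A hedged sketch of such a call-site:

	/* Sketch: toggling a static branch from a hotplug callback. */
	static DEFINE_STATIC_KEY_FALSE(demo_key);

	static int demo_cpu_online(unsigned int cpu)
	{
		/* Hotplug callbacks run with cpus_read_lock() held. */
		static_branch_enable_cpuslocked(&demo_key);
		return 0;
	}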
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
index b7f8aced7870..41960fecf783 100644
--- a/include/linux/kasan-checks.h
+++ b/include/linux/kasan-checks.h
@@ -2,11 +2,13 @@
#define _LINUX_KASAN_CHECKS_H
#ifdef CONFIG_KASAN
-void kasan_check_read(const void *p, unsigned int size);
-void kasan_check_write(const void *p, unsigned int size);
+void kasan_check_read(const volatile void *p, unsigned int size);
+void kasan_check_write(const volatile void *p, unsigned int size);
#else
-static inline void kasan_check_read(const void *p, unsigned int size) { }
-static inline void kasan_check_write(const void *p, unsigned int size) { }
+static inline void kasan_check_read(const volatile void *p, unsigned int size)
+{ }
+static inline void kasan_check_write(const volatile void *p, unsigned int size)
+{ }
#endif
#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index bd6d96cf80b1..91189bb0c818 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -44,6 +44,12 @@
#define STACK_MAGIC 0xdeadbeef
+/**
+ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
/* @a is a power of 2 value */
@@ -57,6 +63,10 @@
#define READ 0
#define WRITE 1
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#define u64_to_user_ptr(x) ( \
@@ -76,10 +86,21 @@
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
+/**
+ * FIELD_SIZEOF - get the size of a struct's field
+ * @t: the target struct
+ * @f: the target struct's field
+ * Return: the size of @f in the struct definition without having a
+ * declared instance of @t.
+ */
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
-#define DIV_ROUND_UP_ULL(ll,d) \
- ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
+
+#define DIV_ROUND_DOWN_ULL(ll, d) \
+ ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
+
+#define DIV_ROUND_UP_ULL(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d))
#if BITS_PER_LONG == 32
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
@@ -104,7 +125,7 @@
/*
* Divide positive or negative dividend by positive or negative divisor
* and round to closest integer. Result is undefined for negative
- * divisors if he dividend variable type is unsigned and for negative
+ * divisors if the dividend variable type is unsigned and for negative
* dividends if the divisor variable type is unsigned.
*/
#define DIV_ROUND_CLOSEST(x, divisor)( \
@@ -244,13 +265,13 @@ extern int _cond_resched(void);
* @ep_ro: right open interval endpoint
*
* Perform a "reciprocal multiplication" in order to "scale" a value into
- * range [0, ep_ro), where the upper interval endpoint is right-open.
+ * range [0, @ep_ro), where the upper interval endpoint is right-open.
* This is useful, e.g. for accessing a index of an array containing
- * ep_ro elements, for example. Think of it as sort of modulus, only that
+ * @ep_ro elements, for example. Think of it as sort of modulus, only that
* the result isn't that of modulo. ;) Note that if initial input is a
* small value, then result will return 0.
*
- * Return: a result based on val in interval [0, ep_ro).
+ * Return: a result based on @val in interval [0, @ep_ro).
*/
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
@@ -277,6 +298,13 @@ extern int oops_may_print(void);
void do_exit(long error_code) __noreturn;
void complete_and_exit(struct completion *, long) __noreturn;
+#ifdef CONFIG_ARCH_HAS_REFCOUNT
+void refcount_error_report(struct pt_regs *regs, const char *err);
+#else
+static inline void refcount_error_report(struct pt_regs *regs, const char *err)
+{ }
+#endif
+
/* Internal, do not use. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __must_check _kstrtol(const char *s, unsigned int base, long *res);
@@ -608,8 +636,8 @@ do { \
* trace_printk - printf formatting in the ftrace buffer
* @fmt: the printf format for printing
*
- * Note: __trace_printk is an internal function for trace_printk and
- * the @ip is passed in via the trace_printk macro.
+ * Note: __trace_printk is an internal function for trace_printk() and
+ * the @ip is passed in via the trace_printk() macro.
*
* This function allows a kernel developer to debug fast path sections
* that printk is not appropriate for. By scattering in various
@@ -619,7 +647,7 @@ do { \
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_printks scattered around in
* your code. (Extra memory is used for special buffers that are
- * allocated when trace_printk() is used)
+ * allocated when trace_printk() is used.)
*
* A little optimization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
@@ -671,7 +699,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
* the @ip is passed in via the trace_puts macro.
*
* This is similar to trace_printk() but is made for those really fast
- * paths that a developer wants the least amount of "Heisenbug" affects,
+ * paths that a developer wants the least amount of "Heisenbug" effects,
* where the processing of the print format is still too much.
*
* This function allows a kernel developer to debug fast path sections
@@ -682,7 +710,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_puts scattered around in
* your code. (Extra memory is used for special buffers that are
- * allocated when trace_puts() is used)
+ * allocated when trace_puts() is used.)
*
* Returns: 0 if nothing was written, positive # if string was.
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
@@ -761,6 +789,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
t2 min2 = (y); \
(void) (&min1 == &min2); \
min1 < min2 ? min1 : min2; })
+
+/**
+ * min - return minimum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
#define min(x, y) \
__min(typeof(x), typeof(y), \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
@@ -771,12 +805,31 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
t2 max2 = (y); \
(void) (&max1 == &max2); \
max1 > max2 ? max1 : max2; })
+
+/**
+ * max - return maximum of two values of the same or compatible types
+ * @x: first value
+ * @y: second value
+ */
#define max(x, y) \
__max(typeof(x), typeof(y), \
__UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
x, y)
+/**
+ * min3 - return minimum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
#define min3(x, y, z) min((typeof(x))min(x, y), z)
+
+/**
+ * max3 - return maximum of three values
+ * @x: first value
+ * @y: second value
+ * @z: third value
+ */
#define max3(x, y, z) max((typeof(x))max(x, y), z)
/**
@@ -795,8 +848,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @lo: lowest allowable value
* @hi: highest allowable value
*
- * This macro does strict typechecking of lo/hi to make sure they are of the
- * same type as val. See the unnecessary pointer comparisons.
+ * This macro does strict typechecking of @lo/@hi to make sure they are of the
+ * same type as @val. See the unnecessary pointer comparisons.
*/
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
@@ -806,11 +859,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
*
* Or not use min/max/clamp at all, of course.
*/
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
#define min_t(type, x, y) \
__min(type, type, \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
x, y)
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
#define max_t(type, x, y) \
__max(type, type, \
__UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
@@ -824,7 +890,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of type
- * 'type' to make all the comparisons.
+ * @type to make all the comparisons.
*/
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
@@ -835,15 +901,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of whatever
- * type the input argument 'val' is. This is useful when val is an unsigned
- * type and min and max are literals that will otherwise be assigned a signed
+ * type the input argument @val is. This is useful when @val is an unsigned
+ * type and @lo and @hi are literals that will otherwise be assigned a signed
* integer type.
*/
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
-/*
- * swap - swap value of @a and @b
+/**
+ * swap - swap values of @a and @b
+ * @a: first value
+ * @b: second value
*/
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
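The rework lets DIV_ROUND_UP_ULL() reuse the do_div() plumbing of the new DIV_ROUND_DOWN_ULL(). A small worked example with illustrative values:

	/* Sketch: split 10^10 bytes into 4096-byte pages. */
	unsigned long long bytes = 10000000000ULL;
	unsigned long long whole = DIV_ROUND_DOWN_ULL(bytes, 4096); /* 2441406 */
	unsigned long long pages = DIV_ROUND_UP_ULL(bytes, 4096);   /* 2441407 */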
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index a9b11b8d06f2..ab25c8b6d9e3 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -69,6 +69,12 @@ enum kernfs_root_flag {
* following flag enables that behavior.
*/
KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002,
+
+ /*
+ * The filesystem supports exportfs operation, so userspace can use
+ * fhandle to access nodes of the fs.
+ */
+ KERNFS_ROOT_SUPPORT_EXPORTOP = 0x0004,
};
/* type-specific structures for kernfs_node union members */
@@ -95,6 +101,21 @@ struct kernfs_elem_attr {
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
+/* represent a kernfs node */
+union kernfs_node_id {
+ struct {
+ /*
+ * blktrace will export this struct as a simplified 'struct
+ * fid' (which is a big data structure), so userspace can use
+ * it to find the kernfs node. The layout must match the first two
+ * fields of 'struct fid' exactly.
+ */
+ u32 ino;
+ u32 generation;
+ };
+ u64 id;
+};
+
/*
* kernfs_node - the building block of kernfs hierarchy. Each and every
* kernfs node is represented by single kernfs_node. Most fields are
@@ -131,9 +152,9 @@ struct kernfs_node {
void *priv;
+ union kernfs_node_id id;
unsigned short flags;
umode_t mode;
- unsigned int ino;
struct kernfs_iattrs *iattr;
};
@@ -163,7 +184,8 @@ struct kernfs_root {
unsigned int flags; /* KERNFS_ROOT_* flags */
/* private fields, do not use outside kernfs proper */
- struct ida ino_ida;
+ struct idr ino_idr;
+ u32 next_generation;
struct kernfs_syscall_ops *syscall_ops;
/* list of kernfs_super_info of this root, protected by kernfs_mutex */
@@ -336,6 +358,8 @@ struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
void kernfs_init(void);
+struct kernfs_node *kernfs_get_node_by_id(struct kernfs_root *root,
+ const union kernfs_node_id *id);
#else /* CONFIG_KERNFS */
static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
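The union above gives two views of the same 64 bits, so blktrace can emit a single u64 while exportfs code sees the (ino, generation) pair; which half lands where in the u64 is endian-dependent. A hedged illustration:

	union kernfs_node_id id;

	id.ino        = 0x1234;		/* matches fid->i32.ino */
	id.generation = 0x1;		/* matches fid->i32.gen */
	/* id.id now carries both fields as one 64-bit value. */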
diff --git a/include/linux/key.h b/include/linux/key.h
index 044114185120..e315e16b6ff8 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -187,6 +187,7 @@ struct key {
#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */
#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */
#define KEY_FLAG_KEEP 10 /* set if key should not be removed */
+#define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */
/* the key type and key description string
* - the desc is used to match a key against search criteria
@@ -243,6 +244,7 @@ extern struct key *key_alloc(struct key_type *type,
#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
+#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
extern void key_revoke(struct key *key);
extern void key_invalidate(struct key *key);
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 655082c88fd9..40c89ad4bea6 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -19,6 +19,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/umh.h>
#include <linux/gfp.h>
#include <linux/stddef.h>
#include <linux/errno.h>
@@ -44,63 +45,4 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
#define try_then_request_module(x, mod...) (x)
#endif
-
-struct cred;
-struct file;
-
-#define UMH_NO_WAIT 0 /* don't wait at all */
-#define UMH_WAIT_EXEC 1 /* wait for the exec, but not the process */
-#define UMH_WAIT_PROC 2 /* wait for the process to complete */
-#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
-
-struct subprocess_info {
- struct work_struct work;
- struct completion *complete;
- const char *path;
- char **argv;
- char **envp;
- int wait;
- int retval;
- int (*init)(struct subprocess_info *info, struct cred *new);
- void (*cleanup)(struct subprocess_info *info);
- void *data;
-} __randomize_layout;
-
-extern int
-call_usermodehelper(const char *path, char **argv, char **envp, int wait);
-
-extern struct subprocess_info *
-call_usermodehelper_setup(const char *path, char **argv, char **envp,
- gfp_t gfp_mask,
- int (*init)(struct subprocess_info *info, struct cred *new),
- void (*cleanup)(struct subprocess_info *), void *data);
-
-extern int
-call_usermodehelper_exec(struct subprocess_info *info, int wait);
-
-extern struct ctl_table usermodehelper_table[];
-
-enum umh_disable_depth {
- UMH_ENABLED = 0,
- UMH_FREEZING,
- UMH_DISABLED,
-};
-
-extern int __usermodehelper_disable(enum umh_disable_depth depth);
-extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
-
-static inline int usermodehelper_disable(void)
-{
- return __usermodehelper_disable(UMH_DISABLED);
-}
-
-static inline void usermodehelper_enable(void)
-{
- __usermodehelper_set_disable_depth(UMH_ENABLED);
-}
-
-extern int usermodehelper_read_trylock(void);
-extern long usermodehelper_read_lock_wait(long timeout);
-extern void usermodehelper_read_unlock(void);
-
#endif /* __LINUX_KMOD_H__ */
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 4d800c79475a..e0a6205caa71 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -57,6 +57,8 @@ enum kobject_action {
KOBJ_MOVE,
KOBJ_ONLINE,
KOBJ_OFFLINE,
+ KOBJ_BIND,
+ KOBJ_UNBIND,
KOBJ_MAX
};
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 21a6fd6c44af..6882538eda32 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -720,7 +720,7 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
@@ -800,6 +800,7 @@ int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
@@ -985,6 +986,12 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
return (hpa_t)pfn << PAGE_SHIFT;
}
+static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
+ gpa_t gpa)
+{
+ return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
+}
+
static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 64c56d454f7d..bf6db4fe895b 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -49,6 +49,7 @@ struct led_classdev {
#define LED_HW_PLUGGABLE (1 << 19)
#define LED_PANIC_INDICATOR (1 << 20)
#define LED_BRIGHT_HW_CHANGED (1 << 21)
+#define LED_RETAIN_AT_SHUTDOWN (1 << 22)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
@@ -392,6 +393,7 @@ struct gpio_led {
unsigned retain_state_suspended : 1;
unsigned panic_indicator : 1;
unsigned default_state : 2;
+ unsigned retain_state_shutdown : 1;
/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
struct gpio_desc *gpiod;
};
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
deleted file mode 100644
index 6db19f35f7c5..000000000000
--- a/include/linux/lguest.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Things the lguest guest needs to know. Note: like all lguest interfaces,
- * this is subject to wild and random change between versions.
- */
-#ifndef _LINUX_LGUEST_H
-#define _LINUX_LGUEST_H
-
-#ifndef __ASSEMBLY__
-#include <linux/time.h>
-#include <asm/irq.h>
-#include <asm/lguest_hcall.h>
-
-#define LG_CLOCK_MIN_DELTA 100UL
-#define LG_CLOCK_MAX_DELTA ULONG_MAX
-
-/*G:031
- * The second method of communicating with the Host is to via "struct
- * lguest_data". Once the Guest's initialization hypercall tells the Host where
- * this is, the Guest and Host both publish information in it.
-:*/
-struct lguest_data {
- /*
- * 512 == enabled (same as eflags in normal hardware). The Guest
- * changes interrupts so often that a hypercall is too slow.
- */
- unsigned int irq_enabled;
- /* Fine-grained interrupt disabling by the Guest */
- DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
-
- /*
- * The Host writes the virtual address of the last page fault here,
- * which saves the Guest a hypercall. CR2 is the native register where
- * this address would normally be found.
- */
- unsigned long cr2;
-
- /* Wallclock time set by the Host. */
- struct timespec time;
-
- /*
- * Interrupt pending set by the Host. The Guest should do a hypercall
- * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF).
- */
- int irq_pending;
-
- /*
- * Async hypercall ring. Instead of directly making hypercalls, we can
- * place them in here for processing the next time the Host wants.
- * This batching can be quite efficient.
- */
-
- /* 0xFF == done (set by Host), 0 == pending (set by Guest). */
- u8 hcall_status[LHCALL_RING_SIZE];
- /* The actual registers for the hypercalls. */
- struct hcall_args hcalls[LHCALL_RING_SIZE];
-
-/* Fields initialized by the Host at boot: */
- /* Memory not to try to access */
- unsigned long reserve_mem;
- /* KHz for the TSC clock. */
- u32 tsc_khz;
-
-/* Fields initialized by the Guest at boot: */
- /* Instruction to suppress interrupts even if enabled */
- unsigned long noirq_iret;
- /* Address above which page tables are all identical. */
- unsigned long kernel_address;
- /* The vector to try to use for system calls (0x40 or 0x80). */
- unsigned int syscall_vec;
-};
-extern struct lguest_data lguest_data;
-#endif /* __ASSEMBLY__ */
-#endif /* _LINUX_LGUEST_H */
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
deleted file mode 100644
index acd5b12565cc..000000000000
--- a/include/linux/lguest_launcher.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _LINUX_LGUEST_LAUNCHER
-#define _LINUX_LGUEST_LAUNCHER
-/* Everything the "lguest" userspace program needs to know. */
-#include <linux/types.h>
-
-/*D:010
- * Drivers
- *
- * The Guest needs devices to do anything useful. Since we don't let it touch
- * real devices (think of the damage it could do!) we provide virtual devices.
- * We emulate a PCI bus with virtio devices on it; we used to have our own
- * lguest bus which was far simpler, but this tests the virtio 1.0 standard.
- *
- * Virtio devices are also used by kvm, so we can simply reuse their optimized
- * device drivers. And one day when everyone uses virtio, my plan will be
- * complete. Bwahahahah!
- */
-
-/* Write command first word is a request. */
-enum lguest_req
-{
- LHREQ_INITIALIZE, /* + base, pfnlimit, start */
- LHREQ_GETDMA, /* No longer used */
- LHREQ_IRQ, /* + irq */
- LHREQ_BREAK, /* No longer used */
- LHREQ_EVENTFD, /* No longer used. */
- LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */
- LHREQ_SETREG, /* + offset within struct pt_regs, value. */
- LHREQ_TRAP, /* + trap number to deliver to guest. */
-};
-
-/*
- * This is what read() of the lguest fd populates. trap ==
- * LGUEST_TRAP_ENTRY for an LHCALL_NOTIFY (addr is the
- * argument), 14 for a page fault in the MMIO region (addr is
- * the trap address, insn is the instruction), or 13 for a GPF
- * (insn is the instruction).
- */
-struct lguest_pending {
- __u8 trap;
- __u8 insn[7];
- __u32 addr;
-};
-#endif /* _LINUX_LGUEST_LAUNCHER */
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index f3d3e6af8838..3eaad2fbf284 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -87,6 +87,7 @@ struct nd_mapping_desc {
struct nvdimm *nvdimm;
u64 start;
u64 size;
+ int position;
};
struct nd_region_desc {
@@ -173,4 +174,19 @@ u64 nd_fletcher64(void *addr, size_t len, bool le);
void nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+void arch_wb_cache_pmem(void *addr, size_t size);
+void arch_invalidate_pmem(void *addr, size_t size);
+#else
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
+{
+}
+static inline void arch_invalidate_pmem(void *addr, size_t size)
+{
+}
+#endif
+
#endif /* __LIBNVDIMM_H__ */
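As an aside, a minimal sketch of how a pmem consumer is expected to pair these arch hooks with memremap(); the function and resource handling here are hypothetical and not part of this patch:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/libnvdimm.h>
#include <linux/string.h>

/* Hypothetical write path: map with the arch-appropriate caching mode,
 * copy, then write back the CPU cache so the data reaches the media.
 */
static int my_pmem_write(struct resource *res, const void *buf, size_t len)
{
	void *dst = memremap(res->start, resource_size(res),
			     ARCH_MEMREMAP_PMEM);

	if (!dst)
		return -ENOMEM;

	memcpy(dst, buf, len);
	/* No-op when !CONFIG_ARCH_HAS_PMEM_API (mapping is then WT). */
	arch_wb_cache_pmem(dst, len);

	memunmap(dst);
	return 0;
}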
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index fffe49f188e6..bfa8e0b0d6f1 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -18,6 +18,8 @@ extern int lock_stat;
#define MAX_LOCKDEP_SUBCLASSES 8UL
+#include <linux/types.h>
+
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
@@ -29,7 +31,7 @@ extern int lock_stat;
* We'd rather not expose kernel/lockdep_states.h this wide, but we do need
* the total number of states... :-(
*/
-#define XXX_LOCK_USAGE_STATES (1+3*4)
+#define XXX_LOCK_USAGE_STATES (1+2*4)
/*
* NR_LOCKDEP_CACHING_CLASSES ... Number of classes
@@ -155,6 +157,12 @@ struct lockdep_map {
int cpu;
unsigned long ip;
#endif
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+ /*
+ * Whether it's a crosslock.
+ */
+ int cross;
+#endif
};
static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -258,8 +266,95 @@ struct held_lock {
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
unsigned int pin_count;
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+ /*
+ * Generation id.
+ *
+	 * The value of cross_gen_id at acquisition time is stored here;
+	 * cross_gen_id is increased globally whenever a crosslock is acquired.
+ */
+ unsigned int gen_id;
+#endif
+};
+
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+#define MAX_XHLOCK_TRACE_ENTRIES 5
+
+/*
+ * This is for keeping locks waiting for commit so that true dependencies
+ * can be added at commit step.
+ */
+struct hist_lock {
+ /*
+ * Id for each entry in the ring buffer. This is used to
+ * decide whether the ring buffer was overwritten or not.
+ *
+ * For example,
+ *
+ * |<----------- hist_lock ring buffer size ------->|
+ * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
+ * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
+ *
+ * where 'p' represents an acquisition in process
+ * context, 'i' represents an acquisition in irq
+ * context.
+ *
+ * In this example, the ring buffer was overwritten by
+	 * acquisitions in irq context, which should be detected at
+ * rollback or commit.
+ */
+ unsigned int hist_id;
+
+ /*
+	 * Separate stack_trace data. This will be used at the commit step.
+ */
+ struct stack_trace trace;
+ unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
+
+ /*
+	 * Separate hlock instance. This will be used at the commit step.
+ *
+ * TODO: Use a smaller data structure containing only necessary
+ * data. However, we should make lockdep code able to handle the
+ * smaller one first.
+ */
+ struct held_lock hlock;
+};
+
+/*
+ * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
+ * be called instead of lockdep_init_map().
+ */
+struct cross_lock {
+ /*
+	 * When multiple acquisitions of crosslocks overlap,
+ * we have to perform commit for them based on cross_gen_id of
+ * the first acquisition, which allows us to add more true
+ * dependencies.
+ *
+ * Moreover, when no acquisition of a crosslock is in progress,
+ * we should not perform commit because the lock might not exist
+ * any more, which might cause incorrect memory access. So we
+ * have to track the number of acquisitions of a crosslock.
+ */
+ int nr_acquire;
+
+ /*
+	 * Separate hlock instance. This will be used at the commit step.
+ *
+ * TODO: Use a smaller data structure containing only necessary
+ * data. However, we should make lockdep code able to handle the
+ * smaller one first.
+ */
+ struct held_lock hlock;
};
+struct lockdep_map_cross {
+ struct lockdep_map map;
+ struct cross_lock xlock;
+};
+#endif
+
/*
* Initialization, self-test and debugging-output methods:
*/
@@ -282,13 +377,6 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass);
/*
- * To initialize a lockdep_map statically use this macro.
- * Note that _name must not be NULL.
- */
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
- { .name = (_name), .key = (void *)(_key), }
-
-/*
* Reinitialize a lock key - for cases where there is special locking or
* special initialization of locks so that the validator gets the scope
* of dependencies wrong: they are either too broad (they need a class-split)
@@ -363,10 +451,6 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
-extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
-extern void lockdep_clear_current_reclaim_state(void);
-extern void lockdep_trace_alloc(gfp_t mask);
-
struct pin_cookie { unsigned int val; };
#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }
@@ -375,7 +459,7 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
-# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+# define INIT_LOCKDEP .lockdep_recursion = 0,
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
@@ -416,9 +500,6 @@ static inline void lockdep_on(void)
# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
-# define lockdep_set_current_reclaim_state(g) do { } while (0)
-# define lockdep_clear_current_reclaim_state() do { } while (0)
-# define lockdep_trace_alloc(g) do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
@@ -467,6 +548,58 @@ struct pin_cookie { };
#endif /* !LOCKDEP */
+enum xhlock_context_t {
+ XHLOCK_HARD,
+ XHLOCK_SOFT,
+ XHLOCK_CTX_NR,
+};
+
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
+ const char *name,
+ struct lock_class_key *key,
+ int subclass);
+extern void lock_commit_crosslock(struct lockdep_map *lock);
+
+/*
+ * What we essentially have to initialize is 'nr_acquire'. Other members
+ * will be initialized in add_xlock().
+ */
+#define STATIC_CROSS_LOCK_INIT() \
+ { .nr_acquire = 0,}
+
+#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
+ { .map.name = (_name), .map.key = (void *)(_key), \
+ .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
+
+/*
+ * To initialize a lockdep_map statically use this macro.
+ * Note that _name must not be NULL.
+ */
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+ { .name = (_name), .key = (void *)(_key), .cross = 0, }
+
+extern void crossrelease_hist_start(enum xhlock_context_t c);
+extern void crossrelease_hist_end(enum xhlock_context_t c);
+extern void lockdep_invariant_state(bool force);
+extern void lockdep_init_task(struct task_struct *task);
+extern void lockdep_free_task(struct task_struct *task);
+#else /* !CROSSRELEASE */
+#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
+/*
+ * To initialize a lockdep_map statically use this macro.
+ * Note that _name must not be NULL.
+ */
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+ { .name = (_name), .key = (void *)(_key), }
+
+static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
+static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
+static inline void lockdep_invariant_state(bool force) {}
+static inline void lockdep_init_task(struct task_struct *task) {}
+static inline void lockdep_free_task(struct task_struct *task) {}
+#endif /* CROSSRELEASE */
+
#ifdef CONFIG_LOCK_STAT
extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
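To make the crossrelease entry points above concrete, a minimal sketch of a primitive that is acquired in one context and released in another; the my_work structure and names are invented for illustration:

#include <linux/lockdep.h>

struct my_work {
	struct lockdep_map_cross xmap;	/* crosslock, not a normal map */
};

static struct lock_class_key my_work_key;

static void my_work_init(struct my_work *w)
{
	/* Crosslocks must use lockdep_init_map_crosslock() on the
	 * embedded lockdep_map instead of lockdep_init_map().
	 */
	lockdep_init_map_crosslock((struct lockdep_map *)&w->xmap,
				   "my_work_xlock", &my_work_key, 0);
}

static void my_work_release(struct my_work *w)
{
	/* Add the collected (true) dependencies at the release side. */
	lock_commit_crosslock((struct lockdep_map *)&w->xmap);
}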
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 22b5d4e687ce..d1c2901f1542 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -4,7 +4,7 @@
*
* Author : Etienne BASSET <etienne.basset@ensta.org>
*
- * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil>
+ * All credits to : Stephen Smalley, <sds@tycho.nsa.gov>
* All BUGS to : Etienne BASSET <etienne.basset@ensta.org>
*/
#ifndef _LSM_COMMON_LOGGING_
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 3a90febadbe2..c9258124e417 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -43,7 +43,11 @@
* interpreters. The hook can tell whether it has already been called by
* checking to see if @bprm->security is non-NULL. If so, then the hook
* may decide either to retain the security information saved earlier or
- * to replace it.
+ * to replace it. The hook must set @bprm->secureexec to 1 if a "secure
+ * exec" has happened as a result of this hook call. The flag is used to
+ * indicate the need for a sanitized execution environment, and is also
+ * passed in the ELF auxiliary table on the initial stack to indicate
+ * whether libc should enable secure mode.
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
* @bprm_check_security:
@@ -71,12 +75,6 @@
* linux_binprm structure. This hook is a good place to perform state
* changes on the process such as clearing out non-inheritable signal
* state. This is called immediately after commit_creds().
- * @bprm_secureexec:
- * Return a boolean value (0 or 1) indicating whether a "secure exec"
- * is required. The flag is passed in the auxiliary table
- * on the initial stack to the ELF interpreter to indicate whether libc
- * should enable secure mode.
- * @bprm contains the linux_binprm structure.
*
* Security hooks for filesystem operations.
*
@@ -530,11 +528,6 @@
*
* Security hooks for task operations.
*
- * @task_create:
- * Check permission before creating a child process. See the clone(2)
- * manual page for definitions of the @clone_flags.
- * @clone_flags contains the flags indicating what should be shared.
- * Return 0 if permission is granted.
* @task_alloc:
* @task task being allocated.
* @clone_flags contains the flags indicating what should be shared.
@@ -1388,7 +1381,6 @@ union security_list_options {
int (*bprm_set_creds)(struct linux_binprm *bprm);
int (*bprm_check_security)(struct linux_binprm *bprm);
- int (*bprm_secureexec)(struct linux_binprm *bprm);
void (*bprm_committing_creds)(struct linux_binprm *bprm);
void (*bprm_committed_creds)(struct linux_binprm *bprm);
@@ -1508,7 +1500,6 @@ union security_list_options {
int (*file_receive)(struct file *file);
int (*file_open)(struct file *file, const struct cred *cred);
- int (*task_create)(unsigned long clone_flags);
int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
void (*task_free)(struct task_struct *task);
int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
@@ -1710,7 +1701,6 @@ struct security_hook_heads {
struct list_head vm_enough_memory;
struct list_head bprm_set_creds;
struct list_head bprm_check_security;
- struct list_head bprm_secureexec;
struct list_head bprm_committing_creds;
struct list_head bprm_committed_creds;
struct list_head sb_alloc_security;
@@ -1783,7 +1773,6 @@ struct security_hook_heads {
struct list_head file_send_sigiotask;
struct list_head file_receive;
struct list_head file_open;
- struct list_head task_create;
struct list_head task_alloc;
struct list_head task_free;
struct list_head cred_alloc_blank;
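On the LSM side this change means a module folds its old bprm_secureexec() logic into bprm_set_creds(); a hedged sketch follows, where the euid policy is invented for illustration (real modules have richer checks):

#include <linux/binfmts.h>
#include <linux/cred.h>
#include <linux/lsm_hooks.h>

static int example_bprm_set_creds(struct linux_binprm *bprm)
{
	/* Hypothetical policy: the effective uid changes across this
	 * exec, so request a sanitized environment via the new flag.
	 */
	if (!uid_eq(bprm->cred->euid, current_euid()))
		bprm->secureexec = 1;

	return 0;
}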
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 4097ac9ea13a..b1a0ad9d23b3 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -136,5 +136,7 @@ extern struct resource *mcb_request_mem(struct mcb_device *dev,
const char *name);
extern void mcb_release_mem(struct resource *mem);
extern int mcb_get_irq(struct mcb_device *dev);
+extern struct resource *mcb_get_resource(struct mcb_device *dev,
+ unsigned int type);
#endif /* _LINUX_MCB_H */
diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h
index 61f5b21b31c7..a5d58f221939 100644
--- a/include/linux/mdio-mux.h
+++ b/include/linux/mdio-mux.h
@@ -12,7 +12,16 @@
#include <linux/device.h>
#include <linux/phy.h>
+/* mdio_mux_init() - Initialize an MDIO mux
+ * @dev		The device owning the MDIO mux
+ * @mux_node	The device node of the MDIO mux
+ * @switch_fn	The function called to switch to the desired MDIO child bus
+ * @mux_handle	A pointer to a (void *) used internally by mdio-mux
+ * @data	Private data used by switch_fn()
+ * @mux_bus	An optional parent bus (otherwise the parent_bus property is used)
+ */
int mdio_mux_init(struct device *dev,
+ struct device_node *mux_node,
int (*switch_fn) (int cur, int desired, void *data),
void **mux_handle,
void *data,
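For callers, the new parameter means the mux node is passed explicitly instead of being assumed to be dev->of_node; a hedged probe sketch (driver names invented, NULL for the optional parent bus):

#include <linux/mdio-mux.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int my_mux_switch_fn(int cur, int desired, void *data)
{
	/* Illustrative: reprogram the steering register here. */
	return 0;
}

static int my_mux_probe(struct platform_device *pdev)
{
	void **handle = devm_kzalloc(&pdev->dev, sizeof(*handle), GFP_KERNEL);

	if (!handle)
		return -ENOMEM;

	/* mux_node == dev->of_node here, but the two may now differ. */
	return mdio_mux_init(&pdev->dev, pdev->dev.of_node,
			     my_mux_switch_fn, handle, NULL, NULL);
}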
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 1255f09f5e42..265a9cd21cb4 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -21,7 +21,7 @@
#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-#define sme_me_mask 0UL
+#define sme_me_mask 0ULL
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
@@ -30,18 +30,23 @@ static inline bool sme_active(void)
return !!sme_me_mask;
}
-static inline unsigned long sme_get_me_mask(void)
+static inline u64 sme_get_me_mask(void)
{
return sme_me_mask;
}
+#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
* The __sme_set() and __sme_clr() macros are useful for adding or removing
* the encryption mask from a value (e.g. when dealing with pagetable
* entries).
*/
-#define __sme_set(x) ((unsigned long)(x) | sme_me_mask)
-#define __sme_clr(x) ((unsigned long)(x) & ~sme_me_mask)
+#define __sme_set(x) ((x) | sme_me_mask)
+#define __sme_clr(x) ((x) & ~sme_me_mask)
+#else
+#define __sme_set(x) (x)
+#define __sme_clr(x) (x)
+#endif
#endif /* __ASSEMBLY__ */
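A small illustration of the intended use (hypothetical page-table helpers; note the mask is now u64, so this stays correct on 32-bit PAE):

#include <linux/mem_encrypt.h>
#include <linux/types.h>

static inline u64 example_mk_entry(u64 phys, u64 prot)
{
	return __sme_set(phys | prot);	/* OR in the encryption bit */
}

static inline u64 example_entry_phys(u64 entry, u64 prot_mask)
{
	return __sme_clr(entry) & ~prot_mask;	/* strip it back off */
}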
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9b15a4bcfa77..69966c461d1c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -488,8 +488,9 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
long val = 0;
int cpu;
@@ -503,15 +504,17 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
return val;
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
if (!mem_cgroup_disabled())
__this_cpu_add(memcg->stat->count[idx], val);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
if (!mem_cgroup_disabled())
this_cpu_add(memcg->stat->count[idx], val);
@@ -535,14 +538,14 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
* Kernel pages are an exception to this, since they'll never move.
*/
static inline void __mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
if (page->mem_cgroup)
__mod_memcg_state(page->mem_cgroup, idx, val);
}
static inline void mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx, int val)
+ int idx, int val)
{
if (page->mem_cgroup)
mod_memcg_state(page->mem_cgroup, idx, val);
@@ -632,8 +635,9 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
this_cpu_add(memcg->stat->events[idx], count);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void count_memcg_page_event(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
if (page->mem_cgroup)
count_memcg_events(page->mem_cgroup, idx, 1);
@@ -846,31 +850,31 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
}
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
return 0;
}
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
static inline void __mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
static inline void mod_memcg_page_state(struct page *page,
- enum memcg_stat_item idx,
+ int idx,
int nr)
{
}
@@ -924,7 +928,7 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
}
static inline void count_memcg_page_event(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
}
@@ -934,26 +938,30 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
}
#endif /* CONFIG_MEMCG */
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
__mod_memcg_state(memcg, idx, 1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
__mod_memcg_state(memcg, idx, -1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
__mod_memcg_page_state(page, idx, 1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
__mod_memcg_page_state(page, idx, -1);
}
@@ -982,26 +990,30 @@ static inline void __dec_lruvec_page_state(struct page *page,
__mod_lruvec_page_state(page, idx, -1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
mod_memcg_state(memcg, idx, 1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+ int idx)
{
mod_memcg_state(memcg, idx, -1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
mod_memcg_page_state(page, idx, 1);
}
+/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+ int idx)
{
mod_memcg_page_state(page, idx, -1);
}
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index c8a5056a5ae0..0995e1a2b458 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -133,6 +133,17 @@ extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
extern int __add_pages(int nid, unsigned long start_pfn,
unsigned long nr_pages, bool want_memblock);
+#ifndef CONFIG_ARCH_HAS_ADD_PAGES
+static inline int add_pages(int nid, unsigned long start_pfn,
+ unsigned long nr_pages, bool want_memblock)
+{
+ return __add_pages(nid, start_pfn, nr_pages, want_memblock);
+}
+#else /* ARCH_HAS_ADD_PAGES */
+int add_pages(int nid, unsigned long start_pfn,
+ unsigned long nr_pages, bool want_memblock);
+#endif /* ARCH_HAS_ADD_PAGES */
+
#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
@@ -319,6 +330,6 @@ extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
int online_type);
-extern struct zone *default_zone_for_pfn(int nid, unsigned long pfn,
+extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
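A hedged sketch of the arch side of this hook: select ARCH_HAS_ADD_PAGES and provide add_pages() wrapping the generic path. The alignment constraint below is invented purely for illustration:

#include <linux/memory_hotplug.h>
#include <linux/mm.h>

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      bool want_memblock)
{
	/* Hypothetical arch-specific constraint checked up front. */
	if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
		return -EINVAL;

	return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}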
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 93416196ba64..79f8ba7c3894 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -4,6 +4,8 @@
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
+#include <asm/pgtable.h>
+
struct resource;
struct device;
@@ -35,24 +37,107 @@ static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
}
#endif
+/*
+ * Specialize ZONE_DEVICE memory into multiple types, each having a different
+ * usage.
+ *
+ * MEMORY_DEVICE_HOST:
+ * Persistent device memory (pmem): struct pages might be allocated in a
+ * different memory region and the architecture might want to perform special
+ * actions. It is similar
+ * to regular memory, in that the CPU can access it transparently. However,
+ * it is likely to have different bandwidth and latency than regular memory.
+ * See Documentation/nvdimm/nvdimm.txt for more information.
+ *
+ * MEMORY_DEVICE_PRIVATE:
+ * Device memory that is not directly addressable by the CPU: CPU can neither
+ * read nor write private memory. In this case, we do still have struct pages
+ * backing the device memory. Doing so simplifies the implementation, but it is
+ * important to remember that there are certain points at which the struct page
+ * must be treated as an opaque object, rather than a "normal" struct page.
+ *
+ * A more complete discussion of unaddressable memory may be found in
+ * include/linux/hmm.h and Documentation/vm/hmm.txt.
+ *
+ * MEMORY_DEVICE_PUBLIC:
+ * Device memory that is cache coherent from device and CPU point of view. This
+ * is use on platform that have an advance system bus (like CAPI or CCIX). A
+ * driver can hotplug the device memory using ZONE_DEVICE and with that memory
+ * type. Any page of a process can be migrated to such memory. However no one
+ * should be allow to pin such memory so that it can always be evicted.
+ */
+enum memory_type {
+ MEMORY_DEVICE_HOST = 0,
+ MEMORY_DEVICE_PRIVATE,
+ MEMORY_DEVICE_PUBLIC,
+};
+
+/*
+ * For MEMORY_DEVICE_PRIVATE we use ZONE_DEVICE and extend it with two
+ * callbacks:
+ * page_fault()
+ * page_free()
+ *
+ * Additional notes about MEMORY_DEVICE_PRIVATE may be found in
+ * include/linux/hmm.h and Documentation/vm/hmm.txt. There is also a brief
+ * explanation in include/linux/memory_hotplug.h.
+ *
+ * The page_fault() callback must migrate the page back, from device memory to
+ * system memory, so that the CPU can access it. This might fail for various
+ * reasons (device issues, the device has been unplugged, ...). When such error
+ * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and
+ * set the CPU page table entry to "poisoned".
+ *
+ * Note that because memory cgroup charges are transferred to the device memory,
+ * this should never fail due to memory restrictions. However, allocation
+ * of a regular system page might still fail because we are out of memory. If
+ * that happens, the page_fault() callback must return VM_FAULT_OOM.
+ *
+ * The page_fault() callback can also try to migrate back multiple pages in one
+ * chunk, as an optimization. It must, however, prioritize the faulting address
+ * over all the others.
+ *
+ *
+ * The page_free() callback is called once the page refcount reaches 1
+ * (ZONE_DEVICE pages never reach a 0 refcount unless there is a refcount
+ * bug; this allows the device driver to implement its own memory management).
+ *
+ * For MEMORY_DEVICE_PUBLIC, only the page_free() callback matters.
+ */
+typedef int (*dev_page_fault_t)(struct vm_area_struct *vma,
+ unsigned long addr,
+ const struct page *page,
+ unsigned int flags,
+ pmd_t *pmdp);
+typedef void (*dev_page_free_t)(struct page *page, void *data);
+
/**
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
+ * @page_fault: callback when CPU fault on an unaddressable device page
+ * @page_free: free page callback when page refcount reaches 1
* @altmap: pre-allocated/reserved memory for vmemmap allocations
* @res: physical address range covered by @ref
* @ref: reference count that pins the devm_memremap_pages() mapping
* @dev: host device of the mapping for debug
+ * @data: private data pointer for page_free()
+ * @type: memory type: see MEMORY_* in memory_hotplug.h
*/
struct dev_pagemap {
+ dev_page_fault_t page_fault;
+ dev_page_free_t page_free;
struct vmem_altmap *altmap;
const struct resource *res;
struct percpu_ref *ref;
struct device *dev;
+ void *data;
+ enum memory_type type;
};
#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct resource *res,
struct percpu_ref *ref, struct vmem_altmap *altmap);
struct dev_pagemap *find_dev_pagemap(resource_size_t phys);
+
+static inline bool is_zone_device_page(const struct page *page);
#else
static inline void *devm_memremap_pages(struct device *dev,
struct resource *res, struct percpu_ref *ref,
@@ -73,6 +158,20 @@ static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
}
#endif
+#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
+static inline bool is_device_private_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PRIVATE;
+}
+
+static inline bool is_device_public_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PUBLIC;
+}
+#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+
/**
* get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
* @pfn: page frame number to lookup page_map
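Tying the two callbacks to the struct, a hedged sketch of a MEMORY_DEVICE_PRIVATE setup; the fault-time migration itself is driver-specific and stubbed out here:

#include <linux/memremap.h>
#include <linux/mm.h>

static int my_devmem_fault(struct vm_area_struct *vma, unsigned long addr,
			   const struct page *page, unsigned int flags,
			   pmd_t *pmdp)
{
	/* A real driver migrates the page back to system memory here and
	 * only returns VM_FAULT_SIGBUS on unrecoverable failure.
	 */
	return VM_FAULT_SIGBUS;	/* placeholder */
}

static void my_devmem_free(struct page *page, void *data)
{
	/* Refcount hit 1: return the backing block to the driver's
	 * allocator; 'data' is the pointer stored below.
	 */
}

static void my_devmem_setup(struct dev_pagemap *pgmap, void *allocator)
{
	pgmap->page_fault = my_devmem_fault;
	pgmap->page_free = my_devmem_free;
	pgmap->data = allocator;
	pgmap->type = MEMORY_DEVICE_PRIVATE;
}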
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 965b027e31b3..e9c908c4fba8 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -23,6 +23,7 @@ enum axp20x_variants {
AXP803_ID,
AXP806_ID,
AXP809_ID,
+ AXP813_ID,
NR_AXP20X_VARIANTS,
};
@@ -387,6 +388,34 @@ enum {
AXP803_REG_ID_MAX,
};
+enum {
+ AXP813_DCDC1 = 0,
+ AXP813_DCDC2,
+ AXP813_DCDC3,
+ AXP813_DCDC4,
+ AXP813_DCDC5,
+ AXP813_DCDC6,
+ AXP813_DCDC7,
+ AXP813_ALDO1,
+ AXP813_ALDO2,
+ AXP813_ALDO3,
+ AXP813_DLDO1,
+ AXP813_DLDO2,
+ AXP813_DLDO3,
+ AXP813_DLDO4,
+ AXP813_ELDO1,
+ AXP813_ELDO2,
+ AXP813_ELDO3,
+ AXP813_FLDO1,
+ AXP813_FLDO2,
+ AXP813_FLDO3,
+ AXP813_RTC_LDO,
+ AXP813_LDO_IO0,
+ AXP813_LDO_IO1,
+ AXP813_SW,
+ AXP813_REG_ID_MAX,
+};
+
/* IRQs */
enum {
AXP152_IRQ_LDO0IN_CONNECT = 1,
diff --git a/include/linux/mfd/bd9571mwv.h b/include/linux/mfd/bd9571mwv.h
new file mode 100644
index 000000000000..f0708ba4cbba
--- /dev/null
+++ b/include/linux/mfd/bd9571mwv.h
@@ -0,0 +1,115 @@
+/*
+ * ROHM BD9571MWV-M driver
+ *
+ * Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65086 driver
+ */
+
+#ifndef __LINUX_MFD_BD9571MWV_H
+#define __LINUX_MFD_BD9571MWV_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* List of registers for BD9571MWV */
+#define BD9571MWV_VENDOR_CODE 0x00
+#define BD9571MWV_VENDOR_CODE_VAL 0xdb
+#define BD9571MWV_PRODUCT_CODE 0x01
+#define BD9571MWV_PRODUCT_CODE_VAL 0x60
+#define BD9571MWV_PRODUCT_REVISION 0x02
+
+#define BD9571MWV_I2C_FUSA_MODE 0x10
+#define BD9571MWV_I2C_MD2_E1_BIT_1 0x11
+#define BD9571MWV_I2C_MD2_E1_BIT_2 0x12
+
+#define BD9571MWV_BKUP_MODE_CNT 0x20
+#define BD9571MWV_BKUP_MODE_STATUS 0x21
+#define BD9571MWV_BKUP_RECOVERY_CNT 0x22
+#define BD9571MWV_BKUP_CTRL_TIM_CNT 0x23
+#define BD9571MWV_WAITBKUP_WDT_CNT 0x24
+#define BD9571MWV_128H_TIM_CNT 0x26
+#define BD9571MWV_QLLM_CNT 0x27
+
+#define BD9571MWV_AVS_SET_MONI 0x31
+#define BD9571MWV_AVS_SET_MONI_MASK 0x3
+#define BD9571MWV_AVS_VD09_VID(n) (0x32 + (n))
+#define BD9571MWV_AVS_DVFS_VID(n) (0x36 + (n))
+
+#define BD9571MWV_VD18_VID 0x42
+#define BD9571MWV_VD25_VID 0x43
+#define BD9571MWV_VD33_VID 0x44
+
+#define BD9571MWV_DVFS_VINIT 0x50
+#define BD9571MWV_DVFS_SETVMAX 0x52
+#define BD9571MWV_DVFS_BOOSTVID 0x53
+#define BD9571MWV_DVFS_SETVID 0x54
+#define BD9571MWV_DVFS_MONIVDAC 0x55
+#define BD9571MWV_DVFS_PGD_CNT 0x56
+
+#define BD9571MWV_GPIO_DIR 0x60
+#define BD9571MWV_GPIO_OUT 0x61
+#define BD9571MWV_GPIO_IN 0x62
+#define BD9571MWV_GPIO_DEB 0x63
+#define BD9571MWV_GPIO_INT_SET 0x64
+#define BD9571MWV_GPIO_INT 0x65
+#define BD9571MWV_GPIO_INTMASK 0x66
+
+#define BD9571MWV_REG_KEEP(n) (0x70 + (n))
+
+#define BD9571MWV_PMIC_INTERNAL_STATUS 0x80
+#define BD9571MWV_PROT_ERROR_STATUS0 0x81
+#define BD9571MWV_PROT_ERROR_STATUS1 0x82
+#define BD9571MWV_PROT_ERROR_STATUS2 0x83
+#define BD9571MWV_PROT_ERROR_STATUS3 0x84
+#define BD9571MWV_PROT_ERROR_STATUS4 0x85
+
+#define BD9571MWV_INT_INTREQ 0x90
+#define BD9571MWV_INT_INTREQ_MD1_INT BIT(0)
+#define BD9571MWV_INT_INTREQ_MD2_E1_INT BIT(1)
+#define BD9571MWV_INT_INTREQ_MD2_E2_INT BIT(2)
+#define BD9571MWV_INT_INTREQ_PROT_ERR_INT BIT(3)
+#define BD9571MWV_INT_INTREQ_GP_INT BIT(4)
+#define BD9571MWV_INT_INTREQ_128H_OF_INT BIT(5)
+#define BD9571MWV_INT_INTREQ_WDT_OF_INT BIT(6)
+#define BD9571MWV_INT_INTREQ_BKUP_TRG_INT BIT(7)
+#define BD9571MWV_INT_INTMASK 0x91
+
+#define BD9571MWV_ACCESS_KEY 0xff
+
+/* Define the BD9571MWV IRQ numbers */
+enum bd9571mwv_irqs {
+ BD9571MWV_IRQ_MD1,
+ BD9571MWV_IRQ_MD2_E1,
+ BD9571MWV_IRQ_MD2_E2,
+ BD9571MWV_IRQ_PROT_ERR,
+ BD9571MWV_IRQ_GP,
+ BD9571MWV_IRQ_128H_OF,
+ BD9571MWV_IRQ_WDT_OF,
+ BD9571MWV_IRQ_BKUP_TRG,
+};
+
+/**
+ * struct bd9571mwv - state holder for the bd9571mwv driver
+ *
+ * Device data may be used to access the BD9571MWV chip
+ */
+struct bd9571mwv {
+ struct device *dev;
+ struct regmap *regmap;
+
+ /* IRQ Data */
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif /* __LINUX_MFD_BD9571MWV_H */
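As an example of using the shared state, a hedged identity check a sub-driver might run over the parent's regmap:

#include <linux/mfd/bd9571mwv.h>

static int bd9571mwv_check_id(struct bd9571mwv *bd)
{
	unsigned int val;
	int ret;

	ret = regmap_read(bd->regmap, BD9571MWV_VENDOR_CODE, &val);
	if (ret)
		return ret;
	if (val != BD9571MWV_VENDOR_CODE_VAL)
		return -ENODEV;

	ret = regmap_read(bd->regmap, BD9571MWV_PRODUCT_CODE, &val);
	if (ret)
		return ret;

	return val == BD9571MWV_PRODUCT_CODE_VAL ? 0 : -ENODEV;
}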
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 190c8f4afa02..2b16e95b9bb8 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -285,6 +285,11 @@ enum host_event_code {
EC_HOST_EVENT_HANG_DETECT = 20,
/* Hang detect logic detected a hang and warm rebooted the AP */
EC_HOST_EVENT_HANG_REBOOT = 21,
+ /* PD MCU triggering host event */
+ EC_HOST_EVENT_PD_MCU = 22,
+
+ /* EC desires to change state of host-controlled USB mux */
+ EC_HOST_EVENT_USB_MUX = 28,
/*
* The high bit of the event mask is not used as a host event code. If
@@ -2905,6 +2910,76 @@ struct ec_params_usb_pd_control {
uint8_t mux;
} __packed;
+#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */
+#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */
+#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */
+
+struct ec_response_usb_pd_control_v1 {
+ uint8_t enabled;
+ uint8_t role;
+ uint8_t polarity;
+ char state[32];
+} __packed;
+
+#define EC_CMD_USB_PD_PORTS 0x102
+
+struct ec_response_usb_pd_ports {
+ uint8_t num_ports;
+} __packed;
+
+#define EC_CMD_USB_PD_POWER_INFO 0x103
+
+#define PD_POWER_CHARGING_PORT 0xff
+struct ec_params_usb_pd_power_info {
+ uint8_t port;
+} __packed;
+
+enum usb_chg_type {
+ USB_CHG_TYPE_NONE,
+ USB_CHG_TYPE_PD,
+ USB_CHG_TYPE_C,
+ USB_CHG_TYPE_PROPRIETARY,
+ USB_CHG_TYPE_BC12_DCP,
+ USB_CHG_TYPE_BC12_CDP,
+ USB_CHG_TYPE_BC12_SDP,
+ USB_CHG_TYPE_OTHER,
+ USB_CHG_TYPE_VBUS,
+ USB_CHG_TYPE_UNKNOWN,
+};
+
+struct usb_chg_measures {
+ uint16_t voltage_max;
+ uint16_t voltage_now;
+ uint16_t current_max;
+ uint16_t current_lim;
+} __packed;
+
+struct ec_response_usb_pd_power_info {
+ uint8_t role;
+ uint8_t type;
+ uint8_t dualrole;
+ uint8_t reserved1;
+ struct usb_chg_measures meas;
+ uint32_t max_power;
+} __packed;
+
+/* Get info about USB-C SS muxes */
+#define EC_CMD_USB_PD_MUX_INFO 0x11a
+
+struct ec_params_usb_pd_mux_info {
+ uint8_t port; /* USB-C port number */
+} __packed;
+
+/* Flags representing mux state */
+#define USB_PD_MUX_USB_ENABLED (1 << 0)
+#define USB_PD_MUX_DP_ENABLED (1 << 1)
+#define USB_PD_MUX_POLARITY_INVERTED (1 << 2)
+#define USB_PD_MUX_HPD_IRQ (1 << 3)
+
+struct ec_response_usb_pd_mux_info {
+ uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */
+} __packed;
+
/*****************************************************************************/
/*
* Passthru commands
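To show how the new mux-info command pair is meant to be driven, a hedged sketch built on the existing cros_ec_cmd_xfer_status() helper; buffer handling is simplified:

#include <linux/mfd/cros_ec.h>
#include <linux/slab.h>

static int ec_get_usb_pd_mux_flags(struct cros_ec_device *ec, u8 port)
{
	struct ec_params_usb_pd_mux_info *params;
	struct ec_response_usb_pd_mux_info *resp;
	struct cros_ec_command *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
		      GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_USB_PD_MUX_INFO;
	msg->outsize = sizeof(*params);
	msg->insize = sizeof(*resp);

	params = (void *)msg->data;
	params->port = port;

	ret = cros_ec_cmd_xfer_status(ec, msg);
	if (ret >= 0) {
		resp = (void *)msg->data;
		ret = resp->flags;	/* USB_PD_MUX_* bits */
	}

	kfree(msg);
	return ret;
}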
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index ce9230af09c2..ae5b663836d0 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -45,6 +45,12 @@
#define DA9052_ADC_TJUNC 8
#define DA9052_ADC_VBBAT 9
+/* TSI channel has its own 4 channel mux */
+#define DA9052_ADC_TSI_XP 70
+#define DA9052_ADC_TSI_XN 71
+#define DA9052_ADC_TSI_YP 72
+#define DA9052_ADC_TSI_YN 73
+
#define DA9052_IRQ_DCIN 0
#define DA9052_IRQ_VBUS 1
#define DA9052_IRQ_DCINREM 2
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h
index 5010f978725c..76780ea8849c 100644
--- a/include/linux/mfd/da9052/reg.h
+++ b/include/linux/mfd/da9052/reg.h
@@ -690,7 +690,10 @@
/* TSI CONTROL REGISTER B BITS */
#define DA9052_TSICONTB_ADCREF 0X80
#define DA9052_TSICONTB_TSIMAN 0X40
-#define DA9052_TSICONTB_TSIMUX 0X30
+#define DA9052_TSICONTB_TSIMUX_XP 0X00
+#define DA9052_TSICONTB_TSIMUX_YP 0X10
+#define DA9052_TSICONTB_TSIMUX_XN 0X20
+#define DA9052_TSICONTB_TSIMUX_YN 0X30
#define DA9052_TSICONTB_TSISEL3 0X08
#define DA9052_TSICONTB_TSISEL2 0X04
#define DA9052_TSICONTB_TSISEL1 0X02
@@ -705,8 +708,14 @@
/* TSI CO-ORDINATE LSB RESULT REGISTER BITS */
#define DA9052_TSILSB_PENDOWN 0X40
#define DA9052_TSILSB_TSIZL 0X30
+#define DA9052_TSILSB_TSIZL_SHIFT 4
+#define DA9052_TSILSB_TSIZL_BITS 2
#define DA9052_TSILSB_TSIYL 0X0C
+#define DA9052_TSILSB_TSIYL_SHIFT 2
+#define DA9052_TSILSB_TSIYL_BITS 2
#define DA9052_TSILSB_TSIXL 0X03
+#define DA9052_TSILSB_TSIXL_SHIFT 0
+#define DA9052_TSILSB_TSIXL_BITS 2
/* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */
#define DA9052_TSIZMSB_TSIZM 0XFF
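The new shift/width macros let the touchscreen driver decode coordinates generically; a hedged sketch for the X channel (8 MSBs plus 2 LSBs giving a 10-bit sample; the helper name is invented):

#include <linux/mfd/da9052/reg.h>
#include <linux/types.h>

static inline unsigned int da9052_tsi_x(u8 x_msb, u8 lsb_reg)
{
	unsigned int lsb = (lsb_reg & DA9052_TSILSB_TSIXL) >>
			   DA9052_TSILSB_TSIXL_SHIFT;

	return ((unsigned int)x_msb << DA9052_TSILSB_TSIXL_BITS) | lsb;
}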
diff --git a/include/linux/i2c/dm355evm_msp.h b/include/linux/mfd/dm355evm_msp.h
index 372470350fab..372470350fab 100644
--- a/include/linux/i2c/dm355evm_msp.h
+++ b/include/linux/mfd/dm355evm_msp.h
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
index 38a372a0e285..2227c6a75d84 100644
--- a/include/linux/mfd/ds1wm.h
+++ b/include/linux/mfd/ds1wm.h
@@ -1,13 +1,28 @@
-/* MFD cell driver data for the DS1WM driver */
+/* MFD cell driver data for the DS1WM driver
+ *
+ * to be defined by the MFD device that is
+ * using this driver for one of its sub-devices
+ */
struct ds1wm_driver_data {
int active_high;
int clock_rate;
- /* in milliseconds, the amount of time to */
- /* sleep following a reset pulse. Zero */
- /* should work if your bus devices recover*/
- /* time respects the 1-wire spec since the*/
- /* ds1wm implements the precise timings of*/
- /* a reset pulse/presence detect sequence.*/
+ /* in milliseconds, the amount of time to
+ * sleep following a reset pulse. Zero
+	 * should work if your bus devices' recovery
+	 * time respects the 1-wire spec, since the
+ * ds1wm implements the precise timings of
+ * a reset pulse/presence detect sequence.
+ */
unsigned int reset_recover_delay;
+
+	/* Set to true for big-endian hardware
+	 * (only relevant with bus_shift > 0).
+	 */
+ bool is_hw_big_endian;
+
+	/* left shift of the register number to get the register address offset.
+	 * Only 0, 1 or 2 is allowed, for 8-, 16- or 32-bit bus widths respectively.
+ */
+ unsigned int bus_shift;
};
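A hedged example of the cell data an MFD host might now pass for a 32-bit, big-endian register window (the values are invented):

#include <linux/mfd/ds1wm.h>
#include <linux/types.h>

static struct ds1wm_driver_data my_ds1wm_pdata = {
	.active_high		= 0,
	.clock_rate		= 32000000,
	.reset_recover_delay	= 1,	/* ms */
	.is_hw_big_endian	= true,
	.bus_shift		= 2,	/* registers at offset n << 2 */
};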
diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h
index 587273e35acf..2580c08db7b1 100644
--- a/include/linux/mfd/hi6421-pmic.h
+++ b/include/linux/mfd/hi6421-pmic.h
@@ -38,4 +38,9 @@ struct hi6421_pmic {
struct regmap *regmap;
};
+enum hi6421_type {
+ HI6421 = 0,
+ HI6421_V530,
+};
+
#endif /* __HI6421_PMIC_H */
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 83701ef7d3c7..d3156594674c 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -206,6 +206,97 @@ enum rk818_reg {
#define RK818_USB_ILMIN_2000MA 0x7
#define RK818_USB_CHG_SD_VSEL_MASK 0x70
+/* RK805 */
+enum rk805_reg {
+ RK805_ID_DCDC1,
+ RK805_ID_DCDC2,
+ RK805_ID_DCDC3,
+ RK805_ID_DCDC4,
+ RK805_ID_LDO1,
+ RK805_ID_LDO2,
+ RK805_ID_LDO3,
+};
+
+/* CONFIG REGISTER */
+#define RK805_VB_MON_REG 0x21
+#define RK805_THERMAL_REG 0x22
+
+/* POWER CHANNELS ENABLE REGISTER */
+#define RK805_DCDC_EN_REG 0x23
+#define RK805_SLP_DCDC_EN_REG 0x25
+#define RK805_SLP_LDO_EN_REG 0x26
+#define RK805_LDO_EN_REG 0x27
+
+/* BUCK AND LDO CONFIG REGISTER */
+#define RK805_BUCK_LDO_SLP_LP_EN_REG 0x2A
+#define RK805_BUCK1_CONFIG_REG 0x2E
+#define RK805_BUCK1_ON_VSEL_REG 0x2F
+#define RK805_BUCK1_SLP_VSEL_REG 0x30
+#define RK805_BUCK2_CONFIG_REG 0x32
+#define RK805_BUCK2_ON_VSEL_REG 0x33
+#define RK805_BUCK2_SLP_VSEL_REG 0x34
+#define RK805_BUCK3_CONFIG_REG 0x36
+#define RK805_BUCK4_CONFIG_REG 0x37
+#define RK805_BUCK4_ON_VSEL_REG 0x38
+#define RK805_BUCK4_SLP_VSEL_REG 0x39
+#define RK805_LDO1_ON_VSEL_REG 0x3B
+#define RK805_LDO1_SLP_VSEL_REG 0x3C
+#define RK805_LDO2_ON_VSEL_REG 0x3D
+#define RK805_LDO2_SLP_VSEL_REG 0x3E
+#define RK805_LDO3_ON_VSEL_REG 0x3F
+#define RK805_LDO3_SLP_VSEL_REG 0x40
+
+/* INTERRUPT REGISTER */
+#define RK805_PWRON_LP_INT_TIME_REG 0x47
+#define RK805_PWRON_DB_REG 0x48
+#define RK805_DEV_CTRL_REG 0x4B
+#define RK805_INT_STS_REG 0x4C
+#define RK805_INT_STS_MSK_REG 0x4D
+#define RK805_GPIO_IO_POL_REG 0x50
+#define RK805_OUT_REG 0x52
+#define RK805_ON_SOURCE_REG 0xAE
+#define RK805_OFF_SOURCE_REG 0xAF
+
+#define RK805_NUM_REGULATORS 7
+
+#define RK805_PWRON_FALL_RISE_INT_EN 0x0
+#define RK805_PWRON_FALL_RISE_INT_MSK 0x81
+
+/* RK805 IRQ Definitions */
+#define RK805_IRQ_PWRON_RISE 0
+#define RK805_IRQ_VB_LOW 1
+#define RK805_IRQ_PWRON 2
+#define RK805_IRQ_PWRON_LP 3
+#define RK805_IRQ_HOTDIE 4
+#define RK805_IRQ_RTC_ALARM 5
+#define RK805_IRQ_RTC_PERIOD 6
+#define RK805_IRQ_PWRON_FALL 7
+
+#define RK805_IRQ_PWRON_RISE_MSK BIT(0)
+#define RK805_IRQ_VB_LOW_MSK BIT(1)
+#define RK805_IRQ_PWRON_MSK BIT(2)
+#define RK805_IRQ_PWRON_LP_MSK BIT(3)
+#define RK805_IRQ_HOTDIE_MSK BIT(4)
+#define RK805_IRQ_RTC_ALARM_MSK BIT(5)
+#define RK805_IRQ_RTC_PERIOD_MSK BIT(6)
+#define RK805_IRQ_PWRON_FALL_MSK BIT(7)
+
+#define RK805_PWR_RISE_INT_STATUS BIT(0)
+#define RK805_VB_LOW_INT_STATUS BIT(1)
+#define RK805_PWRON_INT_STATUS BIT(2)
+#define RK805_PWRON_LP_INT_STATUS BIT(3)
+#define RK805_HOTDIE_INT_STATUS BIT(4)
+#define RK805_ALARM_INT_STATUS BIT(5)
+#define RK805_PERIOD_INT_STATUS BIT(6)
+#define RK805_PWR_FALL_INT_STATUS BIT(7)
+
+#define RK805_BUCK1_2_ILMAX_MASK (3 << 6)
+#define RK805_BUCK3_4_ILMAX_MASK (3 << 3)
+#define RK805_RTC_PERIOD_INT_MASK (1 << 6)
+#define RK805_RTC_ALARM_INT_MASK (1 << 5)
+#define RK805_INT_ALARM_EN (1 << 3)
+#define RK805_INT_TIMER_EN (1 << 2)
+
/* RK808 IRQ Definitions */
#define RK808_IRQ_VOUT_LO 0
#define RK808_IRQ_VB_LO 1
@@ -298,6 +389,14 @@ enum rk818_reg {
#define VOUT_LO_INT BIT(0)
#define CLK32KOUT2_EN BIT(0)
+#define TEMP115C 0x0c
+#define TEMP_HOTDIE_MSK 0x0c
+#define SLP_SD_MSK (0x3 << 2)
+#define SHUTDOWN_FUN (0x2 << 2)
+#define SLEEP_FUN (0x1 << 2)
+#define RK8XX_ID_MSK 0xfff0
+#define FPWM_MODE BIT(7)
+
enum {
BUCK_ILMIN_50MA,
BUCK_ILMIN_100MA,
@@ -321,6 +420,28 @@ enum {
};
enum {
+ RK805_BUCK1_2_ILMAX_2500MA,
+ RK805_BUCK1_2_ILMAX_3000MA,
+ RK805_BUCK1_2_ILMAX_3500MA,
+ RK805_BUCK1_2_ILMAX_4000MA,
+};
+
+enum {
+ RK805_BUCK3_ILMAX_1500MA,
+ RK805_BUCK3_ILMAX_2000MA,
+ RK805_BUCK3_ILMAX_2500MA,
+ RK805_BUCK3_ILMAX_3000MA,
+};
+
+enum {
+ RK805_BUCK4_ILMAX_2000MA,
+ RK805_BUCK4_ILMAX_2500MA,
+ RK805_BUCK4_ILMAX_3000MA,
+ RK805_BUCK4_ILMAX_3500MA,
+};
+
+enum {
+ RK805_ID = 0x8050,
RK808_ID = 0x0000,
RK818_ID = 0x8181,
};
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
index e5a6cdeb77db..d61bc58aba8a 100644
--- a/include/linux/mfd/rn5t618.h
+++ b/include/linux/mfd/rn5t618.h
@@ -226,11 +226,17 @@ enum {
RN5T618_DCDC2,
RN5T618_DCDC3,
RN5T618_DCDC4,
+ RN5T618_DCDC5,
RN5T618_LDO1,
RN5T618_LDO2,
RN5T618_LDO3,
RN5T618_LDO4,
RN5T618_LDO5,
+ RN5T618_LDO6,
+ RN5T618_LDO7,
+ RN5T618_LDO8,
+ RN5T618_LDO9,
+ RN5T618_LDO10,
RN5T618_LDORTC1,
RN5T618_LDORTC2,
RN5T618_REG_NUM,
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
new file mode 100644
index 000000000000..77c7cf40d9b4
--- /dev/null
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -0,0 +1,62 @@
+/*
+ * STM32 Low-Power Timer parent driver.
+ *
+ * Copyright (C) STMicroelectronics 2017
+ *
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
+ *
+ * Inspired by Benjamin Gaignard's stm32-timers driver
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _LINUX_STM32_LPTIMER_H_
+#define _LINUX_STM32_LPTIMER_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+#define STM32_LPTIM_ISR 0x00 /* Interrupt and Status Reg */
+#define STM32_LPTIM_ICR 0x04 /* Interrupt Clear Reg */
+#define STM32_LPTIM_IER 0x08 /* Interrupt Enable Reg */
+#define STM32_LPTIM_CFGR 0x0C /* Configuration Reg */
+#define STM32_LPTIM_CR 0x10 /* Control Reg */
+#define STM32_LPTIM_CMP 0x14 /* Compare Reg */
+#define STM32_LPTIM_ARR 0x18 /* Autoreload Reg */
+#define STM32_LPTIM_CNT 0x1C /* Counter Reg */
+
+/* STM32_LPTIM_ISR - bit fields */
+#define STM32_LPTIM_CMPOK_ARROK GENMASK(4, 3)
+#define STM32_LPTIM_ARROK BIT(4)
+#define STM32_LPTIM_CMPOK BIT(3)
+
+/* STM32_LPTIM_ICR - bit fields */
+#define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3)
+
+/* STM32_LPTIM_CR - bit fields */
+#define STM32_LPTIM_CNTSTRT BIT(2)
+#define STM32_LPTIM_ENABLE BIT(0)
+
+/* STM32_LPTIM_CFGR - bit fields */
+#define STM32_LPTIM_ENC BIT(24)
+#define STM32_LPTIM_COUNTMODE BIT(23)
+#define STM32_LPTIM_WAVPOL BIT(21)
+#define STM32_LPTIM_PRESC GENMASK(11, 9)
+#define STM32_LPTIM_CKPOL GENMASK(2, 1)
+
+/* STM32_LPTIM_ARR */
+#define STM32_LPTIM_MAX_ARR 0xFFFF
+
+/**
+ * struct stm32_lptimer - STM32 Low-Power Timer data assigned by parent device
+ * @clk: clock reference for this instance
+ * @regmap: register map reference for this instance
+ * @has_encoder: indicates this Low-Power Timer supports encoder mode
+ */
+struct stm32_lptimer {
+ struct clk *clk;
+ struct regmap *regmap;
+ bool has_encoder;
+};
+
+#endif
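A hedged sketch of a child driver consuming this shared state, enabling and then starting the counter via the parent's regmap (the helper name is illustrative):

#include <linux/mfd/stm32-lptimer.h>

static int stm32_lptim_start(struct stm32_lptimer *ddata)
{
	int ret;

	/* The LPTIM must be enabled before the start bit is written. */
	ret = regmap_update_bits(ddata->regmap, STM32_LPTIM_CR,
				 STM32_LPTIM_ENABLE, STM32_LPTIM_ENABLE);
	if (ret)
		return ret;

	return regmap_update_bits(ddata->regmap, STM32_LPTIM_CR,
				  STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
}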
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
index afa266169800..7a367f34b66a 100644
--- a/include/linux/mfd/syscon/atmel-smc.h
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -15,21 +15,26 @@
#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10))
-#define ATMEL_HSMC_SETUP(cs) (0x600 + ((cs) * 0x14))
+#define ATMEL_HSMC_SETUP(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14))
#define ATMEL_SMC_PULSE(cs) (((cs) * 0x10) + 0x4)
-#define ATMEL_HSMC_PULSE(cs) (0x600 + ((cs) * 0x14) + 0x4)
+#define ATMEL_HSMC_PULSE(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x4)
#define ATMEL_SMC_CYCLE(cs) (((cs) * 0x10) + 0x8)
-#define ATMEL_HSMC_CYCLE(cs) (0x600 + ((cs) * 0x14) + 0x8)
+#define ATMEL_HSMC_CYCLE(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x8)
#define ATMEL_SMC_NWE_SHIFT 0
#define ATMEL_SMC_NCS_WR_SHIFT 8
#define ATMEL_SMC_NRD_SHIFT 16
#define ATMEL_SMC_NCS_RD_SHIFT 24
#define ATMEL_SMC_MODE(cs) (((cs) * 0x10) + 0xc)
-#define ATMEL_HSMC_MODE(cs) (0x600 + ((cs) * 0x14) + 0x10)
+#define ATMEL_HSMC_MODE(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0x10)
#define ATMEL_SMC_MODE_READMODE_MASK BIT(0)
#define ATMEL_SMC_MODE_READMODE_NCS (0 << 0)
#define ATMEL_SMC_MODE_READMODE_NRD (1 << 0)
@@ -59,7 +64,8 @@
#define ATMEL_SMC_MODE_PS_16 (2 << 28)
#define ATMEL_SMC_MODE_PS_32 (3 << 28)
-#define ATMEL_HSMC_TIMINGS(cs) (0x600 + ((cs) * 0x14) + 0xc)
+#define ATMEL_HSMC_TIMINGS(layout, cs) \
+ ((layout)->timing_regs_offset + ((cs) * 0x14) + 0xc)
#define ATMEL_HSMC_TIMINGS_OCMS BIT(12)
#define ATMEL_HSMC_TIMINGS_RBNSEL(x) ((x) << 28)
#define ATMEL_HSMC_TIMINGS_NFSEL BIT(31)
@@ -69,6 +75,10 @@
#define ATMEL_HSMC_TIMINGS_TRR_SHIFT 16
#define ATMEL_HSMC_TIMINGS_TWB_SHIFT 24
+struct atmel_hsmc_reg_layout {
+ unsigned int timing_regs_offset;
+};
+
/**
* struct atmel_smc_cs_conf - SMC CS config as described in the datasheet.
* @setup: NCS/NWE/NRD setup timings (not applicable to at91rm9200)
@@ -98,11 +108,15 @@ int atmel_smc_cs_conf_set_cycle(struct atmel_smc_cs_conf *conf,
unsigned int shift, unsigned int ncycles);
void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs,
const struct atmel_smc_cs_conf *conf);
-void atmel_hsmc_cs_conf_apply(struct regmap *regmap, int cs,
- const struct atmel_smc_cs_conf *conf);
+void atmel_hsmc_cs_conf_apply(struct regmap *regmap,
+ const struct atmel_hsmc_reg_layout *reglayout,
+ int cs, const struct atmel_smc_cs_conf *conf);
void atmel_smc_cs_conf_get(struct regmap *regmap, int cs,
struct atmel_smc_cs_conf *conf);
-void atmel_hsmc_cs_conf_get(struct regmap *regmap, int cs,
- struct atmel_smc_cs_conf *conf);
+void atmel_hsmc_cs_conf_get(struct regmap *regmap,
+ const struct atmel_hsmc_reg_layout *reglayout,
+ int cs, struct atmel_smc_cs_conf *conf);
+const struct atmel_hsmc_reg_layout *
+atmel_hsmc_get_reg_layout(struct device_node *np);
#endif /* _LINUX_MFD_SYSCON_ATMEL_SMC_H_ */
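Illustrating the layout-aware API, a hedged caller that looks up the layout from the HSMC node before applying a CS config (this assumes the lookup returns an ERR_PTR on failure):

#include <linux/err.h>
#include <linux/mfd/syscon/atmel-smc.h>

static int my_apply_cs_conf(struct regmap *regmap,
			    struct device_node *hsmc_np, int cs,
			    const struct atmel_smc_cs_conf *conf)
{
	const struct atmel_hsmc_reg_layout *layout;

	/* The timing block offset is now looked up per compatible
	 * instead of being hardcoded at 0x600.
	 */
	layout = atmel_hsmc_get_reg_layout(hsmc_np);
	if (IS_ERR(layout))
		return PTR_ERR(layout);

	atmel_hsmc_cs_conf_apply(regmap, layout, cs, conf);
	return 0;
}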
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 26e8f8c0a6db..15646740e2a8 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -107,6 +107,9 @@
*/
#define TMIO_MMC_CLK_ACTUAL BIT(10)
+/* Some controllers have a CBSY bit */
+#define TMIO_MMC_HAVE_CBSY BIT(11)
+
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
@@ -128,6 +131,8 @@ struct tmio_mmc_data {
unsigned int cd_gpio;
int alignment_shift;
dma_addr_t dma_rx_offset;
+ unsigned int max_blk_count;
+ unsigned short max_segs;
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
};
@@ -139,6 +144,7 @@ struct tmio_nand_data {
struct nand_bbt_descr *badblock_pattern;
struct mtd_partition *partition;
unsigned int num_partitions;
+ const char *const *part_parsers;
};
#define FBIO_TMIO_ACC_WRITE 0x7C639300
diff --git a/include/linux/i2c/tps65010.h b/include/linux/mfd/tps65010.h
index 08aa92278d71..a1fb9bc5311d 100644
--- a/include/linux/i2c/tps65010.h
+++ b/include/linux/mfd/tps65010.h
@@ -1,4 +1,4 @@
-/* linux/i2c/tps65010.h
+/* linux/mfd/tps65010.h
*
* Functions to access TPS65010 power management device.
*
diff --git a/include/linux/mfd/tps68470.h b/include/linux/mfd/tps68470.h
new file mode 100644
index 000000000000..44f9d9f647ed
--- /dev/null
+++ b/include/linux/mfd/tps68470.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017 Intel Corporation
+ *
+ * Functions to access TPS68470 power management chip.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_TPS68470_H
+#define __LINUX_MFD_TPS68470_H
+
+/* Register addresses */
+#define TPS68470_REG_POSTDIV2 0x06
+#define TPS68470_REG_BOOSTDIV 0x07
+#define TPS68470_REG_BUCKDIV 0x08
+#define TPS68470_REG_PLLSWR 0x09
+#define TPS68470_REG_XTALDIV 0x0A
+#define TPS68470_REG_PLLDIV 0x0B
+#define TPS68470_REG_POSTDIV 0x0C
+#define TPS68470_REG_PLLCTL 0x0D
+#define TPS68470_REG_PLLCTL2 0x0E
+#define TPS68470_REG_CLKCFG1 0x0F
+#define TPS68470_REG_CLKCFG2 0x10
+#define TPS68470_REG_GPCTL0A 0x14
+#define TPS68470_REG_GPCTL0B 0x15
+#define TPS68470_REG_GPCTL1A 0x16
+#define TPS68470_REG_GPCTL1B 0x17
+#define TPS68470_REG_GPCTL2A 0x18
+#define TPS68470_REG_GPCTL2B 0x19
+#define TPS68470_REG_GPCTL3A 0x1A
+#define TPS68470_REG_GPCTL3B 0x1B
+#define TPS68470_REG_GPCTL4A 0x1C
+#define TPS68470_REG_GPCTL4B 0x1D
+#define TPS68470_REG_GPCTL5A 0x1E
+#define TPS68470_REG_GPCTL5B 0x1F
+#define TPS68470_REG_GPCTL6A 0x20
+#define TPS68470_REG_GPCTL6B 0x21
+#define TPS68470_REG_SGPO 0x22
+#define TPS68470_REG_GPDI 0x26
+#define TPS68470_REG_GPDO 0x27
+#define TPS68470_REG_VCMVAL 0x3C
+#define TPS68470_REG_VAUX1VAL 0x3D
+#define TPS68470_REG_VAUX2VAL 0x3E
+#define TPS68470_REG_VIOVAL 0x3F
+#define TPS68470_REG_VSIOVAL 0x40
+#define TPS68470_REG_VAVAL 0x41
+#define TPS68470_REG_VDVAL 0x42
+#define TPS68470_REG_S_I2C_CTL 0x43
+#define TPS68470_REG_VCMCTL 0x44
+#define TPS68470_REG_VAUX1CTL 0x45
+#define TPS68470_REG_VAUX2CTL 0x46
+#define TPS68470_REG_VACTL 0x47
+#define TPS68470_REG_VDCTL 0x48
+#define TPS68470_REG_RESET 0x50
+#define TPS68470_REG_REVID 0xFF
+
+#define TPS68470_REG_MAX TPS68470_REG_REVID
+
+/* Register field definitions */
+
+#define TPS68470_REG_RESET_MASK GENMASK(7, 0)
+#define TPS68470_VAVAL_AVOLT_MASK GENMASK(6, 0)
+
+#define TPS68470_VDVAL_DVOLT_MASK GENMASK(5, 0)
+#define TPS68470_VCMVAL_VCVOLT_MASK GENMASK(6, 0)
+#define TPS68470_VIOVAL_IOVOLT_MASK GENMASK(6, 0)
+#define TPS68470_VSIOVAL_IOVOLT_MASK GENMASK(6, 0)
+#define TPS68470_VAUX1VAL_AUX1VOLT_MASK GENMASK(6, 0)
+#define TPS68470_VAUX2VAL_AUX2VOLT_MASK GENMASK(6, 0)
+
+#define TPS68470_VACTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_VDCTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_VCMCTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_S_I2C_CTL_EN_MASK GENMASK(1, 0)
+#define TPS68470_VAUX1CTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_VAUX2CTL_EN_MASK GENMASK(0, 0)
+#define TPS68470_PLL_EN_MASK GENMASK(0, 0)
+
+#define TPS68470_CLKCFG1_MODE_A_MASK GENMASK(1, 0)
+#define TPS68470_CLKCFG1_MODE_B_MASK GENMASK(3, 2)
+
+#define TPS68470_GPIO_CTL_REG_A(x) (TPS68470_REG_GPCTL0A + (x) * 2)
+#define TPS68470_GPIO_CTL_REG_B(x) (TPS68470_REG_GPCTL0B + (x) * 2)
+#define TPS68470_GPIO_MODE_MASK GENMASK(1, 0)
+#define TPS68470_GPIO_MODE_IN 0
+#define TPS68470_GPIO_MODE_IN_PULLUP 1
+#define TPS68470_GPIO_MODE_OUT_CMOS 2
+#define TPS68470_GPIO_MODE_OUT_ODRAIN 3
+
+#endif /* __LINUX_MFD_TPS68470_H */
diff --git a/include/linux/i2c/twl.h b/include/linux/mfd/twl.h
index 9ad7828d9d34..9ad7828d9d34 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/mfd/twl.h
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 3e0d405dc842..643c7ae7d7b4 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -35,15 +35,28 @@ static inline struct page *new_page_nodemask(struct page *page,
int preferred_nid, nodemask_t *nodemask)
{
gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
+ unsigned int order = 0;
+ struct page *new_page = NULL;
if (PageHuge(page))
return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
preferred_nid, nodemask);
+ if (thp_migration_supported() && PageTransHuge(page)) {
+ order = HPAGE_PMD_ORDER;
+ gfp_mask |= GFP_TRANSHUGE;
+ }
+
if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
gfp_mask |= __GFP_HIGHMEM;
- return __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
+ new_page = __alloc_pages_nodemask(gfp_mask, order,
+ preferred_nid, nodemask);
+
+ if (new_page && PageTransHuge(page))
+ prep_transhuge_page(new_page);
+
+ return new_page;
}
#ifdef CONFIG_MIGRATION
@@ -59,6 +72,7 @@ extern void putback_movable_page(struct page *page);
extern int migrate_prep(void);
extern int migrate_prep_local(void);
+extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
@@ -79,6 +93,10 @@ static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
+static inline void migrate_page_states(struct page *newpage, struct page *page)
+{
+}
+
static inline void migrate_page_copy(struct page *newpage,
struct page *page) {}
@@ -138,4 +156,136 @@ static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
+
+#ifdef CONFIG_MIGRATION
+
+/*
+ * Watch out for the PAE architecture, whose unsigned long might not have
+ * enough bits to store all the physical address bits plus the flags. So far
+ * we have enough room for all our flags.
+ */
+#define MIGRATE_PFN_VALID (1UL << 0)
+#define MIGRATE_PFN_MIGRATE (1UL << 1)
+#define MIGRATE_PFN_LOCKED (1UL << 2)
+#define MIGRATE_PFN_WRITE (1UL << 3)
+#define MIGRATE_PFN_DEVICE (1UL << 4)
+#define MIGRATE_PFN_ERROR (1UL << 5)
+#define MIGRATE_PFN_SHIFT 6
+
+static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
+{
+ if (!(mpfn & MIGRATE_PFN_VALID))
+ return NULL;
+ return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
+}
+
+static inline unsigned long migrate_pfn(unsigned long pfn)
+{
+ return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
+}
+
+/*
+ * struct migrate_vma_ops - migrate operation callback
+ *
+ * @alloc_and_copy: alloc destination memory and copy source memory to it
+ * @finalize_and_map: allow caller to map the successfully migrated pages
+ *
+ *
+ * The alloc_and_copy() callback happens once all source pages have been locked,
+ * unmapped and checked (checked whether pinned or not). All pages that can be
+ * migrated will have an entry in the src array set with the pfn value of the
+ * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set (other
+ * flags might be set but should be ignored by the callback).
+ *
+ * The alloc_and_copy() callback can then allocate destination memory and copy
+ * source memory to it for all those entries (ie with MIGRATE_PFN_VALID and
+ * MIGRATE_PFN_MIGRATE flag set). Once these are allocated and copied, the
+ * callback must update each corresponding entry in the dst array with the pfn
+ * value of the destination page and with the MIGRATE_PFN_VALID and
+ * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages
+ * locked, via lock_page()).
+ *
+ * At this point the alloc_and_copy() callback is done and returns.
+ *
+ * Note that the callback does not have to migrate all the pages that are
+ * marked with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration
+ * from device memory to system memory (ie the MIGRATE_PFN_DEVICE flag is also
+ * set in the src array entry). If the device driver cannot migrate a device
+ * page back to system memory, then it must set the corresponding dst array
+ * entry to MIGRATE_PFN_ERROR. This will trigger a SIGBUS if CPU tries to
+ * access any of the virtual addresses originally backed by this page. Because
+ * a SIGBUS is such a severe result for the userspace process, the device
+ * driver should avoid setting MIGRATE_PFN_ERROR unless it is really in an
+ * unrecoverable state.
+ *
+ * For empty entries in the CPU page table (pte_none() or pmd_none() is true)
+ * we do set the MIGRATE_PFN_MIGRATE flag in the corresponding source array
+ * entry, thus allowing the device driver to allocate device memory for those
+ * unbacked virtual addresses. For this the device driver simply has to
+ * allocate device memory and properly set the destination entry, like for
+ * regular migration. Note that this can still fail, and thus the device
+ * driver must check whether the migration was successful for those entries
+ * inside the finalize_and_map() callback, just like for regular migration.
+ *
+ * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
+ * OR BAD THINGS WILL HAPPEN !
+ *
+ *
+ * The finalize_and_map() callback happens after struct page migration from
+ * source to destination (destination struct pages are the struct pages for the
+ * memory allocated by the alloc_and_copy() callback). Migration can fail, and
+ * thus the finalize_and_map() allows the driver to inspect which pages were
+ * successfully migrated, and which were not. Successfully migrated pages will
+ * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
+ *
+ * It is safe to update device page table from within the finalize_and_map()
+ * callback because both destination and source page are still locked, and the
+ * mmap_sem is held in read mode (hence no one can unmap the range being
+ * migrated).
+ *
+ * Once the callback is done cleaning things up and updating its page table
+ * (if it chose to do so; this is not an obligation), it returns. At this
+ * point, the HMM core will finish up the final steps, and the migration is
+ * complete.
+ *
+ * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY
+ * ENTRIES OR BAD THINGS WILL HAPPEN!
+ */
+struct migrate_vma_ops {
+ void (*alloc_and_copy)(struct vm_area_struct *vma,
+ const unsigned long *src,
+ unsigned long *dst,
+ unsigned long start,
+ unsigned long end,
+ void *private);
+ void (*finalize_and_map)(struct vm_area_struct *vma,
+ const unsigned long *src,
+ const unsigned long *dst,
+ unsigned long start,
+ unsigned long end,
+ void *private);
+};
+
+#if defined(CONFIG_MIGRATE_VMA_HELPER)
+int migrate_vma(const struct migrate_vma_ops *ops,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long *src,
+ unsigned long *dst,
+ void *private);
+#else
+static inline int migrate_vma(const struct migrate_vma_ops *ops,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long *src,
+ unsigned long *dst,
+ void *private)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_MIGRATE_VMA_HELPER */
+
+#endif /* CONFIG_MIGRATION */
+
#endif /* _LINUX_MIGRATE_H */
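
To make the two-callback contract above concrete, here is a minimal sketch of a driver-side migration using the helpers in this header. dev_alloc_page() and dev_copy_page() are hypothetical driver-provided routines, not existing API, and the skip-on-failure behaviour (leaving dst[i] untouched rather than setting MIGRATE_PFN_ERROR) follows the guidance in the comment block.

static void demo_alloc_and_copy(struct vm_area_struct *vma,
				const unsigned long *src,
				unsigned long *dst,
				unsigned long start,
				unsigned long end,
				void *private)
{
	const unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct page *spage, *dpage;

		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;
		dpage = dev_alloc_page(private);	/* hypothetical */
		if (!dpage)
			continue;	/* skipping is allowed for system memory */
		spage = migrate_pfn_to_page(src[i]);
		lock_page(dpage);
		if (spage)
			dev_copy_page(dpage, spage);	/* hypothetical DMA copy */
		/* else: pte_none() hole, nothing to copy */
		dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	}
}

static void demo_finalize_and_map(struct vm_area_struct *vma,
				  const unsigned long *src,
				  const unsigned long *dst,
				  unsigned long start,
				  unsigned long end,
				  void *private)
{
	/* Entries still carrying MIGRATE_PFN_MIGRATE in src did migrate;
	 * update device page tables for them here if desired. */
}

static const struct migrate_vma_ops demo_migrate_ops = {
	.alloc_and_copy		= demo_alloc_and_copy,
	.finalize_and_map	= demo_finalize_and_map,
};

/* usage: err = migrate_vma(&demo_migrate_ops, vma, start, end,
 *			    src, dst, private); */
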
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index ebf3d89a3919..bdf66af9b937 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -6,11 +6,16 @@
* on most operations but not ->writepage as the potential stall time
* is too significant
* MIGRATE_SYNC will block when migrating pages
+ * MIGRATE_SYNC_NO_COPY will block when migrating pages but will not copy pages
+ * with the CPU. Instead, the page copy happens outside the migratepage()
+ * callback and is likely done using a DMA engine. See migrate_vma() and HMM
+ * (mm/hmm.c) for users of this mode.
*/
enum migrate_mode {
MIGRATE_ASYNC,
MIGRATE_SYNC_LIGHT,
MIGRATE_SYNC,
+ MIGRATE_SYNC_NO_COPY,
};
#endif /* MIGRATE_MODE_H_INCLUDED */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b54517c05e9a..b0a57e043fa3 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -224,6 +224,7 @@ enum {
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
+ MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38,
};
enum {
@@ -428,6 +429,12 @@ enum mlx4_steer_type {
MLX4_NUM_STEERS
};
+enum mlx4_resource_usage {
+ MLX4_RES_USAGE_NONE,
+ MLX4_RES_USAGE_DRIVER,
+ MLX4_RES_USAGE_USER_VERBS,
+};
+
enum {
MLX4_NUM_FEXCH = 64 * 1024,
};
@@ -518,6 +525,14 @@ struct mlx4_phys_caps {
u32 base_tunnel_sqpn;
};
+struct mlx4_spec_qps {
+ u32 qp0_qkey;
+ u32 qp0_proxy;
+ u32 qp0_tunnel;
+ u32 qp1_proxy;
+ u32 qp1_tunnel;
+};
+
struct mlx4_caps {
u64 fw_ver;
u32 function;
@@ -547,11 +562,7 @@ struct mlx4_caps {
int max_qp_init_rdma;
int max_qp_dest_rdma;
int max_tc_eth;
- u32 *qp0_qkey;
- u32 *qp0_proxy;
- u32 *qp1_proxy;
- u32 *qp0_tunnel;
- u32 *qp1_tunnel;
+ struct mlx4_spec_qps *spec_qps;
int num_srqs;
int max_srq_wqes;
int max_srq_sge;
@@ -749,6 +760,7 @@ struct mlx4_cq {
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
+ u8 usage;
};
struct mlx4_qp {
@@ -758,6 +770,7 @@ struct mlx4_qp {
atomic_t refcount;
struct completion free;
+ u8 usage;
};
struct mlx4_srq {
@@ -1121,7 +1134,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
unsigned vector, int collapsed, int timestamp_en);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
- int *base, u8 flags);
+ int *base, u8 flags, u8 usage);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
@@ -1373,6 +1386,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac);
int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
@@ -1418,7 +1432,7 @@ int mlx4_get_phys_port_id(struct mlx4_dev *dev);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
-int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index f31a0b5377e1..e32dbc4934db 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -48,7 +48,7 @@
/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
-#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
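
The switch to offsetof() relies on the mlx5_ifc convention that every field is declared as a u8 array whose element count is the field's width in bits, so the byte offset of a member is numerically equal to its bit offset in the hardware layout. A small illustration, with hypothetical field names:

struct mlx5_ifc_example_bits {
	u8 field_a[0x10];	/* "bits" 0x00..0x0f */
	u8 field_b[0x10];	/* "bits" 0x10..0x1f */
};
/* __mlx5_bit_off(example, field_b) ==
 *	offsetof(struct mlx5_ifc_example_bits, field_b) == 0x10 */
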
@@ -290,6 +290,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+ MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -305,6 +306,10 @@ enum mlx5_event {
};
enum {
+ MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
+};
+
+enum {
MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
@@ -709,7 +714,7 @@ static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
return (cqe->op_own >> 2) & 0x3;
}
-static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}
@@ -968,14 +973,13 @@ enum mlx5_cap_type {
MLX5_CAP_ATOMIC,
MLX5_CAP_ROCE,
MLX5_CAP_IPOIB_OFFLOADS,
- MLX5_CAP_EOIB_OFFLOADS,
+ MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
MLX5_CAP_RESERVED,
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
- MLX5_CAP_FPGA,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1011,6 +1015,10 @@ enum mlx5_mcam_feature_groups {
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
+
#define MLX5_CAP_ROCE(mdev, cap) \
MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
@@ -1101,10 +1109,10 @@ enum mlx5_mcam_feature_groups {
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
#define MLX5_CAP_FPGA(mdev, cap) \
- MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+ MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
#define MLX5_CAP64_FPGA(mdev, cap) \
- MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap)
+ MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
enum {
MLX5_CMD_STAT_OK = 0x0,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index df6ce59a1f95..401c8972cc3a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -162,6 +162,13 @@ enum dbg_rsc_type {
MLX5_DBG_RSC_CQ,
};
+enum port_state_policy {
+ MLX5_POLICY_DOWN = 0,
+ MLX5_POLICY_UP = 1,
+ MLX5_POLICY_FOLLOW = 2,
+ MLX5_POLICY_INVALID = 0xffffffff
+};
+
struct mlx5_field_desc {
struct dentry *dent;
int i;
@@ -185,6 +192,7 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
MLX5_DEV_EVENT_PPS,
+ MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
};
enum mlx5_port_status {
@@ -291,7 +299,7 @@ struct mlx5_cmd {
struct semaphore pages_sem;
int mode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
- struct pci_pool *pool;
+ struct dma_pool *pool;
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
@@ -410,6 +418,7 @@ enum mlx5_res_type {
MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
+ MLX5_RES_XRQ = 5,
};
struct mlx5_core_rsc_common {
@@ -525,6 +534,9 @@ struct mlx5_mkey_table {
struct mlx5_vf_context {
int enabled;
+ u64 port_guid;
+ u64 node_guid;
+ enum port_state_policy policy;
};
struct mlx5_core_sriov {
@@ -534,7 +546,6 @@ struct mlx5_core_sriov {
};
struct mlx5_irq_info {
- cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
};
@@ -550,6 +561,7 @@ struct mlx5_fc_stats {
unsigned long sampling_interval; /* jiffies */
};
+struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_pagefault;
@@ -597,7 +609,6 @@ struct mlx5_port_module_event_stats {
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
- struct msix_entry *msix_arr;
struct mlx5_irq_info *irq_info;
/* pages stuff */
@@ -646,7 +657,11 @@ struct mlx5_priv {
struct list_head ctx_list;
spinlock_t ctx_lock;
+ struct list_head waiting_events_list;
+ bool is_accum_events;
+
struct mlx5_flow_steering *steering;
+ struct mlx5_mpfs *mpfs;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
@@ -673,9 +688,7 @@ enum mlx5_device_state {
};
enum mlx5_interface_state {
- MLX5_INTERFACE_STATE_DOWN = BIT(0),
- MLX5_INTERFACE_STATE_UP = BIT(1),
- MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
+ MLX5_INTERFACE_STATE_UP = BIT(0),
};
enum mlx5_pci_status {
@@ -761,6 +774,7 @@ struct mlx5_core_dev {
u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
} caps;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
@@ -842,13 +856,6 @@ struct mlx5_pas {
u8 log_sz;
};
-enum port_state_policy {
- MLX5_POLICY_DOWN = 0,
- MLX5_POLICY_UP = 1,
- MLX5_POLICY_FOLLOW = 2,
- MLX5_POLICY_INVALID = 0xffffffff
-};
-
enum phy_port_state {
MLX5_AAA_111
};
@@ -885,8 +892,6 @@ static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
return buf->direct.buf + offset;
}
-extern struct workqueue_struct *mlx5_core_wq;
-
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
@@ -1091,7 +1096,7 @@ enum {
};
enum {
- MAX_UMR_CACHE_ENTRY = 20,
+ MR_CACHE_LAST_STD_ENTRY = 20,
MLX5_IMR_MTT_CACHE_ENTRY,
MLX5_IMR_KSM_CACHE_ENTRY,
MAX_MR_CACHE_ENTRIES
@@ -1185,4 +1190,10 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
+static inline const struct cpumask *
+mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
+{
+ return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
+}
+
#endif /* MLX5_DRIVER_H */
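
The new mlx5_get_vector_affinity() helper exposes a completion vector's IRQ affinity to upper layers. A hedged sketch of one possible use; pick_vector_for_cpu() and the ncomp bound are assumptions for illustration, not existing API:

static int pick_vector_for_cpu(struct mlx5_core_dev *dev, int cpu, int ncomp)
{
	int v;

	for (v = 0; v < ncomp; v++) {
		const struct cpumask *mask = mlx5_get_vector_affinity(dev, v);

		/* pci_irq_get_affinity() may return NULL */
		if (mask && cpumask_test_cpu(cpu, mask))
			return v;
	}
	return 0;	/* fall back to the first vector */
}
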
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 3030121b4746..69772347f866 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -200,6 +200,7 @@ enum {
MLX5_CMD_OP_QUERY_SQ = 0x907,
MLX5_CMD_OP_CREATE_RQ = 0x908,
MLX5_CMD_OP_MODIFY_RQ = 0x909,
+ MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910,
MLX5_CMD_OP_DESTROY_RQ = 0x90a,
MLX5_CMD_OP_QUERY_RQ = 0x90b,
MLX5_CMD_OP_CREATE_RMP = 0x90c,
@@ -294,8 +295,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_dport[0x1];
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
+ u8 reserved_at_40[0x1a];
+ u8 bth_dst_qp[0x1];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_5b[0x25];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -324,7 +327,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_80[0x18];
u8 log_max_destination[0x8];
- u8 reserved_at_a0[0x18];
+ u8 log_max_flow_counter[0x8];
+ u8 reserved_at_a8[0x10];
u8 log_max_flow[0x8];
u8 reserved_at_c0[0x40];
@@ -431,7 +435,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
- u8 reserved_at_120[0xe0];
+ u8 reserved_at_120[0x28];
+ u8 bth_dst_qp[0x18];
+ u8 reserved_at_160[0xa0];
};
struct mlx5_ifc_cmd_pas_bits {
@@ -599,10 +605,10 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 rss_ind_tbl_cap[0x4];
u8 reg_umr_sq[0x1];
u8 scatter_fcs[0x1];
- u8 reserved_at_1a[0x1];
+ u8 enhanced_multi_pkt_send_wqe[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
u8 reserved_at_1c[0x2];
- u8 tunnel_statless_gre[0x1];
+ u8 tunnel_stateless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
u8 swp[0x1];
@@ -840,7 +846,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 retransmission_q_counters[0x1];
u8 reserved_at_183[0x1];
u8 modify_rq_counter_set_id[0x1];
- u8 reserved_at_185[0x1];
+ u8 rq_delay_drop[0x1];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@@ -857,7 +863,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
u8 port_module_event[0x1];
- u8 reserved_at_1b1[0x1];
+ u8 enhanced_error_q_counters[0x1];
u8 ports_check[0x1];
u8 reserved_at_1b3[0x1];
u8 disable_link_up[0x1];
@@ -873,7 +879,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_tc[0x4];
u8 reserved_at_1d0[0x1];
u8 dcbx[0x1];
- u8 reserved_at_1d2[0x3];
+ u8 general_notification_event[0x1];
+ u8 reserved_at_1d3[0x2];
u8 fpga[0x1];
u8 rol_s[0x1];
u8 rol_g[0x1];
@@ -963,7 +970,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_2a0[0x10];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_at_2c0[0x10];
+ u8 max_flow_counter_31_16[0x10];
u8 max_wqe_sz_sq_dc[0x10];
u8 reserved_at_2e0[0x7];
@@ -981,7 +988,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_340[0x8];
u8 log_max_flow_counter_bulk[0x8];
- u8 max_flow_counter[0x10];
+ u8 max_flow_counter_15_0[0x10];
u8 reserved_at_360[0x3];
@@ -1016,7 +1023,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
- u8 reserved_at_3e1[0xa];
+ u8 disable_local_lb[0x1];
+ u8 reserved_at_3e2[0x9];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@@ -1071,9 +1079,7 @@ struct mlx5_ifc_dest_format_struct_bits {
};
struct mlx5_ifc_flow_counter_list_bits {
- u8 clear[0x1];
- u8 num_of_counters[0xf];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_20[0x20];
};
@@ -1187,7 +1193,8 @@ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
u8 reserved_at_c0[0x12];
u8 cnp_dscp[0x6];
- u8 reserved_at_d8[0x5];
+ u8 reserved_at_d8[0x4];
+ u8 cnp_prio_mode[0x1];
u8 cnp_802p_prio[0x3];
u8 reserved_at_e0[0x720];
@@ -1540,7 +1547,17 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
u8 port_transmit_wait_low[0x20];
- u8 reserved_at_40[0x780];
+ u8 reserved_at_40[0x100];
+
+ u8 rx_buffer_almost_full_high[0x20];
+
+ u8 rx_buffer_almost_full_low[0x20];
+
+ u8 rx_buffer_full_high[0x20];
+
+ u8 rx_buffer_full_low[0x20];
+
+ u8 reserved_at_1c0[0x600];
};
struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -1856,7 +1873,19 @@ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
u8 crc_error_tlp[0x20];
- u8 reserved_at_140[0x680];
+ u8 tx_overflow_buffer_pkt_high[0x20];
+
+ u8 tx_overflow_buffer_pkt_low[0x20];
+
+ u8 outbound_stalled_reads[0x20];
+
+ u8 outbound_stalled_writes[0x20];
+
+ u8 outbound_stalled_reads_events[0x20];
+
+ u8 outbound_stalled_writes_events[0x20];
+
+ u8 reserved_at_200[0x5c0];
};
struct mlx5_ifc_cmd_inter_comp_event_bits {
@@ -2015,6 +2044,10 @@ enum {
};
enum {
+ MLX5_QPC_OFFLOAD_TYPE_RNDV = 0x1,
+};
+
+enum {
MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
};
@@ -2057,7 +2090,8 @@ struct mlx5_ifc_qpc_bits {
u8 st[0x8];
u8 reserved_at_10[0x3];
u8 pm_state[0x2];
- u8 reserved_at_15[0x7];
+ u8 reserved_at_15[0x3];
+ u8 offload_type[0x4];
u8 end_padding_mode[0x2];
u8 reserved_at_1e[0x2];
@@ -2437,7 +2471,7 @@ struct mlx5_ifc_sqc_bits {
u8 cd_master[0x1];
u8 fre[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_at_4[0x1];
+ u8 allow_multi_pkt_send_wqe[0x1];
u8 min_wqe_inline_mode[0x3];
u8 state[0x4];
u8 reg_umr[0x1];
@@ -2515,7 +2549,7 @@ enum {
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
- u8 reserved_at_1[0x1];
+ u8 delay_drop_en[0x1];
u8 scatter_fcs[0x1];
u8 vsd[0x1];
u8 mem_rq_type[0x4];
@@ -2562,7 +2596,9 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_0[0x5];
u8 min_wqe_inline_mode[0x3];
- u8 reserved_at_8[0x17];
+ u8 reserved_at_8[0x15];
+ u8 disable_mc_local_lb[0x1];
+ u8 disable_uc_local_lb[0x1];
u8 roce_en[0x1];
u8 arm_change_event[0x1];
@@ -3000,7 +3036,7 @@ struct mlx5_ifc_xrqc_bits {
struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
- u8 reserved_at_180[0x880];
+ u8 reserved_at_180[0x280];
struct mlx5_ifc_wq_bits wq;
};
@@ -3947,7 +3983,47 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 local_ack_timeout_err[0x20];
- u8 reserved_at_320[0x4e0];
+ u8 reserved_at_320[0xa0];
+
+ u8 resp_local_length_error[0x20];
+
+ u8 req_local_length_error[0x20];
+
+ u8 resp_local_qp_error[0x20];
+
+ u8 local_operation_error[0x20];
+
+ u8 resp_local_protection[0x20];
+
+ u8 req_local_protection[0x20];
+
+ u8 resp_cqe_error[0x20];
+
+ u8 req_cqe_error[0x20];
+
+ u8 req_mw_binding[0x20];
+
+ u8 req_bad_response[0x20];
+
+ u8 req_remote_invalid_request[0x20];
+
+ u8 resp_remote_invalid_request[0x20];
+
+ u8 req_remote_access_errors[0x20];
+
+ u8 resp_remote_access_errors[0x20];
+
+ u8 req_remote_operation_errors[0x20];
+
+ u8 req_transport_retries_exceeded[0x20];
+
+ u8 cq_overflow[0x20];
+
+ u8 resp_cqe_flush_error[0x20];
+
+ u8 req_cqe_flush_error[0x20];
+
+ u8 reserved_at_620[0x1e0];
};
struct mlx5_ifc_query_q_counter_in_bits {
@@ -4403,8 +4479,7 @@ struct mlx5_ifc_query_flow_counter_in_bits {
u8 reserved_at_c1[0xf];
u8 num_of_counters[0x10];
- u8 reserved_at_e0[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
};
struct mlx5_ifc_query_esw_vport_context_out_bits {
@@ -5229,7 +5304,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x16];
+ u8 reserved_at_0[0x14];
+ u8 disable_uc_local_lb[0x1];
+ u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
u8 port_guid[0x1];
u8 min_inline[0x1];
@@ -5847,6 +5924,28 @@ struct mlx5_ifc_destroy_rq_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_set_delay_drop_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 delay_drop_timeout[0x10];
+};
+
+struct mlx5_ifc_set_delay_drop_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_destroy_rmp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6272,8 +6371,7 @@ struct mlx5_ifc_dealloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_60[0x20];
};
@@ -7098,8 +7196,7 @@ struct mlx5_ifc_alloc_flow_counter_out_bits {
u8 syndrome[0x20];
- u8 reserved_at_40[0x10];
- u8 flow_counter_id[0x10];
+ u8 flow_counter_id[0x20];
u8 reserved_at_60[0x20];
};
@@ -7718,8 +7815,9 @@ struct mlx5_ifc_peir_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x7c];
+ u8 reserved_at_0[0x7b];
+ u8 rx_buffer_fullness_counters[0x1];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
u8 ppcnt_discard_group[0x1];
@@ -7749,8 +7847,9 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x7d];
-
+ u8 reserved_at_0[0x7b];
+ u8 pcie_outbound_stalled[0x1];
+ u8 tx_overflow_buffer_pkt[0x1];
u8 mtpps_enh_out_per_adj[0x1];
u8 mtpps_fs[0x1];
u8 pcie_performance_group[0x1];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index f378dc0e7eaf..66d19b611fe4 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -560,6 +560,9 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
+int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
+ u32 timeout_usec);
+
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index 1cde0fd53f90..24ff23e27c8a 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -38,6 +38,7 @@
enum {
MLX5_SRQ_FLAG_ERR = (1 << 0),
MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
+ MLX5_SRQ_FLAG_RNDV = (1 << 2),
};
struct mlx5_srq_attr {
@@ -56,6 +57,10 @@ struct mlx5_srq_attr {
u32 user_index;
u64 db_record;
__be64 *pas;
+ u32 tm_log_list_size;
+ u32 tm_next_tag;
+ u32 tm_hw_phase_cnt;
+ u32 tm_sw_phase_cnt;
};
struct mlx5_core_dev;
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 656c70b65dd2..aaa0bb9e7655 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -114,5 +114,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
int vf,
struct mlx5_hca_vport_context *req);
-
+int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
+int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 46b9ac5e8569..065d99deb847 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -23,6 +23,7 @@
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
+#include <linux/memremap.h>
struct mempolicy;
struct anon_vma;
@@ -189,7 +190,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
-#define VM_ARCH_2 0x02000000
+#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
#ifdef CONFIG_MEM_SOFT_DIRTY
@@ -208,10 +209,12 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
+#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#if defined(CONFIG_X86)
@@ -235,9 +238,11 @@ extern unsigned int kobjsize(const void *objp);
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif
-#if defined(CONFIG_X86)
+#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
-# define VM_MPX VM_ARCH_2
+# define VM_MPX VM_HIGH_ARCH_4
+#else
+# define VM_MPX VM_NONE
#endif
#ifndef VM_GROWSUP
@@ -795,6 +800,28 @@ static inline bool is_zone_device_page(const struct page *page)
}
#endif
+#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
+void put_zone_device_private_or_public_page(struct page *page);
+DECLARE_STATIC_KEY_FALSE(device_private_key);
+#define IS_HMM_ENABLED static_branch_unlikely(&device_private_key)
+static inline bool is_device_private_page(const struct page *page);
+static inline bool is_device_public_page(const struct page *page);
+#else /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+static inline void put_zone_device_private_or_public_page(struct page *page)
+{
+}
+#define IS_HMM_ENABLED 0
+static inline bool is_device_private_page(const struct page *page)
+{
+ return false;
+}
+static inline bool is_device_public_page(const struct page *page)
+{
+ return false;
+}
+#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+
+
static inline void get_page(struct page *page)
{
page = compound_head(page);
@@ -810,6 +837,18 @@ static inline void put_page(struct page *page)
{
page = compound_head(page);
+ /*
+ * For private device pages we need to catch the refcount transition
+ * from 2 to 1: when the refcount reaches one it means the private
+ * device page is free and we need to inform the device driver through
+ * a callback. See include/linux/memremap.h and HMM for details.
+ */
+ if (IS_HMM_ENABLED && unlikely(is_device_private_page(page) ||
+ is_device_public_page(page))) {
+ put_zone_device_private_or_public_page(page);
+ return;
+ }
+
if (put_page_testzero(page))
__put_page(page);
}
@@ -1195,8 +1234,10 @@ struct zap_details {
pgoff_t last_index; /* Highest page->index to unmap */
};
-struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
- pte_t pte);
+struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte, bool with_public_device);
+#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
+
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
@@ -1260,6 +1301,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ unsigned long *start, unsigned long *end,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
@@ -1992,13 +2034,13 @@ extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
- struct rb_root *root);
+ struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
struct vm_area_struct *prev,
- struct rb_root *root);
+ struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
- struct rb_root *root);
-struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
+ struct rb_root_cached *root);
+struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
unsigned long start, unsigned long last);
@@ -2008,11 +2050,12 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
vma; vma = vma_interval_tree_iter_next(vma, start, last))
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
- struct rb_root *root);
+ struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
- struct rb_root *root);
-struct anon_vma_chain *anon_vma_interval_tree_iter_first(
- struct rb_root *root, unsigned long start, unsigned long last);
+ struct rb_root_cached *root);
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
+ unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
@@ -2293,6 +2336,8 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
+int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
+ pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
@@ -2505,7 +2550,7 @@ enum mf_action_page_type {
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
- unsigned long addr,
+ unsigned long addr_hint,
unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
unsigned long addr, struct vm_area_struct *vma,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3cadee0a3508..46f4ecf5479a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -23,6 +23,7 @@
struct address_space;
struct mem_cgroup;
+struct hmm;
/*
* Each physical page in the system has a struct page associated with
@@ -335,6 +336,7 @@ struct vm_area_struct {
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
+ atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
struct vm_region *vm_region; /* NOMMU mapping region */
#endif
@@ -502,6 +504,11 @@ struct mm_struct {
atomic_long_t hugetlb_usage;
#endif
struct work_struct async_put_work;
+
+#if IS_ENABLED(CONFIG_HMM)
+ /* HMM needs to track a few things per mm */
+ struct hmm *hmm;
+#endif
} __randomize_layout;
extern struct mm_struct init_mm;
@@ -526,26 +533,6 @@ extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
extern void tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end);
-/*
- * Memory barriers to keep this state in sync are graciously provided by
- * the page table locks, outside of which no page table modifications happen.
- * The barriers are used to ensure the order between tlb_flush_pending updates,
- * which happen while the lock is not taken, and the PTE updates, which happen
- * while the lock is taken, are serialized.
- */
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
- return atomic_read(&mm->tlb_flush_pending) > 0;
-}
-
-/*
- * Returns true if there are two above TLB batching threads in parallel.
- */
-static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
-{
- return atomic_read(&mm->tlb_flush_pending) > 1;
-}
-
static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
atomic_set(&mm->tlb_flush_pending, 0);
@@ -554,27 +541,82 @@ static inline void init_tlb_flush_pending(struct mm_struct *mm)
static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
atomic_inc(&mm->tlb_flush_pending);
-
/*
- * Guarantee that the tlb_flush_pending increase does not leak into the
- * critical section updating the page tables
+ * The only time this value is relevant is when there are indeed pages
+ * to flush. And we'll only flush pages after changing them, which
+ * requires the PTL.
+ *
+ * So the ordering here is:
+ *
+ * atomic_inc(&mm->tlb_flush_pending);
+ * spin_lock(&ptl);
+ * ...
+ * set_pte_at();
+ * spin_unlock(&ptl);
+ *
+ * spin_lock(&ptl)
+ * mm_tlb_flush_pending();
+ * ....
+ * spin_unlock(&ptl);
+ *
+ * flush_tlb_range();
+ * atomic_dec(&mm->tlb_flush_pending);
+ *
+ * Where the increment is constrained by the PTL unlock, it thus
+ * ensures that the increment is visible if the PTE modification is
+ * visible. After all, if there is no PTE modification, nobody cares
+ * about TLB flushes either.
+ *
+ * This very much relies on users (mm_tlb_flush_pending() and
+ * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
+ * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
+ * locks (PPC) the unlock of one doesn't order against the lock of
+ * another PTL.
+ *
+ * The decrement is ordered by the flush_tlb_range(), such that
+ * mm_tlb_flush_pending() will not return false unless all flushes have
+ * completed.
*/
- smp_mb__before_spinlock();
}
-/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
/*
- * Guarantee that the tlb_flush_pending does not not leak into the
- * critical section, since we must order the PTE change and changes to
- * the pending TLB flush indication. We could have relied on TLB flush
- * as a memory barrier, but this behavior is not clearly documented.
+ * See inc_tlb_flush_pending().
+ *
+ * This cannot be smp_mb__before_atomic() because smp_mb() simply does
+ * not order against TLB invalidate completion, which is what we need.
+ *
+ * Therefore we must rely on tlb_flush_*() to guarantee order.
*/
- smp_mb__before_atomic();
atomic_dec(&mm->tlb_flush_pending);
}
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ /*
+ * Must be called after having acquired the PTL; orders against that
+ * PTLs release and therefore ensures that if we observe the modified
+ * PTE we must also observe the increment from inc_tlb_flush_pending().
+ *
+ * That is, it only guarantees to return true if there is a flush
+ * pending for _this_ PTL.
+ */
+ return atomic_read(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+ /*
+ * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
+ * for which there is a TLB flush pending in order to guarantee
+ * we've seen both that PTE modification and the increment.
+ *
+ * (no requirement on actually still holding the PTL, that is irrelevant)
+ */
+ return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
struct vm_fault;
struct vm_special_mapping {
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 46c73e97e61f..279b39008a33 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -29,8 +29,8 @@ struct mmc_csd {
unsigned char structure;
unsigned char mmca_vsn;
unsigned short cmdclass;
- unsigned short tacc_clks;
- unsigned int tacc_ns;
+ unsigned short taac_clks;
+ unsigned int taac_ns;
unsigned int c_size;
unsigned int r2w_factor;
unsigned int max_dtr;
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index a0c63ea28796..927519385482 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -122,11 +122,18 @@ struct mmc_data {
unsigned int timeout_clks; /* data timeout (in clocks) */
unsigned int blksz; /* data block size */
unsigned int blocks; /* number of blocks */
+ unsigned int blk_addr; /* block address */
int error; /* data error */
unsigned int flags;
-#define MMC_DATA_WRITE (1 << 8)
-#define MMC_DATA_READ (1 << 9)
+#define MMC_DATA_WRITE BIT(8)
+#define MMC_DATA_READ BIT(9)
+/* Extra flags used by CQE */
+#define MMC_DATA_QBR BIT(10) /* CQE queue barrier */
+#define MMC_DATA_PRIO BIT(11) /* CQE high priority */
+#define MMC_DATA_REL_WR BIT(12) /* Reliable write */
+#define MMC_DATA_DAT_TAG BIT(13) /* Tag request */
+#define MMC_DATA_FORCED_PRG BIT(14) /* Forced programming */
unsigned int bytes_xfered;
@@ -149,18 +156,22 @@ struct mmc_request {
struct completion completion;
struct completion cmd_completion;
void (*done)(struct mmc_request *);/* completion function */
+ /*
+ * Notify upper layers (e.g. the mmc block driver) that recovery is needed
+ * due to an error associated with the mmc_request. Currently used only
+ * by CQE.
+ */
+ void (*recovery_notifier)(struct mmc_request *);
struct mmc_host *host;
/* Allow other commands during this ongoing data transfer or busy wait */
bool cap_cmd_during_tfr;
+
+ int tag;
};
struct mmc_card;
-struct mmc_async_req;
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat);
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
int retries);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index ebd1cebbef0c..9a43763a68ad 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -162,6 +162,50 @@ struct mmc_host_ops {
unsigned int direction, int blk_size);
};
+struct mmc_cqe_ops {
+ /* Allocate resources, and make the CQE operational */
+ int (*cqe_enable)(struct mmc_host *host, struct mmc_card *card);
+ /* Free resources, and make the CQE non-operational */
+ void (*cqe_disable)(struct mmc_host *host);
+ /*
+ * Issue a read, write or DCMD request to the CQE. Also deal with the
+ * effect of ->cqe_off().
+ */
+ int (*cqe_request)(struct mmc_host *host, struct mmc_request *mrq);
+ /* Free resources (e.g. DMA mapping) associated with the request */
+ void (*cqe_post_req)(struct mmc_host *host, struct mmc_request *mrq);
+ /*
+ * Prepare the CQE and host controller to accept non-CQ commands. There
+ * is no corresponding ->cqe_on(), instead ->cqe_request() is required
+ * to deal with that.
+ */
+ void (*cqe_off)(struct mmc_host *host);
+ /*
+ * Wait for all CQE tasks to complete. Return an error if recovery
+ * becomes necessary.
+ */
+ int (*cqe_wait_for_idle)(struct mmc_host *host);
+ /*
+ * Notify CQE that a request has timed out. Return false if the request
+ * completed, or true if a timeout happened, in which case indicate
+ * whether recovery is needed.
+ */
+ bool (*cqe_timeout)(struct mmc_host *host, struct mmc_request *mrq,
+ bool *recovery_needed);
+ /*
+ * Stop all CQE activity and prepare the CQE and host controller to
+ * accept recovery commands.
+ */
+ void (*cqe_recovery_start)(struct mmc_host *host);
+ /*
+ * Clear the queue and call mmc_cqe_request_done() on all requests.
+ * Requests that errored will have the error set on the mmc_request
+ * (data->error or cmd->error for DCMD). Requests that did not error
+ * will have zero data bytes transferred.
+ */
+ void (*cqe_recovery_finish)(struct mmc_host *host);
+};
+
struct mmc_async_req {
/* active mmc request */
struct mmc_request *mrq;
@@ -272,7 +316,7 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */
-#define MMC_CAP_NO_BOUNCE_BUFF (1 << 21) /* Disable bounce buffers on host */
+/* (1 << 21) is free for reuse */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
@@ -291,10 +335,6 @@ struct mmc_host {
MMC_CAP2_HS200_1_2V_SDR)
#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
-#define MMC_CAP2_PACKED_RD (1 << 12) /* Allow packed read */
-#define MMC_CAP2_PACKED_WR (1 << 13) /* Allow packed write */
-#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
- MMC_CAP2_PACKED_WR)
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
#define MMC_CAP2_HS400_1_8V (1 << 15) /* Can support HS400 1.8V */
#define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */
@@ -307,6 +347,8 @@ struct mmc_host {
#define MMC_CAP2_HS400_ES (1 << 20) /* Host supports enhanced strobe */
#define MMC_CAP2_NO_SD (1 << 21) /* Do not send SD commands during initialization */
#define MMC_CAP2_NO_MMC (1 << 22) /* Do not send (e)MMC commands during initialization */
+#define MMC_CAP2_CQE (1 << 23) /* Has eMMC command queue engine */
+#define MMC_CAP2_CQE_DCMD (1 << 24) /* CQE can issue a direct command */
mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -328,9 +370,6 @@ struct mmc_host {
unsigned int use_spi_crc:1;
unsigned int claimed:1; /* host exclusively claimed */
unsigned int bus_dead:1; /* bus has been released */
-#ifdef CONFIG_MMC_DEBUG
- unsigned int removed:1; /* host is being removed */
-#endif
unsigned int can_retune:1; /* re-tuning can be used */
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
@@ -393,6 +432,13 @@ struct mmc_host {
int dsr_req; /* DSR value is valid */
u32 dsr; /* optional driver stage (DSR) value */
+ /* Command Queue Engine (CQE) support */
+ const struct mmc_cqe_ops *cqe_ops;
+ void *cqe_private;
+ int cqe_qdepth;
+ bool cqe_enabled;
+ bool cqe_on;
+
unsigned long private[0] ____cacheline_aligned;
};
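
A hedged sketch of how a host controller driver might wire up the CQE support added above; the foo_cqe_*() callbacks and the queue depth of 16 are assumptions for illustration:

static const struct mmc_cqe_ops foo_cqe_ops = {
	.cqe_enable		= foo_cqe_enable,	/* hypothetical */
	.cqe_disable		= foo_cqe_disable,
	.cqe_request		= foo_cqe_request,
	.cqe_post_req		= foo_cqe_post_req,
	.cqe_off		= foo_cqe_off,
	.cqe_wait_for_idle	= foo_cqe_wait_for_idle,
	.cqe_timeout		= foo_cqe_timeout,
	.cqe_recovery_start	= foo_cqe_recovery_start,
	.cqe_recovery_finish	= foo_cqe_recovery_finish,
};

/* in the host driver's probe path: */
host->cqe_ops = &foo_cqe_ops;
host->cqe_qdepth = 16;		/* assumed queue depth */
host->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
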
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index b733eb404ffc..abacd5484bc0 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -39,6 +39,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
+#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index c91b3bcd158f..6866e8126982 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -95,17 +95,6 @@ struct mmu_notifier_ops {
pte_t pte);
/*
- * Before this is invoked any secondary MMU is still ok to
- * read/write to the page previously pointed to by the Linux
- * pte because the page hasn't been freed yet and it won't be
- * freed until this returns. If required set_page_dirty has to
- * be called internally to this method.
- */
- void (*invalidate_page)(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address);
-
- /*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_sem and/or the
* locks protecting the reverse maps are held. If the subsystem
@@ -220,8 +209,6 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
-extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
@@ -268,13 +255,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
__mmu_notifier_change_pte(mm, address, pte);
}
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address)
-{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_page(mm, address);
-}
-
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
@@ -420,6 +400,11 @@ extern void mmu_notifier_synchronize(void);
#else /* CONFIG_MMU_NOTIFIER */
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+ return 0;
+}
+
static inline void mmu_notifier_release(struct mm_struct *mm)
{
}
@@ -442,11 +427,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
{
}
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address)
-{
-}
-
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fc14b8b3f6ce..c8f89417740b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -114,6 +114,20 @@ struct zone_padding {
#define ZONE_PADDING(name)
#endif
+#ifdef CONFIG_NUMA
+enum numa_stat_item {
+ NUMA_HIT, /* allocated in intended node */
+ NUMA_MISS, /* allocated in non intended node */
+ NUMA_FOREIGN, /* was intended here, hit elsewhere */
+ NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
+ NUMA_LOCAL, /* allocation from local node */
+ NUMA_OTHER, /* allocation from other node */
+ NR_VM_NUMA_STAT_ITEMS
+};
+#else
+#define NR_VM_NUMA_STAT_ITEMS 0
+#endif
+
enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
@@ -132,14 +146,6 @@ enum zone_stat_item {
#if IS_ENABLED(CONFIG_ZSMALLOC)
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
-#ifdef CONFIG_NUMA
- NUMA_HIT, /* allocated in intended node */
- NUMA_MISS, /* allocated in non intended node */
- NUMA_FOREIGN, /* was intended here, hit elsewhere */
- NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
- NUMA_LOCAL, /* allocation from local node */
- NUMA_OTHER, /* allocation from other node */
-#endif
NR_FREE_CMA_PAGES,
NR_VM_ZONE_STAT_ITEMS };
@@ -276,6 +282,7 @@ struct per_cpu_pageset {
struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
s8 expire;
+ u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
s8 stat_threshold;
@@ -496,6 +503,7 @@ struct zone {
ZONE_PADDING(_pad3_)
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+ atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
@@ -770,8 +778,7 @@ static inline bool is_dev_zone(const struct zone *zone)
#include <linux/memory_hotplug.h>
-extern struct mutex zonelists_mutex;
-void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
+void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
int classzone_idx, unsigned int alloc_flags,
@@ -896,7 +903,7 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
extern int numa_zonelist_order_handler(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
-#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
+#define NUMA_ZONELIST_ORDER_LEN 16
#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -1087,8 +1094,14 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif
-#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
-#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
+static inline unsigned long pfn_to_section_nr(unsigned long pfn)
+{
+ return pfn >> PFN_SECTION_SHIFT;
+}
+static inline unsigned long section_nr_to_pfn(unsigned long sec)
+{
+ return sec << PFN_SECTION_SHIFT;
+}
#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
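
As a worked example of the new inline helpers: assuming PFN_SECTION_SHIFT == 15 (4 KiB pages with 128 MiB sections, a common x86-64 configuration), the two conversions are exact inverses:

	pfn_to_section_nr(0x8000) == 0x8000 >> 15 == 1
	section_nr_to_pfn(1)      == 1 << 15      == 0x8000
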
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 3f74ef2281e8..694cebb50f72 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -674,8 +674,6 @@ struct ulpi_device_id {
* struct fsl_mc_device_id - MC object device identifier
* @vendor: vendor ID
* @obj_type: MC object type
- * @ver_major: MC object version major number
- * @ver_minor: MC object version minor number
*
* Type of entries in the "device Id" table for MC object devices supported by
* a MC object device driver. The last entry of the table has vendor set to 0x0
diff --git a/include/linux/module.h b/include/linux/module.h
index e7bdd549e527..fe5aa3736707 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -209,7 +209,7 @@ extern void cleanup_module(void);
#ifdef MODULE
/* Creates an alias so file2alias.c can find device table. */
#define MODULE_DEVICE_TABLE(type, name) \
-extern const typeof(name) __mod_##type##__##name##_device_table \
+extern typeof(name) __mod_##type##__##name##_device_table \
__attribute__ ((unused, alias(__stringify(name))))
#else /* !MODULE */
#define MODULE_DEVICE_TABLE(type, name)
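
For reference, typical use of the macro; the ID table below is an illustrative example, not an existing binding:

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "acme,foo" },	/* hypothetical */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

Dropping the explicit const presumably lets the alias carry exactly the qualifiers of the aliased table via typeof(name), whether or not the table itself is declared const.
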
diff --git a/include/linux/msg.h b/include/linux/msg.h
index a001305f5a79..81263fe3f9dc 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -2,6 +2,7 @@
#define _LINUX_MSG_H
#include <linux/list.h>
+#include <linux/time64.h>
#include <uapi/linux/msg.h>
/* one msg_msg structure for each message */
@@ -17,9 +18,9 @@ struct msg_msg {
/* one msq_queue structure for each present queue on the system */
struct msg_queue {
struct kern_ipc_perm q_perm;
- time_t q_stime; /* last msgsnd time */
- time_t q_rtime; /* last msgrcv time */
- time_t q_ctime; /* last change time */
+ time64_t q_stime; /* last msgsnd time */
+ time64_t q_rtime; /* last msgrcv time */
+ time64_t q_ctime; /* last change time */
unsigned long q_cbytes; /* current number of bytes on queue */
unsigned long q_qnum; /* number of messages in queue */
unsigned long q_qbytes; /* max number of bytes on queue */
@@ -31,12 +32,4 @@ struct msg_queue {
struct list_head q_senders;
} __randomize_layout;
-/* Helper routines for sys_msgsnd and sys_msgrcv */
-extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
- size_t msgsz, int msgflg);
-extern long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
- int msgflg,
- long (*msg_fill)(void __user *, struct msg_msg *,
- size_t));
-
#endif /* _LINUX_MSG_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index df6d59201d31..80e3b562bef6 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -66,6 +66,7 @@ struct fsl_mc_msi_desc {
* @mask_pos: [PCI MSI] Mask register position
* @mask_base: [PCI MSI-X] Mask register base address
* @platform: [platform] Platform device specific msi descriptor data
+ * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data
*/
struct msi_desc {
/* Shared device/bus type independent data */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index f8a2ef239c60..6cd0f6b7658b 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -206,6 +206,15 @@ struct mtd_pairing_scheme {
struct module; /* only needed for owner field in mtd_info */
+/**
+ * struct mtd_debug_info - debugging information for an MTD device.
+ *
+ * @dfs_dir: direntry object of the MTD device debugfs directory
+ */
+struct mtd_debug_info {
+ struct dentry *dfs_dir;
+};
+
struct mtd_info {
u_char type;
uint32_t flags;
@@ -346,6 +355,7 @@ struct mtd_info {
struct module *owner;
struct device dev;
int usecount;
+ struct mtd_debug_info dbg;
};
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h
index 51534e50f7fc..be4f45d89be2 100644
--- a/include/linux/mtd/nand-gpio.h
+++ b/include/linux/mtd/nand-gpio.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_MTD_NAND_GPIO_H
#define __LINUX_MTD_NAND_GPIO_H
-#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
struct gpio_nand_platdata {
int gpio_nce;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/rawnand.h
index 5216d2eb2289..2b05f4273bab 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1,6 +1,4 @@
/*
- * linux/include/linux/mtd/nand.h
- *
* Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
* Steven J. Hill <sjhill@realitydiluted.com>
* Thomas Gleixner <tglx@linutronix.de>
@@ -15,8 +13,8 @@
* Changelog:
* See git changelog.
*/
-#ifndef __LINUX_MTD_NAND_H
-#define __LINUX_MTD_NAND_H
+#ifndef __LINUX_MTD_RAWNAND_H
+#define __LINUX_MTD_RAWNAND_H
#include <linux/wait.h>
#include <linux/spinlock.h>
@@ -44,12 +42,6 @@ void nand_release(struct mtd_info *mtd);
/* Internal helper for board drivers which need to override command function */
void nand_wait_ready(struct mtd_info *mtd);
-/* locks all blocks present in the device */
-int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
-
-/* unlocks specified locked blocks */
-int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
-
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8
@@ -89,10 +81,6 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
#define NAND_CMD_SET_FEATURES 0xef
#define NAND_CMD_RESET 0xff
-#define NAND_CMD_LOCK 0x2a
-#define NAND_CMD_UNLOCK1 0x23
-#define NAND_CMD_UNLOCK2 0x24
-
/* Extended commands for large page devices */
#define NAND_CMD_READSTART 0x30
#define NAND_CMD_RNDOUTSTART 0xE0
@@ -449,14 +437,16 @@ struct nand_jedec_params {
__le16 crc;
} __packed;
+/* The maximum expected count of bytes in the NAND ID sequence */
+#define NAND_MAX_ID_LEN 8
+
/**
* struct nand_id - NAND id structure
- * @data: buffer containing the id bytes. Currently 8 bytes large, but can
- * be extended if required.
+ * @data: buffer containing the id bytes.
* @len: ID length.
*/
struct nand_id {
- u8 data[8];
+ u8 data[NAND_MAX_ID_LEN];
int len;
};
@@ -1028,8 +1018,6 @@ static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
#define NAND_MFR_ATO 0x9b
#define NAND_MFR_WINBOND 0xef
-/* The maximum expected count of bytes in the NAND ID sequence */
-#define NAND_MAX_ID_LEN 8
/*
* A helper for defining older NAND chips where the second ID byte fully
@@ -1246,6 +1234,8 @@ int onfi_init_data_interface(struct nand_chip *chip,
*/
static inline bool nand_is_slc(struct nand_chip *chip)
{
+ WARN(chip->bits_per_cell == 0,
+ "chip->bits_per_cell is used uninitialized\n");
return chip->bits_per_cell == 1;
}
@@ -1328,4 +1318,4 @@ void nand_cleanup(struct nand_chip *chip);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
-#endif /* __LINUX_MTD_NAND_H */
+#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index 2251add65fa7..c759d403cbc0 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -22,7 +22,7 @@
#include <linux/completion.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/pm_qos.h>
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
index 65e91d0fa981..e1845fc4afbd 100644
--- a/include/linux/mtd/sharpsl.h
+++ b/include/linux/mtd/sharpsl.h
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
@@ -17,4 +17,5 @@ struct sharpsl_nand_platform_data {
const struct mtd_ooblayout_ops *ecc_layout;
struct mtd_partition *partitions;
unsigned int nr_partitions;
+ const char *const *part_parsers;
};
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 55faa2f07cca..1f0a7fc7772f 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -41,6 +41,8 @@
#define SPINOR_OP_WREN 0x06 /* Write enable */
#define SPINOR_OP_RDSR 0x05 /* Read status register */
#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
+#define SPINOR_OP_RDSR2 0x3f /* Read status register 2 */
+#define SPINOR_OP_WRSR2 0x3e /* Write status register 2 */
#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */
#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */
#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */
@@ -56,6 +58,7 @@
#define SPINOR_OP_CHIP_ERASE 0xc7 /* Erase whole flash chip */
#define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */
#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */
+#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
@@ -102,6 +105,7 @@
/* Used for Spansion flashes only. */
#define SPINOR_OP_BRWR 0x17 /* Bank register write */
+#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
/* Used for Micron flashes only. */
#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
@@ -116,6 +120,9 @@
#define SR_BP2 BIT(4) /* Block protect 2 */
#define SR_TB BIT(5) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
+/* Spansion/Cypress specific status bits */
+#define SR_E_ERR BIT(5)
+#define SR_P_ERR BIT(6)
#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
@@ -128,6 +135,9 @@
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
+/* Status Register 2 bits. */
+#define SR2_QUAD_EN_BIT7 BIT(7)
+
/* Supported SPI protocols */
#define SNOR_PROTO_INST_MASK GENMASK(23, 16)
#define SNOR_PROTO_INST_SHIFT 16
@@ -218,6 +228,7 @@ enum spi_nor_option_flags {
SNOR_F_NO_OP_CHIP_ERASE = BIT(2),
SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
SNOR_F_READY_XSR_RDY = BIT(4),
+ SNOR_F_USE_CLSR = BIT(5),
};
/**
diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h
index abed4dec5c2f..e373690cce0a 100644
--- a/include/linux/mtd/xip.h
+++ b/include/linux/mtd/xip.h
@@ -30,7 +30,9 @@
* obviously not be running from flash. The __xipram is therefore marking
* those functions so they get relocated to ram.
*/
-#define __xipram noinline __attribute__ ((__section__ (".data")))
+#ifdef CONFIG_XIP_KERNEL
+#define __xipram noinline __attribute__ ((__section__ (".xiptext")))
+#endif
/*
* Each architecture has to provide the following macros. They must access
@@ -90,10 +92,10 @@
#define xip_cpu_idle() do { } while (0)
#endif
-#else
+#endif /* CONFIG_MTD_XIP */
+#ifndef __xipram
#define __xipram
-
-#endif /* CONFIG_MTD_XIP */
+#endif
#endif /* __LINUX_MTD_XIP_H__ */
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index 5577e1b773c4..ea96d4c82be7 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -13,6 +13,8 @@
#ifndef _LINUX_MUX_CONSUMER_H
#define _LINUX_MUX_CONSUMER_H
+#include <linux/compiler.h>
+
struct device;
struct mux_control;
diff --git a/include/linux/net.h b/include/linux/net.h
index ebeb48c92005..d97d80d7fdf8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -190,8 +190,16 @@ struct proto_ops {
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
int (*set_peek_off)(struct sock *sk, int val);
int (*peek_len)(struct socket *sock);
+
+ /* The following functions are called internally by kernel with
+ * sock lock already held.
+ */
int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
+ int (*sendpage_locked)(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags);
+ int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
+ size_t size);
};
#define DECLARE_SOCKADDR(type, dst, src) \
@@ -279,6 +287,8 @@ do { \
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
+int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
+ struct kvec *vec, size_t num, size_t len);
int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len, int flags);
@@ -297,6 +307,8 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
unsigned int optlen);
int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
+int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags);
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
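A minimal sketch, assuming the caller already holds the socket lock, of how the new _locked variant differs from kernel_sendmsg() (which manages the lock itself); send_buf_locked() is a hypothetical helper:

static int send_buf_locked(struct sock *sk, void *buf, size_t len)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = { .iov_base = buf, .iov_len = len };

	/* sk must already be locked via lock_sock(); the _locked
	 * entry points do not take the sock lock themselves. */
	return kernel_sendmsg_locked(sk, &msg, &iov, 1, len);
}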
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 1d4737cffc71..dc8b4896b77b 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -36,7 +36,6 @@ enum {
/**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */
NETIF_F_TSO_BIT /* ... TCPv4 segmentation */
= NETIF_F_GSO_SHIFT,
- NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */
NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */
NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */
NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */
@@ -76,6 +75,7 @@ enum {
NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
+ NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
/*
* Add your fresh new feature above and remember to update
@@ -118,7 +118,6 @@ enum {
#define NETIF_F_TSO6 __NETIF_F(TSO6)
#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
#define NETIF_F_TSO __NETIF_F(TSO)
-#define NETIF_F_UFO __NETIF_F(UFO)
#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED)
#define NETIF_F_RXFCS __NETIF_F(RXFCS)
#define NETIF_F_RXALL __NETIF_F(RXALL)
@@ -140,6 +139,7 @@ enum {
#define NETIF_F_HW_TC __NETIF_F(HW_TC)
#define NETIF_F_HW_ESP __NETIF_F(HW_ESP)
#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM)
+#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
@@ -172,7 +172,7 @@ enum {
NETIF_F_FSO)
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \
NETIF_F_GSO_SCTP)
/*
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 779b23595596..f535779d9dc1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -35,7 +35,6 @@
#include <linux/percpu.h>
#include <linux/rculist.h>
-#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>
@@ -66,6 +65,7 @@ struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct bpf_prog;
+struct xdp_buff;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -693,10 +693,9 @@ struct netdev_rx_queue {
*/
struct rx_queue_attribute {
struct attribute attr;
- ssize_t (*show)(struct netdev_rx_queue *queue,
- struct rx_queue_attribute *attr, char *buf);
+ ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
ssize_t (*store)(struct netdev_rx_queue *queue,
- struct rx_queue_attribute *attr, const char *buf, size_t len);
+ const char *buf, size_t len);
};
#ifdef CONFIG_XPS
@@ -770,31 +769,14 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
struct sk_buff *skb);
-/* These structures hold the attributes of qdisc and classifiers
- * that are being passed to the netdevice through the setup_tc op.
- */
-enum {
+enum tc_setup_type {
TC_SETUP_MQPRIO,
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
- TC_SETUP_MATCHALL,
+ TC_SETUP_CLSMATCHALL,
TC_SETUP_CLSBPF,
};
-struct tc_cls_u32_offload;
-
-struct tc_to_netdev {
- unsigned int type;
- union {
- struct tc_cls_u32_offload *cls_u32;
- struct tc_cls_flower_offload *cls_flower;
- struct tc_cls_matchall_offload *cls_mall;
- struct tc_cls_bpf_offload *cls_bpf;
- struct tc_mqprio_qopt *mqprio;
- };
- bool egress_dev;
-};
-
/* These structures hold the attributes of xdp state that are being passed
* to the netdevice through the xdp op.
*/
@@ -977,8 +959,8 @@ struct xfrmdev_ops {
* with PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
- * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, u32 chain_index,
- * __be16 protocol, struct tc_to_netdev *tc);
+ * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
+ * void *type_data);
* Called to setup any 'tc' scheduler, classifier or action on @dev.
* This is always called from the stack with the rtnl lock held and netif
* tx queues stopped. This allows the netdevice to perform queue
@@ -1138,7 +1120,12 @@ struct xfrmdev_ops {
* int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp);
* This function is used to set or query state related to XDP on the
* netdevice. See definition of enum xdp_netdev_command for details.
- *
+ * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp);
+ *	This function is used to submit an XDP packet for transmission
+ *	on a netdevice.
+ * void (*ndo_xdp_flush)(struct net_device *dev);
+ * This function is used to inform the driver to flush a particular
+ *	xdp tx queue. Must be called on the same CPU as ndo_xdp_xmit.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1221,9 +1208,8 @@ struct net_device_ops {
struct net_device *dev,
int vf, bool setting);
int (*ndo_setup_tc)(struct net_device *dev,
- u32 handle, u32 chain_index,
- __be16 protocol,
- struct tc_to_netdev *tc);
+ enum tc_setup_type type,
+ void *type_data);
#if IS_ENABLED(CONFIG_FCOE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -1323,6 +1309,9 @@ struct net_device_ops {
int needed_headroom);
int (*ndo_xdp)(struct net_device *dev,
struct netdev_xdp *xdp);
+ int (*ndo_xdp_xmit)(struct net_device *dev,
+ struct xdp_buff *xdp);
+ void (*ndo_xdp_flush)(struct net_device *dev);
};
/**
@@ -1802,7 +1791,7 @@ struct net_device {
#endif
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
- struct nf_hook_entry __rcu *nf_hooks_ingress;
+ struct nf_hook_entries __rcu *nf_hooks_ingress;
#endif
unsigned char broadcast[MAX_ADDR_LEN];
@@ -2308,6 +2297,7 @@ struct netdev_lag_lower_state_info {
#define NETDEV_PRECHANGEUPPER 0x001A
#define NETDEV_CHANGELOWERSTATE 0x001B
#define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C
+#define NETDEV_UDP_TUNNEL_DROP_INFO 0x001D
#define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
int register_netdevice_notifier(struct notifier_block *nb);
@@ -2423,8 +2413,8 @@ struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
int dev_open(struct net_device *dev);
-int dev_close(struct net_device *dev);
-int dev_close_many(struct list_head *head, bool unlink);
+void dev_close(struct net_device *dev);
+void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
@@ -2774,7 +2764,7 @@ struct softnet_data {
unsigned int input_queue_head ____cacheline_aligned_in_smp;
/* Elements below can be accessed between CPUs for RPS/RFS */
- struct call_single_data csd ____cacheline_aligned_in_smp;
+ call_single_data_t csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
unsigned int input_queue_tail;
@@ -3251,6 +3241,8 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
}
+void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
+int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
@@ -3866,6 +3858,8 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
@@ -4019,22 +4013,22 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi
return rc;
}
-int netdev_class_create_file_ns(struct class_attribute *class_attr,
+int netdev_class_create_file_ns(const struct class_attribute *class_attr,
const void *ns);
-void netdev_class_remove_file_ns(struct class_attribute *class_attr,
+void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
const void *ns);
-static inline int netdev_class_create_file(struct class_attribute *class_attr)
+static inline int netdev_class_create_file(const struct class_attribute *class_attr)
{
return netdev_class_create_file_ns(class_attr, NULL);
}
-static inline void netdev_class_remove_file(struct class_attribute *class_attr)
+static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
{
netdev_class_remove_file_ns(class_attr, NULL);
}
-extern struct kobj_ns_type_operations net_ns_type_operations;
+extern const struct kobj_ns_type_operations net_ns_type_operations;
const char *netdev_drivername(const struct net_device *dev);
@@ -4089,7 +4083,6 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
/* check flags correspondence */
BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
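A hedged sketch of the reworked ndo_setup_tc() shape: drivers now switch on enum tc_setup_type and cast type_data to the matching offload structure (mydrv_setup_mqprio() is a hypothetical driver helper):

static int mydrv_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	switch (type) {
	case TC_SETUP_MQPRIO:
		return mydrv_setup_mqprio(dev, type_data);
	default:
		return -EOPNOTSUPP;	/* unhandled qdisc/classifier type */
	}
}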
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 22f081065d49..f84bca1703cd 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -72,25 +72,32 @@ struct nf_hook_ops {
};
struct nf_hook_entry {
- struct nf_hook_entry __rcu *next;
nf_hookfn *hook;
void *priv;
- const struct nf_hook_ops *orig_ops;
};
-static inline void
-nf_hook_entry_init(struct nf_hook_entry *entry, const struct nf_hook_ops *ops)
-{
- entry->next = NULL;
- entry->hook = ops->hook;
- entry->priv = ops->priv;
- entry->orig_ops = ops;
-}
+struct nf_hook_entries {
+ u16 num_hook_entries;
+ /* padding */
+ struct nf_hook_entry hooks[];
+
+ /* trailer: pointers to original orig_ops of each hook.
+ *
+	 * This is not part of struct nf_hook_entry since it's only
+	 * needed in the slow path (hook register/unregister).
+ *
+ * const struct nf_hook_ops *orig_ops[]
+ */
+};
-static inline int
-nf_hook_entry_priority(const struct nf_hook_entry *entry)
+static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
- return entry->orig_ops->priority;
+ unsigned int n = e->num_hook_entries;
+ const void *hook_end;
+
+ hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */
+
+ return (struct nf_hook_ops **)hook_end;
}
static inline int
@@ -100,12 +107,6 @@ nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
return entry->hook(entry->priv, skb, state);
}
-static inline const struct nf_hook_ops *
-nf_hook_entry_ops(const struct nf_hook_entry *entry)
-{
- return entry->orig_ops;
-}
-
static inline void nf_hook_state_init(struct nf_hook_state *p,
unsigned int hook,
u_int8_t pf,
@@ -168,7 +169,7 @@ extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
- struct nf_hook_entry *entry);
+ const struct nf_hook_entries *e, unsigned int i);
/**
* nf_hook - call a netfilter hook
@@ -182,7 +183,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- struct nf_hook_entry *hook_head;
+ struct nf_hook_entries *hook_head;
int ret = 1;
#ifdef HAVE_JUMP_LABEL
@@ -200,7 +201,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
nf_hook_state_init(&state, hook, pf, indev, outdev,
sk, net, okfn);
- ret = nf_hook_slow(skb, &state, hook_head);
+ ret = nf_hook_slow(skb, &state, hook_head, 0);
}
rcu_read_unlock();
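The single-allocation layout of struct nf_hook_entries can be illustrated by the size a caller would reserve — a sketch under the layout described in the comments above, not necessarily the kernel's own helper:

static size_t nf_hook_entries_size(unsigned int num)
{
	return sizeof(struct nf_hook_entries) +
	       num * sizeof(struct nf_hook_entry) +	/* hooks[] */
	       num * sizeof(struct nf_hook_ops *);	/* orig_ops trailer */
}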
diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h
index 074790c0cf74..0fc458bde80b 100644
--- a/include/linux/netfilter/xt_hashlimit.h
+++ b/include/linux/netfilter/xt_hashlimit.h
@@ -5,5 +5,6 @@
#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \
- XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES)
+ XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES |\
+ XT_HASHLIMIT_RATE_MATCH)
#endif /*_XT_HASHLIMIT_H*/
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 2c2a5514b0df..528b24c78308 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -108,9 +108,10 @@ struct ebt_table {
#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
~(__alignof__(struct _xt_align)-1))
-extern struct ebt_table *ebt_register_table(struct net *net,
- const struct ebt_table *table,
- const struct nf_hook_ops *);
+extern int ebt_register_table(struct net *net,
+ const struct ebt_table *table,
+ const struct nf_hook_ops *ops,
+ struct ebt_table **res);
extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
const struct nf_hook_ops *);
extern unsigned int ebt_do_table(struct sk_buff *skb,
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 59476061de86..8d5dae1e2ff8 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -17,7 +17,7 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
/* caller must hold rcu_read_lock */
static inline int nf_hook_ingress(struct sk_buff *skb)
{
- struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
+ struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
struct nf_hook_state state;
int ret;
@@ -30,7 +30,7 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
nf_hook_state_init(&state, NF_NETDEV_INGRESS,
NFPROTO_NETDEV, skb->dev, NULL, NULL,
dev_net(skb->dev), NULL);
- ret = nf_hook_slow(skb, &state, e);
+ ret = nf_hook_slow(skb, &state, e, 0);
if (ret == 0)
return -1;
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 5cc91d6381a3..a0282ceaa48b 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -49,7 +49,6 @@
struct nfs_access_entry {
struct rb_node rb_node;
struct list_head lru;
- unsigned long jiffies;
struct rpc_cred * cred;
__u32 mask;
struct rcu_head rcu_head;
@@ -154,7 +153,7 @@ struct nfs_inode {
*/
__be32 cookieverf[2];
- unsigned long nrequests;
+ atomic_long_t nrequests;
struct nfs_mds_commit_info commit_info;
/* Open contexts for shared mmap writes */
@@ -163,6 +162,7 @@ struct nfs_inode {
/* Readers: in-flight sillydelete RPC calls */
/* Writers: rmdir */
struct rw_semaphore rmdir_sem;
+ struct mutex commit_mutex;
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
@@ -510,7 +510,7 @@ extern void nfs_commit_free(struct nfs_commit_data *data);
static inline int
nfs_have_writebacks(struct inode *inode)
{
- return NFS_I(inode)->nrequests != 0;
+ return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
}
/*
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index d67b67ae6c8b..d117120c9b6e 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -125,8 +125,7 @@ extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
const struct nfs_pgio_completion_ops *compl_ops,
const struct nfs_rw_ops *rw_ops,
size_t bsize,
- int how,
- gfp_t gfp_flags);
+ int how);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
struct nfs_page *);
extern int nfs_pageio_resend(struct nfs_pageio_descriptor *,
@@ -139,8 +138,7 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
-extern int nfs_page_group_lock(struct nfs_page *, bool);
-extern void nfs_page_group_lock_wait(struct nfs_page *);
+extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 62cbcb842f99..164d5359d4ab 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1476,7 +1476,7 @@ struct nfs_pgio_header {
struct nfs_mds_commit_info {
atomic_t rpcs_out;
- unsigned long ncommit;
+ atomic_long_t ncommit;
struct list_head list;
};
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index a36abe2da13e..27e249ed7c5c 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -12,11 +12,31 @@
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
+void lockup_detector_soft_poweroff(void);
+void lockup_detector_cleanup(void);
+bool is_hardlockup(void);
+
+extern int watchdog_user_enabled;
+extern int nmi_watchdog_user_enabled;
+extern int soft_watchdog_user_enabled;
+extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
+
+extern struct cpumask watchdog_cpumask;
+extern unsigned long *watchdog_cpumask_bits;
+#ifdef CONFIG_SMP
+extern int sysctl_softlockup_all_cpu_backtrace;
+extern int sysctl_hardlockup_all_cpu_backtrace;
#else
-static inline void lockup_detector_init(void)
-{
-}
-#endif
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif /* !CONFIG_SMP */
+
+#else /* CONFIG_LOCKUP_DETECTOR */
+static inline void lockup_detector_init(void) { }
+static inline void lockup_detector_soft_poweroff(void) { }
+static inline void lockup_detector_cleanup(void) { }
+#endif /* !CONFIG_LOCKUP_DETECTOR */
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
@@ -24,29 +44,17 @@ extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
-extern int soft_watchdog_enabled;
-extern atomic_t watchdog_park_in_progress;
#else
-static inline void touch_softlockup_watchdog_sched(void)
-{
-}
-static inline void touch_softlockup_watchdog(void)
-{
-}
-static inline void touch_softlockup_watchdog_sync(void)
-{
-}
-static inline void touch_all_softlockup_watchdogs(void)
-{
-}
+static inline void touch_softlockup_watchdog_sched(void) { }
+static inline void touch_softlockup_watchdog(void) { }
+static inline void touch_softlockup_watchdog_sync(void) { }
+static inline void touch_all_softlockup_watchdogs(void) { }
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
-static inline void reset_hung_task_detector(void)
-{
-}
+static inline void reset_hung_task_detector(void) { }
#endif
/*
@@ -54,12 +62,12 @@ static inline void reset_hung_task_detector(void)
* 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
* bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
*
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
+ * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
+ * 'soft_watchdog_user_enabled' are variables that are only used as an
+ * 'interface' between the parameters in /proc/sys/kernel and the internal
+ * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
+ * handled differently because its value is not boolean, and the lockup
+ * detectors are 'suspended' while 'watchdog_thresh' is equal to zero.
*/
#define NMI_WATCHDOG_ENABLED_BIT 0
#define SOFT_WATCHDOG_ENABLED_BIT 1
@@ -73,17 +81,41 @@ extern unsigned int hardlockup_panic;
static inline void hardlockup_detector_disable(void) {}
#endif
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+# define NMI_WATCHDOG_SYSCTL_PERM 0644
+#else
+# define NMI_WATCHDOG_SYSCTL_PERM 0444
+#endif
+
#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void arch_touch_nmi_watchdog(void);
+extern void hardlockup_detector_perf_stop(void);
+extern void hardlockup_detector_perf_restart(void);
+extern void hardlockup_detector_perf_disable(void);
+extern void hardlockup_detector_perf_enable(void);
+extern void hardlockup_detector_perf_cleanup(void);
+extern int hardlockup_detector_perf_init(void);
#else
-#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline void hardlockup_detector_perf_stop(void) { }
+static inline void hardlockup_detector_perf_restart(void) { }
+static inline void hardlockup_detector_perf_disable(void) { }
+static inline void hardlockup_detector_perf_enable(void) { }
+static inline void hardlockup_detector_perf_cleanup(void) { }
+# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
+static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
static inline void arch_touch_nmi_watchdog(void) {}
+# else
+static inline int hardlockup_detector_perf_init(void) { return 0; }
+# endif
#endif
-#endif
+
+void watchdog_nmi_stop(void);
+void watchdog_nmi_start(void);
+int watchdog_nmi_probe(void);
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
- *
+ *
* If the architecture supports the NMI watchdog, touch_nmi_watchdog()
* may be used to reset the timeout - for code which intentionally
* disables interrupts for a long time. This call is stateless.
@@ -153,22 +185,6 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
u64 hw_nmi_get_sample_period(int watchdog_thresh);
#endif
-#ifdef CONFIG_LOCKUP_DETECTOR
-extern int nmi_watchdog_enabled;
-extern int watchdog_user_enabled;
-extern int watchdog_thresh;
-extern unsigned long watchdog_enabled;
-extern struct cpumask watchdog_cpumask;
-extern unsigned long *watchdog_cpumask_bits;
-extern int __read_mostly watchdog_suspended;
-#ifdef CONFIG_SMP
-extern int sysctl_softlockup_all_cpu_backtrace;
-extern int sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
-#endif
-
#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
defined(CONFIG_HARDLOCKUP_DETECTOR)
void watchdog_update_hrtimer_threshold(u64 period);
@@ -176,7 +192,6 @@ void watchdog_update_hrtimer_threshold(u64 period);
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif
-extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
@@ -188,18 +203,6 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
void __user *, size_t *, loff_t *);
extern int proc_watchdog_cpumask(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-extern int lockup_detector_suspend(void);
-extern void lockup_detector_resume(void);
-#else
-static inline int lockup_detector_suspend(void)
-{
- return 0;
-}
-
-static inline void lockup_detector_resume(void)
-{
-}
-#endif
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
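As the touch_nmi_watchdog() comment above notes, the call is stateless; a sketch of the intended pattern for code that legitimately keeps interrupts off for a long time (emit_record() and nr_records are assumptions):

static void slow_console_dump(void)
{
	int i;

	for (i = 0; i < nr_records; i++) {
		emit_record(i);		/* slow, IRQs-off output */
		touch_nmi_watchdog();	/* reset the hard lockup timeout */
	}
}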
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 2591878c1d48..a726f96010d5 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -346,11 +346,6 @@ struct nvme_fc_remote_port {
* indicating an FC transport Aborted status.
* Entrypoint is Mandatory.
*
- * @defer_rcv: Called by the transport to signal the LLLD that it has
- * begun processing of a previously received NVME CMD IU. The LLDD
- * is now free to re-use the rcv buffer associated with the
- * nvmefc_tgt_fcp_req.
- *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -624,7 +619,7 @@ struct nvmefc_tgt_fcp_req {
u32 timeout;
u32 transfer_length;
struct fc_ba_rjt ba_rjt;
- struct scatterlist sg[NVME_FC_MAX_SEGMENTS];
+ struct scatterlist *sg;
int sg_cnt;
void *rspaddr;
dma_addr_t rspdma;
@@ -806,11 +801,19 @@ struct nvmet_fc_target_port {
* outstanding operation (if there was one) to complete, then will
* call the fcp_req_release() callback to return the command's
* exchange context back to the LLDD.
+ * Entrypoint is Mandatory.
*
* @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req
* to the LLDD after all operations on the fcp operation are complete.
* This may be due to the command completing or upon completion of
* abort cleanup.
+ * Entrypoint is Mandatory.
+ *
+ * @defer_rcv:  Called by the transport to signal the LLDD that it has
+ * begun processing of a previously received NVME CMD IU. The LLDD
+ * is now free to re-use the rcv buffer associated with the
+ * nvmefc_tgt_fcp_req.
+ * Entrypoint is Optional.
*
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 25d8225dbd04..9310ce77d8e1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -32,6 +32,8 @@
#define NVME_RDMA_IP_PORT 4420
+#define NVME_NSID_ALL 0xffffffff
+
enum nvme_subsys_type {
NVME_NQN_DISC = 1, /* Discovery type target subsystem */
NVME_NQN_NVME = 2, /* NVME type target subsystem */
@@ -133,19 +135,26 @@ enum {
enum {
NVME_CC_ENABLE = 1 << 0,
NVME_CC_CSS_NVM = 0 << 4,
+ NVME_CC_EN_SHIFT = 0,
+ NVME_CC_CSS_SHIFT = 4,
NVME_CC_MPS_SHIFT = 7,
- NVME_CC_ARB_RR = 0 << 11,
- NVME_CC_ARB_WRRU = 1 << 11,
- NVME_CC_ARB_VS = 7 << 11,
- NVME_CC_SHN_NONE = 0 << 14,
- NVME_CC_SHN_NORMAL = 1 << 14,
- NVME_CC_SHN_ABRUPT = 2 << 14,
- NVME_CC_SHN_MASK = 3 << 14,
- NVME_CC_IOSQES = NVME_NVM_IOSQES << 16,
- NVME_CC_IOCQES = NVME_NVM_IOCQES << 20,
+ NVME_CC_AMS_SHIFT = 11,
+ NVME_CC_SHN_SHIFT = 14,
+ NVME_CC_IOSQES_SHIFT = 16,
+ NVME_CC_IOCQES_SHIFT = 20,
+ NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
+ NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
+ NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
+ NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT,
+ NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT,
+ NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT,
+ NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
+ NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
+ NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
NVME_CSTS_NSSRO = 1 << 4,
+ NVME_CSTS_PP = 1 << 5,
NVME_CSTS_SHST_NORMAL = 0 << 2,
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
@@ -217,7 +226,9 @@ struct nvme_id_ctrl {
__le16 mntmt;
__le16 mxtmt;
__le32 sanicap;
- __u8 rsvd332[180];
+ __le32 hmminds;
+ __le16 hmmaxd;
+ __u8 rsvd338[174];
__u8 sqes;
__u8 cqes;
__le16 maxcmd;
@@ -251,10 +262,11 @@ enum {
NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
NVME_CTRL_ONCS_DSM = 1 << 2,
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
+ NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
- NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7,
+ NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
};
struct nvme_lbaf {
@@ -376,6 +388,13 @@ struct nvme_smart_log {
__u8 rsvd216[296];
};
+struct nvme_fw_slot_info_log {
+ __u8 afi;
+ __u8 rsvd1[7];
+ __le64 frs[7];
+ __u8 rsvd64[448];
+};
+
enum {
NVME_SMART_CRIT_SPARE = 1 << 0,
NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
@@ -386,6 +405,7 @@ enum {
enum {
NVME_AER_NOTICE_NS_CHANGED = 0x0002,
+ NVME_AER_NOTICE_FW_ACT_STARTING = 0x0102,
};
struct nvme_lba_range_type {
@@ -451,12 +471,14 @@ enum nvme_opcode {
*
* @NVME_SGL_FMT_ADDRESS: absolute address of the data block
* @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block
+ * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
* @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation
* request subtype
*/
enum {
NVME_SGL_FMT_ADDRESS = 0x00,
NVME_SGL_FMT_OFFSET = 0x01,
+ NVME_SGL_FMT_TRANSPORT_A = 0x0A,
NVME_SGL_FMT_INVALIDATE = 0x0f,
};
@@ -470,12 +492,16 @@ enum {
*
* For struct nvme_keyed_sgl_desc:
* @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
+ *
+ * Transport-specific SGL types:
+ * @NVME_TRANSPORT_SGL_DATA_DESC:	Transport SGL data block descriptor
*/
enum {
NVME_SGL_FMT_DATA_DESC = 0x00,
NVME_SGL_FMT_SEG_DESC = 0x02,
NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
+ NVME_TRANSPORT_SGL_DATA_DESC = 0x05,
};
struct nvme_sgl_desc {
@@ -677,6 +703,7 @@ enum {
NVME_FEAT_ASYNC_EVENT = 0x0b,
NVME_FEAT_AUTO_PST = 0x0c,
NVME_FEAT_HOST_MEM_BUF = 0x0d,
+ NVME_FEAT_TIMESTAMP = 0x0e,
NVME_FEAT_KATO = 0x0f,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
@@ -1106,19 +1133,6 @@ enum {
NVME_SC_UNWRITTEN_BLOCK = 0x287,
NVME_SC_DNR = 0x4000,
-
-
- /*
- * FC Transport-specific error status values for NVME commands
- *
- * Transport-specific status code values must be in the range 0xB0..0xBF
- */
-
- /* Generic FC failure - catchall */
- NVME_SC_FC_TRANSPORT_ERROR = 0x00B0,
-
- /* I/O failure due to FC ABTS'd */
- NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1,
};
struct nvme_completion {
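A sketch of how the new NVME_CC_*_SHIFT constants compose a controller configuration value; page_shift is the host's memory page shift (MPS encodes a page size of 2^(12+n)), and the routine is illustrative rather than the driver's actual enable path:

static u32 build_nvme_cc(unsigned int page_shift)
{
	u32 cc = 0;

	cc |= NVME_CC_CSS_NVM;				/* NVM command set */
	cc |= (page_shift - 12) << NVME_CC_MPS_SHIFT;	/* memory page size */
	cc |= NVME_CC_AMS_RR;				/* round-robin arb */
	cc |= NVME_CC_SHN_NONE;
	cc |= NVME_CC_IOSQES | NVME_CC_IOCQES;		/* queue entry sizes */
	cc |= NVME_CC_ENABLE;
	return cc;
}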
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index c2256d746543..4e85447f7860 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -12,6 +12,9 @@
#ifndef _LINUX_NVMEM_CONSUMER_H
#define _LINUX_NVMEM_CONSUMER_H
+#include <linux/err.h>
+#include <linux/errno.h>
+
struct device;
struct device_node;
/* consumer cookie */
@@ -35,6 +38,7 @@ void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
/* direct nvmem device read/write interface */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
@@ -85,6 +89,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell,
return -ENOSYS;
}
+static inline int nvmem_cell_read_u32(struct device *dev,
+ const char *cell_id, u32 *val)
+{
+ return -ENOSYS;
+}
+
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
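Typical use of the new nvmem_cell_read_u32() helper — "calib" is an assumed cell name wired up in the device tree:

static int mydrv_read_calib(struct device *dev, u32 *val)
{
	/* Returns -ENOSYS from the stub when NVMEM is compiled out,
	 * or a cell lookup/read error otherwise. */
	return nvmem_cell_read_u32(dev, "calib", val);
}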
diff --git a/include/linux/of.h b/include/linux/of.h
index 4a8a70916237..b240ed69dc96 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -104,7 +104,6 @@ extern const struct fwnode_operations of_fwnode_ops;
static inline void of_node_init(struct device_node *node)
{
kobject_init(&node->kobj, &of_node_ktype);
- node->fwnode.type = FWNODE_OF;
node->fwnode.ops = &of_fwnode_ops;
}
@@ -152,7 +151,7 @@ void of_core_init(void);
static inline bool is_of_node(const struct fwnode_handle *fwnode)
{
- return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &of_fwnode_ops;
}
#define to_of_node(__fwnode) \
@@ -735,6 +734,16 @@ static inline struct device_node *of_get_cpu_node(int cpu,
return NULL;
}
+static inline int of_n_addr_cells(struct device_node *np)
+{
+	return 0;
+}
+static inline int of_n_size_cells(struct device_node *np)
+{
+ return 0;
+}
+
static inline int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value)
{
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index b4ad8b4f8506..611502524425 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -50,7 +50,7 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
struct device *cpu_dev;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
- return NULL;
+ return of_get_cpu_node(cpu, NULL);
return of_node_get(cpu_dev->of_node);
}
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index e0d1946270f3..fb908e598348 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -57,7 +57,14 @@ extern const struct of_device_id of_default_bus_match_table[];
extern struct platform_device *of_device_alloc(struct device_node *np,
const char *bus_id,
struct device *parent);
+#ifdef CONFIG_OF
extern struct platform_device *of_find_device_by_node(struct device_node *np);
+#else
+static inline struct platform_device *of_find_device_by_node(struct device_node *np)
+{
+ return NULL;
+}
+#endif
/* Platform devices and busses creation */
extern struct platform_device *of_platform_device_create(struct device_node *np,
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d33e3280c8ad..ba2d470d2d0a 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -303,8 +303,8 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
* Only test-and-set exist for PG_writeback. The unconditional operators are
* risky: they bypass page accounting.
*/
-TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
- TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
+TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
+ TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 79b36f57c3ba..5bbd6780f205 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -353,8 +353,16 @@ struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
unsigned int nr_entries, struct page **entries,
pgoff_t *indices);
-unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
- unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, unsigned int nr_pages,
+ struct page **pages);
+static inline unsigned find_get_pages(struct address_space *mapping,
+ pgoff_t *start, unsigned int nr_pages,
+ struct page **pages)
+{
+ return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
+ pages);
+}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index b45d391b4540..4dcd5506f1ed 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -27,8 +27,16 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec,
pgoff_t start, unsigned nr_entries,
pgoff_t *indices);
void pagevec_remove_exceptionals(struct pagevec *pvec);
-unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
- pgoff_t start, unsigned nr_pages);
+unsigned pagevec_lookup_range(struct pagevec *pvec,
+ struct address_space *mapping,
+ pgoff_t *start, pgoff_t end);
+static inline unsigned pagevec_lookup(struct pagevec *pvec,
+ struct address_space *mapping,
+ pgoff_t *start)
+{
+ return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
+}
+
unsigned pagevec_lookup_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, int tag,
unsigned nr_pages);
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index af5edbf3eea3..f7a04e1af112 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -62,11 +62,13 @@ struct pci_epc_ops {
* @size: the size of the PCI address space
* @bitmap: bitmap to manage the PCI address space
* @pages: number of bits representing the address region
+ * @page_size: size of each page
*/
struct pci_epc_mem {
phys_addr_t phys_base;
size_t size;
unsigned long *bitmap;
+ size_t page_size;
int pages;
};
@@ -98,6 +100,9 @@ struct pci_epc {
#define devm_pci_epc_create(dev, ops) \
__devm_pci_epc_create((dev), (ops), THIS_MODULE)
+#define pci_epc_mem_init(epc, phys_addr, size) \
+ __pci_epc_mem_init((epc), (phys_addr), (size), PAGE_SIZE)
+
static inline void epc_set_drvdata(struct pci_epc *epc, void *data)
{
dev_set_drvdata(&epc->dev, data);
@@ -135,7 +140,8 @@ void pci_epc_stop(struct pci_epc *epc);
struct pci_epc *pci_epc_get(const char *epc_name);
void pci_epc_put(struct pci_epc *epc);
-int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size);
+int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size,
+ size_t page_size);
void pci_epc_mem_exit(struct pci_epc *epc);
void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size);
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 0d529cb90143..60d551a9a1ba 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -14,17 +14,10 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
struct pci_epf;
-enum pci_interrupt_pin {
- PCI_INTERRUPT_UNKNOWN,
- PCI_INTERRUPT_INTA,
- PCI_INTERRUPT_INTB,
- PCI_INTERRUPT_INTC,
- PCI_INTERRUPT_INTD,
-};
-
enum pci_barno {
BAR_0,
BAR_1,
@@ -149,6 +142,8 @@ static inline void *epf_get_drvdata(struct pci_epf *epf)
return dev_get_drvdata(&epf->dev);
}
+const struct pci_epf_device_id *
+pci_epf_match_device(const struct pci_epf_device_id *id, struct pci_epf *epf);
struct pci_epf *pci_epf_create(const char *name);
void pci_epf_destroy(struct pci_epf *epf);
int __pci_epf_register_driver(struct pci_epf_driver *driver,
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f958d0732af6..f4f8ee5a7362 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -102,6 +102,28 @@ enum {
DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};
+/**
+ * enum pci_interrupt_pin - PCI INTx interrupt values
+ * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
+ * @PCI_INTERRUPT_INTA: PCI INTA pin
+ * @PCI_INTERRUPT_INTB: PCI INTB pin
+ * @PCI_INTERRUPT_INTC: PCI INTC pin
+ * @PCI_INTERRUPT_INTD: PCI INTD pin
+ *
+ * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
+ * PCI_INTERRUPT_PIN register.
+ */
+enum pci_interrupt_pin {
+ PCI_INTERRUPT_UNKNOWN,
+ PCI_INTERRUPT_INTA,
+ PCI_INTERRUPT_INTB,
+ PCI_INTERRUPT_INTC,
+ PCI_INTERRUPT_INTD,
+};
+
+/* The number of legacy PCI INTx interrupts */
+#define PCI_NUM_INTX 4
+
/*
* pci_power_t values must match the bits in the Capabilities PME_Support
* and Control/Status PowerState fields in the Power Management capability.
@@ -453,6 +475,7 @@ struct pci_host_bridge {
void *release_data;
struct msi_controller *msi;
unsigned int ignore_reset_delay:1; /* for entire hierarchy */
+ unsigned int no_ext_tags:1; /* no Extended Tags */
/* Resource alignment requirements */
resource_size_t (*align_resource)(struct pci_dev *dev,
const struct resource *res,
@@ -731,6 +754,7 @@ struct pci_driver {
void (*shutdown) (struct pci_dev *dev);
int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
const struct pci_error_handlers *err_handler;
+ const struct attribute_group **groups;
struct device_driver driver;
struct pci_dynids dynids;
};
@@ -846,7 +870,6 @@ char *pcibios_setup(char *str);
resource_size_t pcibios_align_resource(void *, const struct resource *,
resource_size_t,
resource_size_t);
-void pcibios_update_irq(struct pci_dev *, int irq);
/* Weak but can be overriden by arch */
void pci_fixup_cardbus(struct pci_bus *);
@@ -1164,8 +1187,6 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
-void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
- int (*)(const struct pci_dev *, u8, u8));
void pci_assign_irq(struct pci_dev *dev);
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
#define HAVE_PCI_REQ_REGIONS 2
@@ -1398,6 +1419,38 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
NULL);
}
+/**
+ * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
+ * @d: the INTx IRQ domain
+ * @node: the DT node for the device whose interrupt we're translating
+ * @intspec: the interrupt specifier data from the DT
+ * @intsize: the number of entries in @intspec
+ * @out_hwirq: pointer at which to write the hwirq number
+ * @out_type: pointer at which to write the interrupt type
+ *
+ * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
+ * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
+ * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
+ * INTx value to obtain the hwirq number.
+ *
+ * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
+ */
+static inline int pci_irqd_intx_xlate(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ const u32 intx = intspec[0];
+
+ if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
+ return -EINVAL;
+
+ *out_hwirq = intx - PCI_INTERRUPT_INTA;
+ return 0;
+}
+
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
extern bool pcie_ports_auto;
@@ -1632,6 +1685,8 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
+static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
+{ return false; }
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
@@ -2063,7 +2118,7 @@ static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
/**
* pci_vpd_srdt_size - Extracts the Small Resource Data Type length
- * @lrdt: Pointer to the beginning of the Small Resource Data Type tag
+ * @srdt: Pointer to the beginning of the Small Resource Data Type tag
*
* Returns the extracted Small Resource Data Type length.
*/
@@ -2074,7 +2129,7 @@ static inline u8 pci_vpd_srdt_size(const u8 *srdt)
/**
* pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
- * @lrdt: Pointer to the beginning of the Small Resource Data Type tag
+ * @srdt: Pointer to the beginning of the Small Resource Data Type tag
*
* Returns the extracted Small Resource Data Type Tag Item.
*/
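pci_irqd_intx_xlate() is shaped to drop straight into an irq_domain_ops .xlate slot; a sketch for a host bridge's INTx domain (intx_irq_map is a hypothetical .map callback):

static const struct irq_domain_ops intx_domain_ops = {
	.map = intx_irq_map,		/* hypothetical mapper */
	.xlate = pci_irqd_intx_xlate,	/* DT value 1-4 -> hwirq 0-3 */
};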
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c71e532da458..4adf6161ec77 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -576,6 +576,7 @@
#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
+#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092
#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index afcd130ab3a9..e8f3f7479224 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -38,7 +38,7 @@ static inline void set_service_data(struct pcie_device *dev, void *data)
dev->priv_data = data;
}
-static inline void* get_service_data(struct pcie_device *dev)
+static inline void *get_service_data(struct pcie_device *dev)
{
return dev->priv_data;
}
@@ -50,8 +50,8 @@ struct pcie_port_service_driver {
int (*suspend) (struct pcie_device *dev);
int (*resume) (struct pcie_device *dev);
- /* Service Error Recovery Handler */
- const struct pci_error_handlers *err_handler;
+ /* Device driver may resume normal operations */
+ void (*error_resume)(struct pci_dev *dev);
/* Link Reset Capability - AER service driver specific */
pci_ers_result_t (*reset_link) (struct pci_dev *dev);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 491b3f5a5f8a..6a5fb939d3e5 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -21,6 +21,25 @@
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
+/* minimum allocation size and shift in bytes */
+#define PCPU_MIN_ALLOC_SHIFT 2
+#define PCPU_MIN_ALLOC_SIZE (1 << PCPU_MIN_ALLOC_SHIFT)
+
+/* number of bits per page, used to trigger a scan if blocks are > PAGE_SIZE */
+#define PCPU_BITS_PER_PAGE (PAGE_SIZE >> PCPU_MIN_ALLOC_SHIFT)
+
+/*
+ * This determines the size of each metadata block. There are several subtle
+ * constraints around this constant. The reserved region must be a multiple of
+ * PCPU_BITMAP_BLOCK_SIZE. Additionally, PCPU_BITMAP_BLOCK_SIZE must be a
+ * multiple of PAGE_SIZE or PAGE_SIZE must be a multiple of
+ * PCPU_BITMAP_BLOCK_SIZE to align with the populated page map. The unit_size
+ * also has to be a multiple of PCPU_BITMAP_BLOCK_SIZE to ensure full blocks.
+ */
+#define PCPU_BITMAP_BLOCK_SIZE PAGE_SIZE
+#define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \
+ PCPU_MIN_ALLOC_SHIFT)
+
/*
* Percpu allocator can serve percpu allocations before slab is
* initialized which allows slab to depend on the percpu allocator.
@@ -116,7 +135,6 @@ extern bool is_kernel_percpu_address(unsigned long addr);
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
-extern void __init percpu_init_late(void);
extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
extern void __percpu *__alloc_percpu(size_t size, size_t align);
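Worked numbers for the new bitmap constants, assuming 4 KiB pages: each bit covers one PCPU_MIN_ALLOC_SIZE (4-byte) slot, so PCPU_BITS_PER_PAGE = 4096 >> 2 = 1024; with PCPU_BITMAP_BLOCK_SIZE equal to PAGE_SIZE, PCPU_BITMAP_BLOCK_BITS is likewise 1024 — one metadata block per page.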
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b14095bcf4bb..8e22f24ded6a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -139,17 +139,6 @@ struct hw_perf_event {
/* for tp_event->class */
struct list_head tp_list;
};
- struct { /* intel_cqm */
- int cqm_state;
- u32 cqm_rmid;
- int is_group_event;
- struct list_head cqm_events_entry;
- struct list_head cqm_groups_entry;
- struct list_head cqm_group_entry;
- };
- struct { /* itrace */
- int itrace_started;
- };
struct { /* amd_power */
u64 pwr_acc;
u64 ptsc;
@@ -417,11 +406,6 @@ struct pmu {
/*
- * Return the count value for a counter.
- */
- u64 (*count) (struct perf_event *event); /*optional*/
-
- /*
* Set up pmu-private data structures for an AUX area
*/
void *(*setup_aux) (int cpu, void **pages,
@@ -541,6 +525,7 @@ struct swevent_hlist {
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
#define PERF_ATTACH_TASK_DATA 0x08
+#define PERF_ATTACH_ITRACE 0x10
struct perf_cgroup;
struct ring_buffer;
@@ -864,6 +849,7 @@ extern int perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
+extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
@@ -944,6 +930,8 @@ struct perf_sample_data {
struct perf_regs regs_intr;
u64 stack_user_size;
+
+ u64 phys_addr;
} ____cacheline_aligned;
/* default value for data source */
@@ -1111,11 +1099,6 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
__perf_event_task_sched_out(prev, next);
}
-static inline u64 __perf_event_count(struct perf_event *event)
-{
- return local64_read(&event->count) + atomic64_read(&event->child_count);
-}
-
extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -1201,7 +1184,7 @@ extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx,
- struct task_struct *task);
+ struct task_struct *task, struct perf_event *event);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 0bb5b212ab42..d78cd01ea513 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -182,6 +182,7 @@ static inline const char *phy_modes(phy_interface_t interface)
#define MII_ADDR_C45 (1<<30)
struct device;
+struct phylink;
struct sk_buff;
/*
@@ -469,11 +470,13 @@ struct phy_device {
struct mutex lock;
+ struct phylink *phylink;
struct net_device *attached_dev;
u8 mdix;
u8 mdix_ctrl;
+ void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier);
void (*adjust_link)(struct net_device *dev);
};
#define to_phy_device(d) container_of(to_mdio_device(d), \
@@ -667,6 +670,24 @@ struct phy_fixup {
int (*run)(struct phy_device *phydev);
};
+const char *phy_speed_to_str(int speed);
+const char *phy_duplex_to_str(unsigned int duplex);
+
+/* A structure for mapping a particular speed and duplex
+ * combination to a particular SUPPORTED and ADVERTISED value
+ */
+struct phy_setting {
+ u32 speed;
+ u8 duplex;
+ u8 bit;
+};
+
+const struct phy_setting *
+phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
+ size_t maxbit, bool exact);
+size_t phy_speeds(unsigned int *speeds, size_t size,
+ unsigned long *mask, size_t maxbit);
+
/**
* phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY.
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 78bb0d7f6b11..e694d4008c4a 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -27,6 +27,8 @@ enum phy_mode {
PHY_MODE_USB_HOST,
PHY_MODE_USB_DEVICE,
PHY_MODE_USB_OTG,
+ PHY_MODE_SGMII,
+ PHY_MODE_10GKR,
};
/**
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
new file mode 100644
index 000000000000..af67edd4ae38
--- /dev/null
+++ b/include/linux/phylink.h
@@ -0,0 +1,148 @@
+#ifndef NETDEV_PCS_H
+#define NETDEV_PCS_H
+
+#include <linux/phy.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+struct device_node;
+struct ethtool_cmd;
+struct net_device;
+
+enum {
+ MLO_PAUSE_NONE,
+ MLO_PAUSE_ASYM = BIT(0),
+ MLO_PAUSE_SYM = BIT(1),
+ MLO_PAUSE_RX = BIT(2),
+ MLO_PAUSE_TX = BIT(3),
+ MLO_PAUSE_TXRX_MASK = MLO_PAUSE_TX | MLO_PAUSE_RX,
+ MLO_PAUSE_AN = BIT(4),
+
+ MLO_AN_PHY = 0, /* Conventional PHY */
+ MLO_AN_FIXED, /* Fixed-link mode */
+ MLO_AN_SGMII, /* Cisco SGMII protocol */
+ MLO_AN_8023Z, /* 1000base-X protocol */
+};
+
+static inline bool phylink_autoneg_inband(unsigned int mode)
+{
+ return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z;
+}
+
+struct phylink_link_state {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
+ phy_interface_t interface; /* PHY_INTERFACE_xxx */
+ int speed;
+ int duplex;
+ int pause;
+ unsigned int link:1;
+ unsigned int an_enabled:1;
+ unsigned int an_complete:1;
+};
+
+struct phylink_mac_ops {
+ /**
+ * validate: validate and update the link configuration
+ * @ndev: net_device structure associated with MAC
+ * @config: configuration to validate
+ *
+	 * Update the %config->supported and %config->advertised masks,
+	 * clearing bits that cannot be supported.
+ *
+ * Note: the PHY may be able to transform from one connection
+	 * technology to another, so, e.g., don't clear 1000BaseX just
+ * because the MAC is unable to support it. This is more about
+ * clearing unsupported speeds and duplex settings.
+ *
+ * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX
+ * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
+ * based on %config->advertised and/or %config->speed.
+ */
+ void (*validate)(struct net_device *ndev, unsigned long *supported,
+ struct phylink_link_state *state);
+
+ /* Read the current link state from the hardware */
+ int (*mac_link_state)(struct net_device *, struct phylink_link_state *);
+
+ /* Configure the MAC */
+ /**
+ * mac_config: configure the MAC for the selected mode and state
+ * @ndev: net_device structure for the MAC
+ * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII
+ * @state: state structure
+ *
+ * The action performed depends on the currently selected mode:
+ *
+ * %MLO_AN_FIXED, %MLO_AN_PHY:
+ * set the specified speed, duplex, pause mode, and phy interface
+ * mode in the provided @state.
+ * %MLO_AN_8023Z:
+ * place the link in 1000base-X mode, advertising the parameters
+ * given in advertising in @state.
+ * %MLO_AN_SGMII:
+	 *   place the link in Cisco SGMII mode - there is no advertisement
+	 *   to make, as the PHY communicates the speed and duplex to the
+	 *   MAC over the in-band control word. Configuration of the pause
+	 *   mode is as per %MLO_AN_PHY, since pause is not carried in-band.
+ */
+ void (*mac_config)(struct net_device *ndev, unsigned int mode,
+ const struct phylink_link_state *state);
+
+ /**
+ * mac_an_restart: restart 802.3z BaseX autonegotiation
+ * @ndev: net_device structure for the MAC
+ */
+ void (*mac_an_restart)(struct net_device *ndev);
+
+ void (*mac_link_down)(struct net_device *, unsigned int mode);
+ void (*mac_link_up)(struct net_device *, unsigned int mode,
+ struct phy_device *);
+};
+
+struct phylink *phylink_create(struct net_device *, struct device_node *,
+ phy_interface_t iface, const struct phylink_mac_ops *ops);
+void phylink_destroy(struct phylink *);
+
+int phylink_connect_phy(struct phylink *, struct phy_device *);
+int phylink_of_phy_connect(struct phylink *, struct device_node *);
+void phylink_disconnect_phy(struct phylink *);
+
+void phylink_mac_change(struct phylink *, bool up);
+
+void phylink_start(struct phylink *);
+void phylink_stop(struct phylink *);
+
+void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *);
+int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *);
+
+int phylink_ethtool_ksettings_get(struct phylink *,
+ struct ethtool_link_ksettings *);
+int phylink_ethtool_ksettings_set(struct phylink *,
+ const struct ethtool_link_ksettings *);
+int phylink_ethtool_nway_reset(struct phylink *);
+void phylink_ethtool_get_pauseparam(struct phylink *,
+ struct ethtool_pauseparam *);
+int phylink_ethtool_set_pauseparam(struct phylink *,
+ struct ethtool_pauseparam *);
+int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *);
+int phylink_ethtool_get_module_eeprom(struct phylink *,
+ struct ethtool_eeprom *, u8 *);
+int phylink_init_eee(struct phylink *, bool);
+int phylink_get_eee_err(struct phylink *);
+int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
+int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
+int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
+
+#define phylink_zero(bm) \
+ bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS)
+#define __phylink_do_bit(op, bm, mode) \
+ op(ETHTOOL_LINK_MODE_ ## mode ## _BIT, bm)
+
+#define phylink_set(bm, mode) __phylink_do_bit(__set_bit, bm, mode)
+#define phylink_clear(bm, mode) __phylink_do_bit(__clear_bit, bm, mode)
+#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
+
+void phylink_set_port_modes(unsigned long *bits);
+
+#endif
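A sketch of the bitmap helpers above in a MAC driver's validate() callback — mydrv_validate() and its 1G-full-duplex-only hardware are assumptions:

static void mydrv_validate(struct net_device *ndev, unsigned long *supported,
			   struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	phylink_set(mask, 1000baseT_Full);	/* hardware does 1G full only */
	phylink_set(mask, Autoneg);
	phylink_set(mask, Pause);

	/* Clear everything the hardware cannot do */
	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}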
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index e5b1716f98cc..7fa5d87190c2 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -152,12 +152,12 @@ struct pinctrl_map {
#ifdef CONFIG_PINCTRL
-extern int pinctrl_register_mappings(struct pinctrl_map const *map,
+extern int pinctrl_register_mappings(const struct pinctrl_map *map,
unsigned num_maps);
extern void pinctrl_provide_dummies(void);
#else
-static inline int pinctrl_register_mappings(struct pinctrl_map const *map,
+static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
unsigned num_maps)
{
return 0;
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index e91d1b6a260d..5d8bc7f21c2a 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -86,6 +86,7 @@
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
* supplies, the argument to this parameter (on a custom format) tells
* the driver which alternative power source to use.
+ * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicates this is a sleep-related state.
* @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
* this parameter (on a custom format) tells the driver which alternative
* slew rate to use.
@@ -114,6 +115,7 @@ enum pin_config_param {
PIN_CONFIG_OUTPUT_ENABLE,
PIN_CONFIG_OUTPUT,
PIN_CONFIG_POWER_SOURCE,
+ PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
index 5ae0d9c80d4d..683d90453c41 100644
--- a/include/linux/platform_data/gpio_backlight.h
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -14,7 +14,6 @@ struct gpio_backlight_platform_data {
struct device *fbdev;
int gpio;
int def_value;
- bool active_low;
const char *name;
};
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index 0ff1e0dba720..73d9098ada2d 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -67,6 +67,9 @@ struct omap_hsmmc_platform_data {
#define HSMMC_HAS_HSPE_SUPPORT (1 << 2)
unsigned features;
+ /* string specifying a particular variant of hardware */
+ char *version;
+
int gpio_cd; /* gpio (card detect) */
int gpio_cod; /* gpio (cover detect) */
int gpio_wp; /* gpio (write protect) */
diff --git a/include/linux/platform_data/mdio-bcm-unimac.h b/include/linux/platform_data/mdio-bcm-unimac.h
new file mode 100644
index 000000000000..8a5f9f0b2c52
--- /dev/null
+++ b/include/linux/platform_data/mdio-bcm-unimac.h
@@ -0,0 +1,13 @@
+#ifndef __MDIO_BCM_UNIMAC_PDATA_H
+#define __MDIO_BCM_UNIMAC_PDATA_H
+
+struct unimac_mdio_pdata {
+ u32 phy_mask;
+ int (*wait_func)(void *data);
+ void *wait_func_data;
+ const char *bus_name;
+};
+
+#define UNIMAC_MDIO_DRV_NAME "unimac-mdio"
+
+#endif /* __MDIO_BCM_UNIMAC_PDATA_H */
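
A board file might hand this platform data to the driver as follows (names hypothetical; phy_mask is assumed to follow the usual mii_bus convention where set bits mark addresses to skip):

static struct unimac_mdio_pdata mymdio_pdata = {
	.phy_mask	= (u32)~BIT(4),	/* probe only the PHY at address 4 */
	.bus_name	= "mymdio",
};

static struct platform_device mymdio_device = {
	.name			= UNIMAC_MDIO_DRV_NAME,
	.id			= -1,
	.dev.platform_data	= &mymdio_pdata,
};
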
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
index 1cf555aef896..f1a2cf655bdb 100644
--- a/include/linux/platform_data/mtd-davinci.h
+++ b/include/linux/platform_data/mtd-davinci.h
@@ -28,7 +28,7 @@
#ifndef __ARCH_ARM_DAVINCI_NAND_H
#define __ARCH_ARM_DAVINCI_NAND_H
-#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#define NANDFCR_OFFSET 0x60
#define NANDFSR_OFFSET 0x64
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index f01659026b26..f8c553f92655 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -12,7 +12,7 @@
#ifndef __MTD_NAND_S3C2410_H
#define __MTD_NAND_S3C2410_H
-#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
/**
* struct s3c2410_nand_set - define a set of one or more nand chips
diff --git a/include/linux/platform_data/omap_drm.h b/include/linux/platform_data/omap_drm.h
deleted file mode 100644
index f4e4a237ebd2..000000000000
--- a/include/linux/platform_data/omap_drm.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * DRM/KMS platform data for TI OMAP platforms
- *
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <rob.clark@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __PLATFORM_DATA_OMAP_DRM_H__
-#define __PLATFORM_DATA_OMAP_DRM_H__
-
-/*
- * Optional platform data to configure the default configuration of which
- * pipes/overlays/CRTCs are used.. if this is not provided, then instead the
- * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
- * one manager, with priority given to managers that are connected to
- * detected devices. Remaining overlays are used as video planes. This
- * should be a good default behavior for most cases, but yet there still
- * might be times when you wish to do something different.
- */
-struct omap_kms_platform_data {
- /* overlays to use as CRTCs: */
- int ovl_cnt;
- const int *ovl_ids;
-
- /* overlays to use as video planes: */
- int pln_cnt;
- const int *pln_ids;
-
- int mgr_cnt;
- const int *mgr_ids;
-
- int dev_cnt;
- const char **dev_names;
-};
-
-struct omap_drm_platform_data {
- uint32_t omaprev;
- struct omap_kms_platform_data *kms_pdata;
-};
-
-#endif /* __PLATFORM_DATA_OMAP_DRM_H__ */
diff --git a/include/linux/i2c/pca954x.h b/include/linux/platform_data/pca954x.h
index 1712677d5904..1712677d5904 100644
--- a/include/linux/i2c/pca954x.h
+++ b/include/linux/platform_data/pca954x.h
diff --git a/include/linux/i2c/tc35876x.h b/include/linux/platform_data/tc35876x.h
index cd6a51c71e7e..cd6a51c71e7e 100644
--- a/include/linux/i2c/tc35876x.h
+++ b/include/linux/platform_data/tc35876x.h
diff --git a/include/linux/platform_data/x86/apple.h b/include/linux/platform_data/x86/apple.h
new file mode 100644
index 000000000000..079e816c3c21
--- /dev/null
+++ b/include/linux/platform_data/x86/apple.h
@@ -0,0 +1,13 @@
+#ifndef PLATFORM_DATA_X86_APPLE_H
+#define PLATFORM_DATA_X86_APPLE_H
+
+#ifdef CONFIG_X86
+/**
+ * x86_apple_machine - whether the machine is an x86 Apple Macintosh
+ */
+extern bool x86_apple_machine;
+#else
+#define x86_apple_machine false
+#endif
+
+#endif
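
Since the flag compiles down to plain false on non-x86 builds, callers can test it unconditionally; a minimal sketch (mydrv_* names hypothetical):

#include <linux/platform_data/x86/apple.h>

static int mydrv_probe(struct platform_device *pdev)
{
	if (x86_apple_machine)
		mydrv_apply_apple_quirks(pdev);	/* hypothetical quirk hook */
	return 0;
}
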
diff --git a/include/linux/i2c/mlxcpld.h b/include/linux/platform_data/x86/mlxcpld.h
index b08dcb183fca..b08dcb183fca 100644
--- a/include/linux/i2c/mlxcpld.h
+++ b/include/linux/platform_data/x86/mlxcpld.h
diff --git a/include/linux/pm.h b/include/linux/pm.h
index b8b4df09fd8f..47ded8aa8a5d 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -689,6 +689,8 @@ struct dev_pm_domain {
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
+extern void dpm_noirq_resume_devices(pm_message_t state);
+extern void dpm_noirq_end(void);
extern void dpm_resume_noirq(pm_message_t state);
extern void dpm_resume_early(pm_message_t state);
extern void dpm_resume(pm_message_t state);
@@ -697,6 +699,8 @@ extern void dpm_complete(pm_message_t state);
extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
+extern void dpm_noirq_begin(void);
+extern int dpm_noirq_suspend_devices(pm_message_t state);
extern int dpm_suspend_noirq(pm_message_t state);
extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
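
The pairing of the new declarations suggests the combined noirq helpers are now built from the split ones; under that assumption, dpm_suspend_noirq() decomposes roughly as:

int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	dpm_noirq_begin();			/* enter the noirq phase */
	ret = dpm_noirq_suspend_devices(state);	/* run ->suspend_noirq() */
	if (ret)
		dpm_noirq_end();		/* unwind if a device failed */
	return ret;
}

This lets platform code that needs extra work between the two steps drive the phases itself.
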
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 41004d97cefa..84f423d5633e 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -43,6 +43,7 @@ struct genpd_power_state {
s64 power_on_latency_ns;
s64 residency_ns;
struct fwnode_handle *fwnode;
+ ktime_t idle_time;
};
struct genpd_lock_ops;
@@ -78,6 +79,8 @@ struct generic_pm_domain {
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
void *free; /* Free the state that was allocated for default */
+ ktime_t on_time;
+ ktime_t accounting_time;
const struct genpd_lock_ops *lock_ops;
union {
struct mutex mlock;
diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h
new file mode 100644
index 000000000000..45ce7f116a91
--- /dev/null
+++ b/include/linux/power/bq24190_charger.h
@@ -0,0 +1,18 @@
+/*
+ * Platform data for the TI bq24190 battery charger driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _BQ24190_CHARGER_H_
+#define _BQ24190_CHARGER_H_
+
+#include <linux/regulator/machine.h>
+
+struct bq24190_platform_data {
+ const struct regulator_init_data *regulator_init_data;
+};
+
+#endif
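
A board file could use this to describe the charger's boost (VBUS/OTG) supply; a sketch, with illustrative names and constraints:

static const struct regulator_init_data bq24190_vbus_init = {
	.constraints = {
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
};

static struct bq24190_platform_data bq24190_pdata = {
	.regulator_init_data = &bq24190_vbus_init,
};

/* ...then passed via i2c_board_info.platform_data for the charger. */
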
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 11e11685dd1d..43194e02c1ee 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -6,6 +6,7 @@ enum bq27xxx_chip {
BQ27010, /* bq27010, bq27210 */
BQ2750X, /* bq27500 deprecated alias */
BQ2751X, /* bq27510, bq27520 deprecated alias */
+ BQ2752X,
BQ27500, /* bq27500/1 */
BQ27510G1, /* bq27510G1 */
BQ27510G2, /* bq27510G2 */
@@ -15,26 +16,16 @@ enum bq27xxx_chip {
BQ27520G3, /* bq27520G3 */
BQ27520G4, /* bq27520G4 */
BQ27530, /* bq27530, bq27531 */
+ BQ27531,
BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
+ BQ27542,
+ BQ27546,
+ BQ27742,
BQ27545, /* bq27545 */
BQ27421, /* bq27421, bq27425, bq27441, bq27621 */
-};
-
-/**
- * struct bq27xxx_plaform_data - Platform data for bq27xxx devices
- * @name: Name of the battery.
- * @chip: Chip class number of this device.
- * @read: HDQ read callback.
- * This function should provide access to the HDQ bus the battery is
- * connected to.
- * The first parameter is a pointer to the battery device, the second the
- * register to be read. The return value should either be the content of
- * the passed register or an error value.
- */
-struct bq27xxx_platform_data {
- const char *name;
- enum bq27xxx_chip chip;
- int (*read)(struct device *dev, unsigned int);
+ BQ27425,
+ BQ27441,
+ BQ27621,
};
struct bq27xxx_device_info;
@@ -63,7 +54,7 @@ struct bq27xxx_device_info {
struct device *dev;
int id;
enum bq27xxx_chip chip;
- bool ram_chip;
+ u32 opts;
const char *name;
struct bq27xxx_dm_reg *dm_regs;
u32 unseal_key;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index de89066b72b1..79e90b3d3288 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -332,6 +332,8 @@ extern int power_supply_get_battery_info(struct power_supply *psy,
struct power_supply_battery_info *info);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
+extern int power_supply_set_input_current_limit_from_supplier(
+ struct power_supply *psy);
extern int power_supply_set_battery_charged(struct power_supply *psy);
#ifdef CONFIG_POWER_SUPPLY
diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h
index 0035abe41b9a..56f35dd3d01d 100644
--- a/include/linux/pps-gpio.h
+++ b/include/linux/pps-gpio.h
@@ -29,4 +29,4 @@ struct pps_gpio_platform_data {
const char *gpio_label;
};
-#endif
+#endif /* _PPS_GPIO_H */
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 35ac903956c7..80a980cc8d95 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -22,7 +22,6 @@
#define LINUX_PPS_KERNEL_H
#include <linux/pps.h>
-
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/time.h>
@@ -35,9 +34,9 @@ struct pps_device;
/* The specific PPS source info */
struct pps_source_info {
- char name[PPS_MAX_NAME_LEN]; /* simbolic name */
+ char name[PPS_MAX_NAME_LEN]; /* symbolic name */
char path[PPS_MAX_NAME_LEN]; /* path of connected device */
- int mode; /* PPS's allowed mode */
+ int mode; /* PPS allowed mode */
void (*echo)(struct pps_device *pps,
int event, void *data); /* PPS echo function */
@@ -57,10 +56,10 @@ struct pps_event_time {
struct pps_device {
	struct pps_source_info info; /* PPS source info */
- struct pps_kparams params; /* PPS's current params */
+ struct pps_kparams params; /* PPS current params */
- __u32 assert_sequence; /* PPS' assert event seq # */
- __u32 clear_sequence; /* PPS' clear event seq # */
+ __u32 assert_sequence; /* PPS assert event seq # */
+ __u32 clear_sequence; /* PPS clear event seq # */
struct pps_ktime assert_tu;
struct pps_ktime clear_tu;
int current_mode; /* PPS mode at event time */
@@ -69,7 +68,7 @@ struct pps_device {
wait_queue_head_t queue; /* PPS event queue */
unsigned int id; /* PPS source unique ID */
- void const *lookup_cookie; /* pps_lookup_dev only */
+ void const *lookup_cookie; /* For pps_lookup_dev() only */
struct cdev cdev;
struct device *dev;
struct fasync_struct *async_queue; /* fasync method */
@@ -101,7 +100,7 @@ extern struct pps_device *pps_register_source(
extern void pps_unregister_source(struct pps_device *pps);
extern void pps_event(struct pps_device *pps,
struct pps_event_time *ts, int event, void *data);
-/* Look up a pps device by magic cookie */
+/* Look up a pps_device by magic cookie */
struct pps_device *pps_lookup_dev(void const *cookie);
static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
@@ -132,4 +131,3 @@ static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta
}
#endif /* LINUX_PPS_KERNEL_H */
-
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 2d2bf592d9db..76124dd4e36d 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -28,13 +28,7 @@ extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
const struct file_operations *,
void *);
-static inline struct proc_dir_entry *proc_create(
- const char *name, umode_t mode, struct proc_dir_entry *parent,
- const struct file_operations *proc_fops)
-{
- return proc_create_data(name, mode, parent, proc_fops, NULL);
-}
-
+struct proc_dir_entry *proc_create(const char *name, umode_t mode,
+				   struct proc_dir_entry *parent,
+				   const struct file_operations *proc_fops);
extern void proc_set_size(struct proc_dir_entry *, loff_t);
extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
extern void *PDE_DATA(const struct inode *);
diff --git a/include/linux/property.h b/include/linux/property.h
index 7e77039e6b81..6bebee13c5e0 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -51,46 +51,52 @@ int device_property_read_string(struct device *dev, const char *propname,
int device_property_match_string(struct device *dev,
const char *propname, const char *string);
-bool fwnode_device_is_available(struct fwnode_handle *fwnode);
-bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
-int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
+bool fwnode_device_is_available(const struct fwnode_handle *fwnode);
+bool fwnode_property_present(const struct fwnode_handle *fwnode,
+ const char *propname);
+int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val,
size_t nval);
-int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode,
const char *propname, u16 *val,
size_t nval);
-int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode,
const char *propname, u32 *val,
size_t nval);
-int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode,
const char *propname, u64 *val,
size_t nval);
-int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval);
-int fwnode_property_read_string(struct fwnode_handle *fwnode,
+int fwnode_property_read_string(const struct fwnode_handle *fwnode,
const char *propname, const char **val);
-int fwnode_property_match_string(struct fwnode_handle *fwnode,
+int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char *propname, const char *string);
+int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args);
-struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode,
- struct fwnode_handle *child);
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_parent(
+ struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_child_node(
+ const struct fwnode_handle *fwnode, struct fwnode_handle *child);
#define fwnode_for_each_child_node(fwnode, child) \
for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
child = fwnode_get_next_child_node(fwnode, child))
-struct fwnode_handle *device_get_next_child_node(struct device *dev,
- struct fwnode_handle *child);
+struct fwnode_handle *device_get_next_child_node(
+ struct device *dev, struct fwnode_handle *child);
#define device_for_each_child_node(dev, child) \
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
-struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode,
- const char *childname);
+struct fwnode_handle *fwnode_get_named_child_node(
+ const struct fwnode_handle *fwnode, const char *childname);
struct fwnode_handle *device_get_named_child_node(struct device *dev,
const char *childname);
@@ -129,31 +135,31 @@ static inline int device_property_read_u64(struct device *dev,
return device_property_read_u64_array(dev, propname, val, 1);
}
-static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode,
+static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
const char *propname)
{
return fwnode_property_present(fwnode, propname);
}
-static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u8(const struct fwnode_handle *fwnode,
const char *propname, u8 *val)
{
return fwnode_property_read_u8_array(fwnode, propname, val, 1);
}
-static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u16(const struct fwnode_handle *fwnode,
const char *propname, u16 *val)
{
return fwnode_property_read_u16_array(fwnode, propname, val, 1);
}
-static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u32(const struct fwnode_handle *fwnode,
const char *propname, u32 *val)
{
return fwnode_property_read_u32_array(fwnode, propname, val, 1);
}
-static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
+static inline int fwnode_property_read_u64(const struct fwnode_handle *fwnode,
const char *propname, u64 *val)
{
return fwnode_property_read_u64_array(fwnode, propname, val, 1);
@@ -274,19 +280,20 @@ int device_get_phy_mode(struct device *dev);
void *device_get_mac_address(struct device *dev, char *addr, int alen);
struct fwnode_handle *fwnode_graph_get_next_endpoint(
- struct fwnode_handle *fwnode, struct fwnode_handle *prev);
+ const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
struct fwnode_handle *
-fwnode_graph_get_port_parent(struct fwnode_handle *fwnode);
+fwnode_graph_get_port_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_port_parent(
- struct fwnode_handle *fwnode);
+ const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_port(
- struct fwnode_handle *fwnode);
+ const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_endpoint(
- struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_graph_get_remote_node(struct fwnode_handle *fwnode,
- u32 port, u32 endpoint);
+ const struct fwnode_handle *fwnode);
+struct fwnode_handle *
+fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
+ u32 endpoint);
-int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
#endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 0eef0a2b1901..d60de4a39810 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -323,6 +323,7 @@ struct qed_eth_ops {
int (*configure_arfs_searcher)(struct qed_dev *cdev,
bool en_searcher);
+ int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index ef39c7f40ae6..cc646ca97974 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -161,6 +161,18 @@ enum qed_nvm_images {
QED_NVM_IMAGE_FCOE_CFG,
};
+struct qed_link_eee_params {
+ u32 tx_lpi_timer;
+#define QED_EEE_1G_ADV BIT(0)
+#define QED_EEE_10G_ADV BIT(1)
+
+ /* Capabilities are represented using QED_EEE_*_ADV values */
+ u8 adv_caps;
+ u8 lp_adv_caps;
+ bool enable;
+ bool tx_lpi_enable;
+};
+
enum qed_led_mode {
QED_LED_MODE_OFF,
QED_LED_MODE_ON,
@@ -172,8 +184,9 @@ enum qed_led_mode {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
-#define QED_COALESCE_MAX 0xFF
+#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
+#define QED_DEFAULT_TX_USECS 48
/* forward */
struct qed_dev;
@@ -408,6 +421,7 @@ struct qed_link_params {
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
+#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
u32 override_flags;
bool autoneg;
u32 adv_speeds;
@@ -422,6 +436,7 @@ struct qed_link_params {
#define QED_LINK_LOOPBACK_EXT BIT(3)
#define QED_LINK_LOOPBACK_MAC BIT(4)
u32 loopback_mode;
+ struct qed_link_eee_params eee;
};
struct qed_link_output {
@@ -437,6 +452,12 @@ struct qed_link_output {
u8 port; /* In PORT defs */
bool autoneg;
u32 pause_config;
+
+ /* EEE - capability & param */
+ bool eee_supported;
+ bool eee_active;
+ u8 sup_caps;
+ struct qed_link_eee_params eee;
};
struct qed_probe_params {
@@ -654,16 +675,6 @@ struct qed_common_ops {
enum qed_nvm_images type, u8 *buf, u16 len);
/**
- * @brief get_coalesce - Get coalesce parameters in usec
- *
- * @param cdev
- * @param rx_coal - Rx coalesce value in usec
- * @param tx_coal - Tx coalesce value in usec
- *
- */
- void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal);
-
-/**
* @brief set_coalesce - Configure Rx coalesce value in usec
*
* @param cdev
@@ -674,8 +685,8 @@ struct qed_common_ops {
*
* @return 0 on success, error otherwise.
*/
- int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
- u16 qid, u16 sb_id);
+ int (*set_coalesce)(struct qed_dev *cdev,
+ u16 rx_coal, u16 tx_coal, void *handle);
/**
* @brief set_led - Configure LED mode
diff --git a/include/linux/quota.h b/include/linux/quota.h
index bfd077ca6ac3..5ac9de4fcd6f 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -223,12 +223,12 @@ struct mem_dqinfo {
struct quota_format_type *dqi_format;
int dqi_fmt_id; /* Id of the dqi_format - used when turning
* quotas on after remount RW */
- struct list_head dqi_dirty_list; /* List of dirty dquots */
- unsigned long dqi_flags;
- unsigned int dqi_bgrace;
- unsigned int dqi_igrace;
- qsize_t dqi_max_spc_limit;
- qsize_t dqi_max_ino_limit;
+ struct list_head dqi_dirty_list; /* List of dirty dquots [dq_list_lock] */
+ unsigned long dqi_flags; /* DFQ_ flags [dq_data_lock] */
+ unsigned int dqi_bgrace; /* Space grace time [dq_data_lock] */
+ unsigned int dqi_igrace; /* Inode grace time [dq_data_lock] */
+ qsize_t dqi_max_spc_limit; /* Maximum space limit [static] */
+ qsize_t dqi_max_ino_limit; /* Maximum inode limit [static] */
void *dqi_priv;
};
@@ -293,18 +293,18 @@ static inline void dqstats_dec(unsigned int type)
* clear them when it sees fit. */
struct dquot {
- struct hlist_node dq_hash; /* Hash list in memory */
- struct list_head dq_inuse; /* List of all quotas */
- struct list_head dq_free; /* Free list element */
- struct list_head dq_dirty; /* List of dirty dquots */
+ struct hlist_node dq_hash; /* Hash list in memory [dq_list_lock] */
+ struct list_head dq_inuse; /* List of all quotas [dq_list_lock] */
+ struct list_head dq_free; /* Free list element [dq_list_lock] */
+ struct list_head dq_dirty; /* List of dirty dquots [dq_list_lock] */
struct mutex dq_lock; /* dquot IO lock */
+ spinlock_t dq_dqb_lock; /* Lock protecting dq_dqb changes */
atomic_t dq_count; /* Use count */
- wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */
struct super_block *dq_sb; /* superblock this applies to */
struct kqid dq_id; /* ID this applies to (uid, gid, projid) */
- loff_t dq_off; /* Offset of dquot on disk */
+ loff_t dq_off; /* Offset of dquot on disk [dq_lock, stable once set] */
unsigned long dq_flags; /* See DQ_* */
- struct mem_dqblk dq_dqb; /* Diskquota usage */
+ struct mem_dqblk dq_dqb; /* Diskquota usage [dq_dqb_lock] */
};
/* Operations which must be implemented by each quota format */
@@ -491,6 +491,9 @@ enum {
*/
#define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1))
/* Allow negative quota usage */
+/* Do not track dirty dquots in a list */
+#define DQUOT_NOLIST_DIRTY (1 << (DQUOT_STATE_LAST + 2))
+
static inline unsigned int dquot_state_flag(unsigned int flags, int type)
{
return flags << type;
@@ -521,7 +524,7 @@ static inline void quota_send_warning(struct kqid qid, dev_t dev,
struct quota_info {
unsigned int flags; /* Flags for diskquotas on this device */
- struct mutex dqio_mutex; /* lock device while I/O in progress */
+ struct rw_semaphore dqio_sem; /* Lock quota file while I/O in progress */
struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index dda22f45fc1b..0ce6fc49962e 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -38,11 +38,6 @@ void __quota_error(struct super_block *sb, const char *func,
/*
* declaration of quota_function calls in kernel.
*/
-void inode_add_rsv_space(struct inode *inode, qsize_t number);
-void inode_claim_rsv_space(struct inode *inode, qsize_t number);
-void inode_sub_rsv_space(struct inode *inode, qsize_t number);
-void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
-
int dquot_initialize(struct inode *inode);
bool dquot_initialize_needed(struct inode *inode);
void dquot_drop(struct inode *inode);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 3e5735064b71..567ebb5eaab0 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -357,8 +357,25 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
unsigned new_order);
int radix_tree_join(struct radix_tree_root *, unsigned long index,
unsigned new_order, void *);
-void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *,
- gfp_t, int end);
+
+void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, gfp_t gfp,
+ unsigned long max);
+static inline void __rcu **idr_get_free(struct radix_tree_root *root,
+ struct radix_tree_iter *iter,
+ gfp_t gfp,
+ int end)
+{
+ return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX);
+}
+
+static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root,
+ struct radix_tree_iter *iter,
+ gfp_t gfp,
+ unsigned long end)
+{
+ return idr_get_free_cmn(root, iter, gfp, end - 1);
+}
enum {
RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 30f945329818..583cdd3d49ca 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -121,6 +121,7 @@ extern const struct raid6_recov_calls raid6_recov_ssse3;
extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_avx512;
extern const struct raid6_recov_calls raid6_recov_s390xc;
+extern const struct raid6_recov_calls raid6_recov_neon;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index e585018498d5..d574361943ea 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -44,10 +44,25 @@ struct rb_root {
struct rb_node *rb_node;
};
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node, weighing the extra
+ * footprint against the number of potential users that could
+ * benefit from O(1) rb_last(). It is just not worth it; users
+ * that want this feature can always implement the logic
+ * explicitly. Furthermore, users that want to cache both
+ * pointers may find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
@@ -69,6 +84,12 @@ extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
+extern void rb_insert_color_cached(struct rb_node *,
+ struct rb_root_cached *, bool);
+extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);
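
A minimal sketch of the cached variant in use; struct mynode and the insertion loop are the standard rbtree boilerplate, with only the leftmost bookkeeping added:

struct mynode {
	struct rb_node node;
	unsigned long key;
};

static struct rb_root_cached mytree = RB_ROOT_CACHED;

static void mytree_insert(struct mynode *new)
{
	struct rb_node **link = &mytree.rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct mynode *cur = rb_entry(*link, struct mynode, node);

		parent = *link;
		if (new->key < cur->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* took a right turn: not the new minimum */
		}
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color_cached(&new->node, &mytree, leftmost);
	/* rb_first_cached(&mytree) is now a plain pointer read. */
}
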
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 9702b6e183bc..6bfd2b581f75 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -41,7 +41,9 @@ struct rb_augment_callbacks {
void (*rotate)(struct rb_node *old, struct rb_node *new);
};
-extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+extern void __rb_insert_augmented(struct rb_node *node,
+ struct rb_root *root,
+ bool newleft, struct rb_node **leftmost,
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
/*
* Fixup the rbtree and update the augmented information when rebalancing.
@@ -57,7 +59,16 @@ static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- __rb_insert_augmented(node, root, augment->rotate);
+ __rb_insert_augmented(node, root, false, NULL, augment->rotate);
+}
+
+static inline void
+rb_insert_augmented_cached(struct rb_node *node,
+ struct rb_root_cached *root, bool newleft,
+ const struct rb_augment_callbacks *augment)
+{
+ __rb_insert_augmented(node, &root->rb_root,
+ newleft, &root->rb_leftmost, augment->rotate);
}
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
@@ -150,6 +161,7 @@ extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+ struct rb_node **leftmost,
const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right;
@@ -157,6 +169,9 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
struct rb_node *parent, *rebalance;
unsigned long pc;
+ if (leftmost && node == *leftmost)
+ *leftmost = rb_next(node);
+
if (!tmp) {
/*
* Case 1: node to erase has no more than 1 child (easy!)
@@ -256,9 +271,21 @@ static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
- struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+ struct rb_node *rebalance = __rb_erase_augmented(node, root,
+ NULL, augment);
if (rebalance)
__rb_erase_color(rebalance, root, augment->rotate);
}
+static __always_inline void
+rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root,
+ &root->rb_leftmost,
+ augment);
+ if (rebalance)
+ __rb_erase_color(rebalance, &root->rb_root, augment->rotate);
+}
+
#endif /* _LINUX_RBTREE_AUGMENTED_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f816fc72b51e..de50d8a4cf41 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -58,8 +58,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
void synchronize_sched(void);
-void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
-void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
#ifdef CONFIG_PREEMPT_RCU
@@ -105,11 +103,13 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
+extern int rcu_scheduler_active __read_mostly;
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);
+void rcutree_migrate_callbacks(int cpu);
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
@@ -164,8 +164,6 @@ static inline void rcu_init_nohz(void) { }
* macro rather than an inline function to avoid #include hell.
*/
#ifdef CONFIG_TASKS_RCU
-#define TASKS_RCU(x) x
-extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch_lite(t) \
do { \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
@@ -176,10 +174,17 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
rcu_all_qs(); \
rcu_note_voluntary_context_switch_lite(t); \
} while (0)
+void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
+void synchronize_rcu_tasks(void);
+void exit_tasks_rcu_start(void);
+void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
-#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
+#define call_rcu_tasks call_rcu_sched
+#define synchronize_rcu_tasks synchronize_sched
+static inline void exit_tasks_rcu_start(void) { }
+static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
/**
@@ -408,6 +413,22 @@ static inline void rcu_preempt_sleep_check(void) { }
})
/**
+ * rcu_swap_protected() - swap an RCU and a regular pointer
+ * @rcu_ptr: RCU pointer
+ * @ptr: regular pointer
+ * @c: the conditions under which the dereference will take place
+ *
+ * Perform swap(@rcu_ptr, @ptr) where @rcu_ptr is an RCU-annotated pointer and
+ * @c is the argument that is passed to the rcu_dereference_protected() call
+ * used to read that pointer.
+ */
+#define rcu_swap_protected(rcu_ptr, ptr, c) do { \
+ typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
+ rcu_assign_pointer((rcu_ptr), (ptr)); \
+ (ptr) = __tmp; \
+} while (0)
+
+/**
* rcu_access_pointer() - fetch RCU pointer with no dereferencing
* @p: The pointer to read
*
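
A sketch of rcu_swap_protected() in action, replacing an RCU-published object under its update-side lock (struct myconf and the lock are hypothetical):

struct myconf { int val; };

static struct myconf __rcu *active_conf;
static DEFINE_SPINLOCK(conf_lock);

/* Publish @new; on return, @new holds the previous object, which the
 * caller may free once a grace period has elapsed.
 */
static struct myconf *conf_replace(struct myconf *new)
{
	spin_lock(&conf_lock);
	rcu_swap_protected(active_conf, new, lockdep_is_held(&conf_lock));
	spin_unlock(&conf_lock);
	return new;
}
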
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 5becbbccb998..b3dbf9502fd0 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -116,13 +116,11 @@ static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
-
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
-extern int rcu_scheduler_active __read_mostly;
+#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
-#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
+#else /* #ifndef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
-#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
+#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_is_watching(void) { return true; }
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 591792c8e5b0..48b7c9c68c4d 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -53,6 +53,9 @@ extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
extern __must_check bool refcount_dec_and_test(refcount_t *r);
extern void refcount_dec(refcount_t *r);
#else
+# ifdef CONFIG_ARCH_HAS_REFCOUNT
+# include <asm/refcount.h>
+# else
static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
return atomic_add_unless(&r->refs, i, 0);
@@ -87,6 +90,7 @@ static inline void refcount_dec(refcount_t *r)
{
atomic_dec(&r->refs);
}
+# endif /* !CONFIG_ARCH_HAS_REFCOUNT */
#endif /* CONFIG_REFCOUNT_FULL */
extern __must_check bool refcount_dec_if_one(refcount_t *r);
diff --git a/include/linux/regulator/mt6380-regulator.h b/include/linux/regulator/mt6380-regulator.h
new file mode 100644
index 000000000000..465182da6315
--- /dev/null
+++ b/include/linux/regulator/mt6380-regulator.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Chenglin Xu <chenglin.xu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_mt6380_H
+#define __LINUX_REGULATOR_mt6380_H
+
+enum {
+ MT6380_ID_VCPU = 0,
+ MT6380_ID_VCORE,
+ MT6380_ID_VRF,
+ MT6380_ID_VMLDO,
+ MT6380_ID_VALDO,
+ MT6380_ID_VPHYLDO,
+ MT6380_ID_VDDRLDO,
+ MT6380_ID_VTLDO,
+ MT6380_ID_RG_MAX,
+};
+
+#define MT6380_MAX_REGULATOR MT6380_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_mt6380_H */
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 81da49564ff4..44e630eb3d94 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -510,6 +510,8 @@ struct rproc_vdev {
};
struct rproc *rproc_get_by_phandle(phandle phandle);
+struct rproc *rproc_get_by_child(struct device *dev);
+
struct rproc *rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
const char *firmware, int len);
diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h
new file mode 100644
index 000000000000..fa8e38681b4b
--- /dev/null
+++ b/include/linux/remoteproc/qcom_rproc.h
@@ -0,0 +1,22 @@
+#ifndef __QCOM_RPROC_H__
+#define __QCOM_RPROC_H__
+
+struct notifier_block;
+
+#if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON)
+
+int qcom_register_ssr_notifier(struct notifier_block *nb);
+void qcom_unregister_ssr_notifier(struct notifier_block *nb);
+
+#else
+
+static inline int qcom_register_ssr_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void qcom_unregister_ssr_notifier(struct notifier_block *nb) {}
+
+#endif
+
+#endif
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 156cfd330b66..21fc84d82d41 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -254,6 +254,9 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
unsigned *pshared_count,
struct dma_fence ***pshared);
+int reservation_object_copy_fences(struct reservation_object *dst,
+ struct reservation_object *src);
+
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout);
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 13d8681210d5..56463f37f3e6 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -25,6 +25,11 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
int __must_check device_reset(struct device *dev);
+struct reset_control *devm_reset_control_array_get(struct device *dev,
+ bool shared, bool optional);
+struct reset_control *of_reset_control_array_get(struct device_node *np,
+ bool shared, bool optional);
+
static inline int device_reset_optional(struct device *dev)
{
return device_reset(dev);
@@ -89,6 +94,18 @@ static inline struct reset_control *__devm_reset_control_get(
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
+static inline struct reset_control *
+devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
+{
+ return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
+{
+ return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
#endif /* CONFIG_RESET_CONTROLLER */
/**
@@ -374,4 +391,55 @@ static inline struct reset_control *devm_reset_control_get_by_index(
{
return devm_reset_control_get_exclusive_by_index(dev, index);
}
+
+/*
+ * APIs to manage a list of reset controllers
+ */
+static inline struct reset_control *
+devm_reset_control_array_get_exclusive(struct device *dev)
+{
+ return devm_reset_control_array_get(dev, false, false);
+}
+
+static inline struct reset_control *
+devm_reset_control_array_get_shared(struct device *dev)
+{
+ return devm_reset_control_array_get(dev, true, false);
+}
+
+static inline struct reset_control *
+devm_reset_control_array_get_optional_exclusive(struct device *dev)
+{
+ return devm_reset_control_array_get(dev, false, true);
+}
+
+static inline struct reset_control *
+devm_reset_control_array_get_optional_shared(struct device *dev)
+{
+ return devm_reset_control_array_get(dev, true, true);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get_exclusive(struct device_node *node)
+{
+ return of_reset_control_array_get(node, false, false);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get_shared(struct device_node *node)
+{
+ return of_reset_control_array_get(node, true, false);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get_optional_exclusive(struct device_node *node)
+{
+ return of_reset_control_array_get(node, false, true);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get_optional_shared(struct device_node *node)
+{
+ return of_reset_control_array_get(node, true, true);
+}
#endif
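
A consumer that simply needs every reset line listed in its DT node deasserted could use the new array API; a sketch with hypothetical driver names:

static int mydrv_probe(struct platform_device *pdev)
{
	struct reset_control *rsts;

	rsts = devm_reset_control_array_get_optional_shared(&pdev->dev);
	if (IS_ERR(rsts))
		return PTR_ERR(rsts);

	/* rsts may be NULL if no "resets" property exists; the reset
	 * core accepts that and turns the call into a no-op.
	 */
	return reset_control_deassert(rsts);
}
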
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 7d56a7ea2b2e..361c08e35dbc 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -127,7 +127,7 @@ struct rhashtable;
* @head_offset: Offset of rhash_head in struct to be hashed
* @max_size: Maximum size while expanding
* @min_size: Minimum size while shrinking
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
* @automatic_shrinking: Enable automatic shrinking of tables
* @nulls_base: Base value to generate nulls marker
* @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 43ef2c30cb0f..733d3d8181e2 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -55,7 +55,9 @@ struct anon_vma {
* is serialized by a system wide lock only visible to
* mm_take_all_locks() (mm_all_locks_mutex).
*/
- struct rb_root rb_root; /* Interval tree of private "related" vmas */
+
+ /* Interval tree of private "related" vmas */
+ struct rb_root_cached rb_root;
};
/*
@@ -93,8 +95,9 @@ enum ttu_flags {
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
* do a final flush if necessary */
- TTU_RMAP_LOCKED = 0x80 /* do not grab rmap lock:
+ TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */
+ TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
};
#ifdef CONFIG_MMU
diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h
new file mode 100644
index 000000000000..a622f029836e
--- /dev/null
+++ b/include/linux/rpmsg/qcom_glink.h
@@ -0,0 +1,27 @@
+#ifndef _LINUX_RPMSG_QCOM_GLINK_H
+#define _LINUX_RPMSG_QCOM_GLINK_H
+
+#include <linux/device.h>
+
+struct qcom_glink;
+
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM)
+
+struct qcom_glink *qcom_glink_smem_register(struct device *parent,
+ struct device_node *node);
+void qcom_glink_smem_unregister(struct qcom_glink *glink);
+
+#else
+
+static inline struct qcom_glink *
+qcom_glink_smem_register(struct device *parent,
+ struct device_node *node)
+{
+ return NULL;
+}
+
+static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {}
+
+#endif
+
+#endif
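
A remoteproc-side user would bring an edge up and down roughly like this (myedge_* names hypothetical); note the stub returns NULL, which PTR_ERR_OR_ZERO() treats as success:

static struct qcom_glink *edge;

static int myedge_start(struct device *parent, struct device_node *node)
{
	edge = qcom_glink_smem_register(parent, node);
	return PTR_ERR_OR_ZERO(edge);
}

static void myedge_stop(void)
{
	qcom_glink_smem_unregister(edge);	/* no-op in the stub variant */
}
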
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 0a0f0d14a5fb..e6d0f9c1cafd 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -72,8 +72,6 @@ extern struct class *rtc_class;
* issued through ioctl() ...
*/
struct rtc_class_ops {
- int (*open)(struct device *);
- void (*release)(struct device *);
int (*ioctl)(struct device *, unsigned int, unsigned long);
int (*read_time)(struct device *, struct rtc_time *);
int (*set_time)(struct device *, struct rtc_time *);
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 44fd002f7cd5..53fcbe9de7fd 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -22,18 +22,17 @@ extern int max_lock_depth; /* for sysctl */
* The rt_mutex structure
*
* @wait_lock: spinlock to protect the structure
- * @waiters: rbtree root to enqueue waiters in priority order
- * @waiters_leftmost: top waiter
+ * @waiters: rbtree root to enqueue waiters in priority order;
+ * caches top-waiter (leftmost node).
* @owner: the mutex owner
*/
struct rt_mutex {
raw_spinlock_t wait_lock;
- struct rb_root waiters;
- struct rb_node *waiters_leftmost;
+ struct rb_root_cached waiters;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
int save_state;
- const char *name, *file;
+ const char *name, *file;
int line;
void *magic;
#endif
@@ -84,7 +83,7 @@ do { \
#define __RT_MUTEX_INITIALIZER(mutexname) \
{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .waiters = RB_ROOT \
+ , .waiters = RB_ROOT_CACHED \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index ae0528b834cd..e784761a4443 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -32,6 +32,7 @@ struct rw_semaphore {
#define RWSEM_UNLOCKED_VALUE 0x00000000
extern void __down_read(struct rw_semaphore *sem);
+extern int __must_check __down_read_killable(struct rw_semaphore *sem);
extern int __down_read_trylock(struct rw_semaphore *sem);
extern void __down_write(struct rw_semaphore *sem);
extern int __must_check __down_write_killable(struct rw_semaphore *sem);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index dd1d14250340..0ad7318ff299 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -44,6 +44,7 @@ struct rw_semaphore {
};
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h
deleted file mode 100644
index 7343f71783dc..000000000000
--- a/include/linux/rxrpc.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* AF_RXRPC parameters
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_RXRPC_H
-#define _LINUX_RXRPC_H
-
-#include <linux/in.h>
-#include <linux/in6.h>
-
-/*
- * RxRPC socket address
- */
-struct sockaddr_rxrpc {
- sa_family_t srx_family; /* address family */
- u16 srx_service; /* service desired */
- u16 transport_type; /* type of transport socket (SOCK_DGRAM) */
- u16 transport_len; /* length of transport address */
- union {
- sa_family_t family; /* transport address family */
- struct sockaddr_in sin; /* IPv4 transport address */
- struct sockaddr_in6 sin6; /* IPv6 transport address */
- } transport;
-};
-
-/*
- * RxRPC socket options
- */
-#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */
-#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */
-#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */
-#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */
-#define RXRPC_UPGRADEABLE_SERVICE 5 /* Upgrade service[0] -> service[1] */
-#define RXRPC_SUPPORTED_CMSG 6 /* Get highest supported control message type */
-
-/*
- * RxRPC control messages
- * - If neither abort or accept are specified, the message is a data message.
- * - terminal messages mean that a user call ID tag can be recycled
- * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg()
- */
-enum rxrpc_cmsg_type {
- RXRPC_USER_CALL_ID = 1, /* sr: user call ID specifier */
- RXRPC_ABORT = 2, /* sr: abort request / notification [terminal] */
- RXRPC_ACK = 3, /* -r: [Service] RPC op final ACK received [terminal] */
- RXRPC_NET_ERROR = 5, /* -r: network error received [terminal] */
- RXRPC_BUSY = 6, /* -r: server busy received [terminal] */
- RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */
- RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */
- RXRPC_ACCEPT = 9, /* s-: [Service] accept request */
- RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
- RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
- RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
- RXRPC__SUPPORTED
-};
-
-/*
- * RxRPC security levels
- */
-#define RXRPC_SECURITY_PLAIN 0 /* plain secure-checksummed packets only */
-#define RXRPC_SECURITY_AUTH 1 /* authenticated packets */
-#define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */
-
-/*
- * RxRPC security indices
- */
-#define RXRPC_SECURITY_NONE 0 /* no security protocol */
-#define RXRPC_SECURITY_RXKAD 2 /* kaserver or kerberos 4 */
-#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */
-#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */
-
-#endif /* _LINUX_RXRPC_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c05ac5f5aa03..26a7df4e558c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -65,25 +65,23 @@ struct task_group;
*/
/* Used in tsk->state: */
-#define TASK_RUNNING 0
-#define TASK_INTERRUPTIBLE 1
-#define TASK_UNINTERRUPTIBLE 2
-#define __TASK_STOPPED 4
-#define __TASK_TRACED 8
+#define TASK_RUNNING 0x0000
+#define TASK_INTERRUPTIBLE 0x0001
+#define TASK_UNINTERRUPTIBLE 0x0002
+#define __TASK_STOPPED 0x0004
+#define __TASK_TRACED 0x0008
/* Used in tsk->exit_state: */
-#define EXIT_DEAD 16
-#define EXIT_ZOMBIE 32
+#define EXIT_DEAD 0x0010
+#define EXIT_ZOMBIE 0x0020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
-#define TASK_DEAD 64
-#define TASK_WAKEKILL 128
-#define TASK_WAKING 256
-#define TASK_PARKED 512
-#define TASK_NOLOAD 1024
-#define TASK_NEW 2048
-#define TASK_STATE_MAX 4096
-
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
+#define TASK_PARKED 0x0040
+#define TASK_DEAD 0x0080
+#define TASK_WAKEKILL 0x0100
+#define TASK_WAKING 0x0200
+#define TASK_NOLOAD 0x0400
+#define TASK_NEW 0x0800
+#define TASK_STATE_MAX 0x1000
/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
@@ -99,7 +97,8 @@ struct task_group;
/* get_task_state(): */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
- __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+ __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
+ TASK_PARKED)
#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
@@ -589,9 +588,10 @@ struct task_struct {
#ifdef CONFIG_TASKS_RCU
unsigned long rcu_tasks_nvcsw;
- bool rcu_tasks_holdout;
- struct list_head rcu_tasks_holdout_list;
+ u8 rcu_tasks_holdout;
+ u8 rcu_tasks_idx;
int rcu_tasks_idle_cpu;
+ struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */
struct sched_info sched_info;
@@ -811,8 +811,7 @@ struct task_struct {
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task: */
- struct rb_root pi_waiters;
- struct rb_node *pi_waiters_leftmost;
+ struct rb_root_cached pi_waiters;
/* Updated under owner's pi_lock and rq lock */
struct task_struct *pi_top_task;
/* Deadlock detection and priority inheritance handling: */
@@ -846,7 +845,17 @@ struct task_struct {
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
- gfp_t lockdep_reclaim_gfp;
+#endif
+
+#ifdef CONFIG_LOCKDEP_CROSSRELEASE
+#define MAX_XHLOCKS_NR 64UL
+ struct hist_lock *xhlocks; /* Crossrelease history locks */
+ unsigned int xhlock_idx;
+ /* For restoring at history boundaries */
+ unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
+ unsigned int hist_id;
+ /* For overwrite check at each context exit */
+ unsigned int hist_id_save[XHLOCK_CTX_NR];
#endif
#ifdef CONFIG_UBSAN
@@ -898,8 +907,9 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
-#ifdef CONFIG_INTEL_RDT_A
- int closid;
+#ifdef CONFIG_INTEL_RDT
+ u32 closid;
+ u32 rmid;
#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
@@ -1232,6 +1242,36 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
+#define TASK_REPORT_IDLE (TASK_REPORT + 1)
+#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
+
+static inline unsigned int __get_task_state(struct task_struct *tsk)
+{
+ unsigned int tsk_state = READ_ONCE(tsk->state);
+ unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
+
+ if (tsk_state == TASK_IDLE)
+ state = TASK_REPORT_IDLE;
+
+ return fls(state);
+}
+
+static inline char __task_state_to_char(unsigned int state)
+{
+ static const char state_char[] = "RSDTtXZPI";
+
+ BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
+
+ return state_char[state];
+}
+
+static inline char task_state_to_char(struct task_struct *tsk)
+{
+ return __task_state_to_char(__get_task_state(tsk));
+}
+
/**
* is_global_init - check if a task structure is init. Since init
* is free to have sub-threads we need to check tgid.
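
Worked through the helpers above: TASK_INTERRUPTIBLE (0x0001) survives the TASK_REPORT mask, fls() yields 1, and state_char[1] is 'S'; an idle task is special-cased to TASK_REPORT_IDLE (0x0080), fls() yields 8, and state_char[8] is 'I'. A sketch of a seq_file consumer:

/* e.g. in a /proc status printer */
static void show_state(struct seq_file *m, struct task_struct *tsk)
{
	seq_printf(m, "State:\t%c\n", task_state_to_char(tsk));
}
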
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index e0eaee54c5a4..5d58d49e9f87 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -6,6 +6,7 @@
*/
struct task_struct;
+struct pid_namespace;
extern void dump_cpu_task(int cpu);
@@ -34,7 +35,8 @@ extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_SCHED_DEBUG
struct seq_file;
-extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_show_task(struct task_struct *p,
+ struct pid_namespace *ns, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 2b24a6974847..ae53e413fb13 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -88,7 +88,7 @@ extern void mmput(struct mm_struct *);
/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
*/
-extern void mmput_async(struct mm_struct *);
+void mmput_async(struct mm_struct *);
#endif
/* Grab a reference to a task's mm, if it is not already going away */
@@ -167,6 +167,14 @@ static inline gfp_t current_gfp_context(gfp_t flags)
return flags;
}
+#ifdef CONFIG_LOCKDEP
+extern void fs_reclaim_acquire(gfp_t gfp_mask);
+extern void fs_reclaim_release(gfp_t gfp_mask);
+#else
+static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
+static inline void fs_reclaim_release(gfp_t gfp_mask) { }
+#endif
+
static inline unsigned int memalloc_noio_save(void)
{
unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index c97e5f096927..79a2a744648d 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -30,7 +30,6 @@ extern int lockdep_tasklist_lock_is_held(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
-extern void init_idle_bootup_task(struct task_struct *idle);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 5d5415e129d4..3c07e4135127 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -36,7 +36,8 @@ struct user_struct {
struct hlist_node uidhash_node;
kuid_t uid;
-#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \
+ defined(CONFIG_NET)
atomic_long_t locked_vm;
#endif
};
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 99e866487e2f..82b171e1aa0b 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -273,87 +273,85 @@ struct sctp_init_chunk {
/* Section 3.3.2.1. IPv4 Address Parameter (5) */
-typedef struct sctp_ipv4addr_param {
+struct sctp_ipv4addr_param {
struct sctp_paramhdr param_hdr;
- struct in_addr addr;
-} sctp_ipv4addr_param_t;
+ struct in_addr addr;
+};
/* Section 3.3.2.1. IPv6 Address Parameter (6) */
-typedef struct sctp_ipv6addr_param {
+struct sctp_ipv6addr_param {
struct sctp_paramhdr param_hdr;
struct in6_addr addr;
-} sctp_ipv6addr_param_t;
+};
/* Section 3.3.2.1 Cookie Preservative (9) */
-typedef struct sctp_cookie_preserve_param {
+struct sctp_cookie_preserve_param {
struct sctp_paramhdr param_hdr;
- __be32 lifespan_increment;
-} sctp_cookie_preserve_param_t;
+ __be32 lifespan_increment;
+};
/* Section 3.3.2.1 Host Name Address (11) */
-typedef struct sctp_hostname_param {
+struct sctp_hostname_param {
struct sctp_paramhdr param_hdr;
uint8_t hostname[0];
-} sctp_hostname_param_t;
+};
/* Section 3.3.2.1 Supported Address Types (12) */
-typedef struct sctp_supported_addrs_param {
+struct sctp_supported_addrs_param {
struct sctp_paramhdr param_hdr;
__be16 types[0];
-} sctp_supported_addrs_param_t;
-
-/* Appendix A. ECN Capable (32768) */
-typedef struct sctp_ecn_capable_param {
- struct sctp_paramhdr param_hdr;
-} sctp_ecn_capable_param_t;
+};
/* ADDIP Section 3.2.6 Adaptation Layer Indication */
-typedef struct sctp_adaptation_ind_param {
+struct sctp_adaptation_ind_param {
struct sctp_paramhdr param_hdr;
__be32 adaptation_ind;
-} sctp_adaptation_ind_param_t;
+};
/* ADDIP Section 4.2.7 Supported Extensions Parameter */
-typedef struct sctp_supported_ext_param {
+struct sctp_supported_ext_param {
struct sctp_paramhdr param_hdr;
__u8 chunks[0];
-} sctp_supported_ext_param_t;
+};
/* AUTH Section 3.1 Random */
-typedef struct sctp_random_param {
+struct sctp_random_param {
struct sctp_paramhdr param_hdr;
__u8 random_val[0];
-} sctp_random_param_t;
+};
/* AUTH Section 3.2 Chunk List */
-typedef struct sctp_chunks_param {
+struct sctp_chunks_param {
struct sctp_paramhdr param_hdr;
__u8 chunks[0];
-} sctp_chunks_param_t;
+};
/* AUTH Section 3.3 HMAC Algorithm */
-typedef struct sctp_hmac_algo_param {
+struct sctp_hmac_algo_param {
struct sctp_paramhdr param_hdr;
__be16 hmac_ids[0];
-} sctp_hmac_algo_param_t;
+};
/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
* The INIT ACK chunk is used to acknowledge the initiation of an SCTP
* association.
*/
-typedef struct sctp_init_chunk sctp_initack_chunk_t;
+struct sctp_initack_chunk {
+ struct sctp_chunkhdr chunk_hdr;
+ struct sctp_inithdr init_hdr;
+};
/* Section 3.3.3.1 State Cookie (7) */
-typedef struct sctp_cookie_param {
+struct sctp_cookie_param {
struct sctp_paramhdr p;
__u8 body[0];
-} sctp_cookie_param_t;
+};
/* Section 3.3.3.1 Unrecognized Parameters (8) */
-typedef struct sctp_unrecognized_param {
+struct sctp_unrecognized_param {
struct sctp_paramhdr param_hdr;
struct sctp_paramhdr unrecognized;
-} sctp_unrecognized_param_t;
+};
@@ -365,30 +363,28 @@ typedef struct sctp_unrecognized_param {
* subsequences of DATA chunks as represented by their TSNs.
*/
-typedef struct sctp_gap_ack_block {
+struct sctp_gap_ack_block {
__be16 start;
__be16 end;
-} sctp_gap_ack_block_t;
-
-typedef __be32 sctp_dup_tsn_t;
+};
-typedef union {
- sctp_gap_ack_block_t gab;
- sctp_dup_tsn_t dup;
-} sctp_sack_variable_t;
+union sctp_sack_variable {
+ struct sctp_gap_ack_block gab;
+ __be32 dup;
+};
-typedef struct sctp_sackhdr {
+struct sctp_sackhdr {
__be32 cum_tsn_ack;
__be32 a_rwnd;
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
- sctp_sack_variable_t variable[0];
-} sctp_sackhdr_t;
+ union sctp_sack_variable variable[0];
+};
-typedef struct sctp_sack_chunk {
+struct sctp_sack_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_sackhdr_t sack_hdr;
-} sctp_sack_chunk_t;
+ struct sctp_sackhdr sack_hdr;
+};
/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4):
@@ -398,49 +394,49 @@ typedef struct sctp_sack_chunk {
* the present association.
*/
-typedef struct sctp_heartbeathdr {
+struct sctp_heartbeathdr {
struct sctp_paramhdr info;
-} sctp_heartbeathdr_t;
+};
-typedef struct sctp_heartbeat_chunk {
+struct sctp_heartbeat_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_heartbeathdr_t hb_hdr;
-} sctp_heartbeat_chunk_t;
+ struct sctp_heartbeathdr hb_hdr;
+};
/* For the abort and shutdown ACK we must carry the init tag in the
* common header. Just the common header is all that is needed with a
* chunk descriptor.
*/
-typedef struct sctp_abort_chunk {
+struct sctp_abort_chunk {
struct sctp_chunkhdr uh;
-} sctp_abort_chunk_t;
+};
/* For the graceful shutdown we must carry the tag (in common header)
* and the highest consecutive acking value.
*/
-typedef struct sctp_shutdownhdr {
+struct sctp_shutdownhdr {
__be32 cum_tsn_ack;
-} sctp_shutdownhdr_t;
+};
-struct sctp_shutdown_chunk_t {
+struct sctp_shutdown_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_shutdownhdr_t shutdown_hdr;
+ struct sctp_shutdownhdr shutdown_hdr;
};
/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */
-typedef struct sctp_errhdr {
+struct sctp_errhdr {
__be16 cause;
__be16 length;
__u8 variable[0];
-} sctp_errhdr_t;
+};
-typedef struct sctp_operr_chunk {
+struct sctp_operr_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_errhdr_t err_hdr;
-} sctp_operr_chunk_t;
+ struct sctp_errhdr err_hdr;
+};
/* RFC 2960 3.3.10 - Operation Error
*
@@ -461,7 +457,7 @@ typedef struct sctp_operr_chunk {
* 9 No User Data
* 10 Cookie Received While Shutting Down
*/
-typedef enum {
+enum sctp_error {
SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00),
SCTP_ERROR_INV_STRM = cpu_to_be16(0x01),
@@ -516,33 +512,28 @@ typedef enum {
* 0x0105 Unsupported HMAC Identifier
*/
SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105)
-} sctp_error_t;
+};
/* RFC 2960. Appendix A. Explicit Congestion Notification.
* Explicit Congestion Notification Echo (ECNE) (12)
*/
-typedef struct sctp_ecnehdr {
+struct sctp_ecnehdr {
__be32 lowest_tsn;
-} sctp_ecnehdr_t;
+};
-typedef struct sctp_ecne_chunk {
+struct sctp_ecne_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_ecnehdr_t ence_hdr;
-} sctp_ecne_chunk_t;
+ struct sctp_ecnehdr ence_hdr;
+};
/* RFC 2960. Appendix A. Explicit Congestion Notification.
* Congestion Window Reduced (CWR) (13)
*/
-typedef struct sctp_cwrhdr {
+struct sctp_cwrhdr {
__be32 lowest_tsn;
-} sctp_cwrhdr_t;
-
-typedef struct sctp_cwr_chunk {
- struct sctp_chunkhdr chunk_hdr;
- sctp_cwrhdr_t cwr_hdr;
-} sctp_cwr_chunk_t;
+};
/* PR-SCTP
* 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
@@ -638,20 +629,20 @@ struct sctp_fwdtsn_chunk {
* The ASCONF Parameter Response is used in the ASCONF-ACK to
* report status of ASCONF processing.
*/
-typedef struct sctp_addip_param {
- struct sctp_paramhdr param_hdr;
- __be32 crr_id;
-} sctp_addip_param_t;
+struct sctp_addip_param {
+ struct sctp_paramhdr param_hdr;
+ __be32 crr_id;
+};
-typedef struct sctp_addiphdr {
+struct sctp_addiphdr {
__be32 serial;
__u8 params[0];
-} sctp_addiphdr_t;
+};
-typedef struct sctp_addip_chunk {
+struct sctp_addip_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_addiphdr_t addip_hdr;
-} sctp_addip_chunk_t;
+ struct sctp_addiphdr addip_hdr;
+};
/* AUTH
* Section 4.1 Authentication Chunk (AUTH)
@@ -702,16 +693,16 @@ typedef struct sctp_addip_chunk {
 * HMAC: n bytes (unsigned integer). This holds the result of the HMAC
* calculation.
*/
-typedef struct sctp_authhdr {
+struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
__u8 hmac[0];
-} sctp_authhdr_t;
+};
-typedef struct sctp_auth_chunk {
+struct sctp_auth_chunk {
struct sctp_chunkhdr chunk_hdr;
- sctp_authhdr_t auth_hdr;
-} sctp_auth_chunk_t;
+ struct sctp_authhdr auth_hdr;
+};
struct sctp_infox {
struct sctp_info *sctpinfo;
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index ecc296c137cd..c8bef436b61d 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,7 +3,8 @@
#include <uapi/linux/seccomp.h>
-#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC)
+#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
+ SECCOMP_FILTER_FLAG_LOG)
#ifdef CONFIG_SECCOMP
diff --git a/include/linux/security.h b/include/linux/security.h
index b6ea1dc9cc9d..ce6265960d6c 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -85,12 +85,13 @@ extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
extern int cap_bprm_set_creds(struct linux_binprm *bprm);
-extern int cap_bprm_secureexec(struct linux_binprm *bprm);
extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
extern int cap_inode_need_killpriv(struct dentry *dentry);
extern int cap_inode_killpriv(struct dentry *dentry);
+extern int cap_inode_getsecurity(struct inode *inode, const char *name,
+ void **buffer, bool alloc);
extern int cap_mmap_addr(unsigned long addr);
extern int cap_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags);
@@ -232,7 +233,6 @@ int security_bprm_set_creds(struct linux_binprm *bprm);
int security_bprm_check(struct linux_binprm *bprm);
void security_bprm_committing_creds(struct linux_binprm *bprm);
void security_bprm_committed_creds(struct linux_binprm *bprm);
-int security_bprm_secureexec(struct linux_binprm *bprm);
int security_sb_alloc(struct super_block *sb);
void security_sb_free(struct super_block *sb);
int security_sb_copy_data(char *orig, char *copy);
@@ -318,7 +318,6 @@ int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
int security_file_open(struct file *file, const struct cred *cred);
-int security_task_create(unsigned long clone_flags);
int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -541,11 +540,6 @@ static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
{
}
-static inline int security_bprm_secureexec(struct linux_binprm *bprm)
-{
- return cap_bprm_secureexec(bprm);
-}
-
static inline int security_sb_alloc(struct super_block *sb)
{
return 0;
@@ -885,11 +879,6 @@ static inline int security_file_open(struct file *file,
return 0;
}
-static inline int security_task_create(unsigned long clone_flags)
-{
- return 0;
-}
-
static inline int security_task_alloc(struct task_struct *task,
unsigned long clone_flags)
{
diff --git a/include/linux/seg6_local.h b/include/linux/seg6_local.h
new file mode 100644
index 000000000000..ee63e76fe0c7
--- /dev/null
+++ b/include/linux/seg6_local.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_SEG6_LOCAL_H
+#define _LINUX_SEG6_LOCAL_H
+
+#include <uapi/linux/seg6_local.h>
+
+#endif
diff --git a/include/linux/sem.h b/include/linux/sem.h
index de2deb8676bd..0083128318f6 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/cache.h>
+#include <linux/time64.h>
#include <uapi/linux/sem.h>
struct task_struct;
@@ -30,7 +31,7 @@ struct sem {
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
- time_t sem_ctime; /* create/last semctl() time */
+ time64_t sem_ctime; /* create/last semctl() time */
struct list_head pending_alter; /* pending operations */
/* that alter the array */
struct list_head pending_const; /* pending complex operations */
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 61fbb440449c..a27ef5f56431 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -80,9 +80,10 @@ struct uart_8250_ops {
};
struct uart_8250_em485 {
- struct timer_list start_tx_timer; /* "rs485 start tx" timer */
- struct timer_list stop_tx_timer; /* "rs485 stop tx" timer */
- struct timer_list *active_timer; /* pointer to active timer */
+ struct hrtimer start_tx_timer; /* "rs485 start tx" timer */
+ struct hrtimer stop_tx_timer; /* "rs485 stop tx" timer */
+ struct hrtimer *active_timer; /* pointer to active timer */
+ struct uart_8250_port *port; /* for hrtimer callbacks */
};
/*
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 1775500294bb..5553e04e59c9 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -20,7 +20,7 @@
#ifndef LINUX_SERIAL_CORE_H
#define LINUX_SERIAL_CORE_H
-
+#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/circ_buf.h>
@@ -144,7 +144,7 @@ struct uart_port {
unsigned char x_char; /* xon/xoff char */
unsigned char regshift; /* reg offset shift */
unsigned char iotype; /* io access style */
- unsigned char unused1;
+ unsigned char quirks; /* internal quirks */
#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */
#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */
@@ -155,6 +155,9 @@ struct uart_port {
#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */
#define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */
+	/* quirks must be updated while holding the port mutex */
+#define UPQ_NO_TXEN_TEST BIT(0)
+
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
struct uart_state *state; /* pointer to parent state */
@@ -175,7 +178,6 @@ struct uart_port {
* [for bit definitions in the UPF_CHANGE_MASK]
*
* Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable
- * except bit 15 (UPF_NO_TXEN_TEST) which is masked off.
* The remaining bits are serial-core specific and not modifiable by
* userspace.
*/
@@ -192,7 +194,6 @@ struct uart_port {
#define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ )
#define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ )
#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
-#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
#define UPF_NO_THRE_TEST ((__force upf_t) (1 << 19))
@@ -246,7 +247,6 @@ struct uart_port {
struct device *dev; /* parent device */
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
- unsigned char irq_wake;
unsigned char unused[2];
const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
new file mode 100644
index 000000000000..4a906f560817
--- /dev/null
+++ b/include/linux/sfp.h
@@ -0,0 +1,434 @@
+#ifndef LINUX_SFP_H
+#define LINUX_SFP_H
+
+#include <linux/phy.h>
+
+struct __packed sfp_eeprom_base {
+ u8 phys_id;
+ u8 phys_ext_id;
+ u8 connector;
+#if defined __BIG_ENDIAN_BITFIELD
+ u8 e10g_base_er:1;
+ u8 e10g_base_lrm:1;
+ u8 e10g_base_lr:1;
+ u8 e10g_base_sr:1;
+ u8 if_1x_sx:1;
+ u8 if_1x_lx:1;
+ u8 if_1x_copper_active:1;
+ u8 if_1x_copper_passive:1;
+
+ u8 escon_mmf_1310_led:1;
+ u8 escon_smf_1310_laser:1;
+ u8 sonet_oc192_short_reach:1;
+ u8 sonet_reach_bit1:1;
+ u8 sonet_reach_bit2:1;
+ u8 sonet_oc48_long_reach:1;
+ u8 sonet_oc48_intermediate_reach:1;
+ u8 sonet_oc48_short_reach:1;
+
+ u8 unallocated_5_7:1;
+ u8 sonet_oc12_smf_long_reach:1;
+ u8 sonet_oc12_smf_intermediate_reach:1;
+ u8 sonet_oc12_short_reach:1;
+ u8 unallocated_5_3:1;
+ u8 sonet_oc3_smf_long_reach:1;
+ u8 sonet_oc3_smf_intermediate_reach:1;
+ u8 sonet_oc3_short_reach:1;
+
+ u8 e_base_px:1;
+ u8 e_base_bx10:1;
+ u8 e100_base_fx:1;
+ u8 e100_base_lx:1;
+ u8 e1000_base_t:1;
+ u8 e1000_base_cx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_sx:1;
+
+ u8 fc_ll_v:1;
+ u8 fc_ll_s:1;
+ u8 fc_ll_i:1;
+ u8 fc_ll_l:1;
+ u8 fc_ll_m:1;
+ u8 fc_tech_sa:1;
+ u8 fc_tech_lc:1;
+ u8 fc_tech_electrical_inter_enclosure:1;
+
+ u8 fc_tech_electrical_intra_enclosure:1;
+ u8 fc_tech_sn:1;
+ u8 fc_tech_sl:1;
+ u8 fc_tech_ll:1;
+ u8 sfp_ct_active:1;
+ u8 sfp_ct_passive:1;
+ u8 unallocated_8_1:1;
+ u8 unallocated_8_0:1;
+
+ u8 fc_media_tw:1;
+ u8 fc_media_tp:1;
+ u8 fc_media_mi:1;
+ u8 fc_media_tv:1;
+ u8 fc_media_m6:1;
+ u8 fc_media_m5:1;
+ u8 unallocated_9_1:1;
+ u8 fc_media_sm:1;
+
+ u8 fc_speed_1200:1;
+ u8 fc_speed_800:1;
+ u8 fc_speed_1600:1;
+ u8 fc_speed_400:1;
+ u8 fc_speed_3200:1;
+ u8 fc_speed_200:1;
+ u8 unallocated_10_1:1;
+ u8 fc_speed_100:1;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+ u8 if_1x_copper_passive:1;
+ u8 if_1x_copper_active:1;
+ u8 if_1x_lx:1;
+ u8 if_1x_sx:1;
+ u8 e10g_base_sr:1;
+ u8 e10g_base_lr:1;
+ u8 e10g_base_lrm:1;
+ u8 e10g_base_er:1;
+
+ u8 sonet_oc3_short_reach:1;
+ u8 sonet_oc3_smf_intermediate_reach:1;
+ u8 sonet_oc3_smf_long_reach:1;
+ u8 unallocated_5_3:1;
+ u8 sonet_oc12_short_reach:1;
+ u8 sonet_oc12_smf_intermediate_reach:1;
+ u8 sonet_oc12_smf_long_reach:1;
+ u8 unallocated_5_7:1;
+
+ u8 sonet_oc48_short_reach:1;
+ u8 sonet_oc48_intermediate_reach:1;
+ u8 sonet_oc48_long_reach:1;
+ u8 sonet_reach_bit2:1;
+ u8 sonet_reach_bit1:1;
+ u8 sonet_oc192_short_reach:1;
+ u8 escon_smf_1310_laser:1;
+ u8 escon_mmf_1310_led:1;
+
+ u8 e1000_base_sx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_cx:1;
+ u8 e1000_base_t:1;
+ u8 e100_base_lx:1;
+ u8 e100_base_fx:1;
+ u8 e_base_bx10:1;
+ u8 e_base_px:1;
+
+ u8 fc_tech_electrical_inter_enclosure:1;
+ u8 fc_tech_lc:1;
+ u8 fc_tech_sa:1;
+ u8 fc_ll_m:1;
+ u8 fc_ll_l:1;
+ u8 fc_ll_i:1;
+ u8 fc_ll_s:1;
+ u8 fc_ll_v:1;
+
+ u8 unallocated_8_0:1;
+ u8 unallocated_8_1:1;
+ u8 sfp_ct_passive:1;
+ u8 sfp_ct_active:1;
+ u8 fc_tech_ll:1;
+ u8 fc_tech_sl:1;
+ u8 fc_tech_sn:1;
+ u8 fc_tech_electrical_intra_enclosure:1;
+
+ u8 fc_media_sm:1;
+ u8 unallocated_9_1:1;
+ u8 fc_media_m5:1;
+ u8 fc_media_m6:1;
+ u8 fc_media_tv:1;
+ u8 fc_media_mi:1;
+ u8 fc_media_tp:1;
+ u8 fc_media_tw:1;
+
+ u8 fc_speed_100:1;
+ u8 unallocated_10_1:1;
+ u8 fc_speed_200:1;
+ u8 fc_speed_3200:1;
+ u8 fc_speed_400:1;
+ u8 fc_speed_1600:1;
+ u8 fc_speed_800:1;
+ u8 fc_speed_1200:1;
+#else
+#error Unknown Endian
+#endif
+ u8 encoding;
+ u8 br_nominal;
+ u8 rate_id;
+ u8 link_len[6];
+ char vendor_name[16];
+ u8 extended_cc;
+ char vendor_oui[3];
+ char vendor_pn[16];
+ char vendor_rev[4];
+ union {
+ __be16 optical_wavelength;
+ u8 cable_spec;
+ };
+ u8 reserved62;
+ u8 cc_base;
+};
+
+struct __packed sfp_eeprom_ext {
+ __be16 options;
+ u8 br_max;
+ u8 br_min;
+ char vendor_sn[16];
+ char datecode[8];
+ u8 diagmon;
+ u8 enhopts;
+ u8 sff8472_compliance;
+ u8 cc_ext;
+};
+
+struct __packed sfp_eeprom_id {
+ struct sfp_eeprom_base base;
+ struct sfp_eeprom_ext ext;
+};
+
+/* SFP EEPROM registers */
+enum {
+ SFP_PHYS_ID = 0x00,
+ SFP_PHYS_EXT_ID = 0x01,
+ SFP_CONNECTOR = 0x02,
+ SFP_COMPLIANCE = 0x03,
+ SFP_ENCODING = 0x0b,
+ SFP_BR_NOMINAL = 0x0c,
+ SFP_RATE_ID = 0x0d,
+ SFP_LINK_LEN_SM_KM = 0x0e,
+ SFP_LINK_LEN_SM_100M = 0x0f,
+ SFP_LINK_LEN_50UM_OM2_10M = 0x10,
+ SFP_LINK_LEN_62_5UM_OM1_10M = 0x11,
+ SFP_LINK_LEN_COPPER_1M = 0x12,
+ SFP_LINK_LEN_50UM_OM4_10M = 0x12,
+ SFP_LINK_LEN_50UM_OM3_10M = 0x13,
+ SFP_VENDOR_NAME = 0x14,
+ SFP_VENDOR_OUI = 0x25,
+ SFP_VENDOR_PN = 0x28,
+ SFP_VENDOR_REV = 0x38,
+ SFP_OPTICAL_WAVELENGTH_MSB = 0x3c,
+ SFP_OPTICAL_WAVELENGTH_LSB = 0x3d,
+ SFP_CABLE_SPEC = 0x3c,
+ SFP_CC_BASE = 0x3f,
+ SFP_OPTIONS = 0x40, /* 2 bytes, MSB, LSB */
+ SFP_BR_MAX = 0x42,
+ SFP_BR_MIN = 0x43,
+ SFP_VENDOR_SN = 0x44,
+ SFP_DATECODE = 0x54,
+ SFP_DIAGMON = 0x5c,
+ SFP_ENHOPTS = 0x5d,
+ SFP_SFF8472_COMPLIANCE = 0x5e,
+ SFP_CC_EXT = 0x5f,
+
+ SFP_PHYS_ID_SFP = 0x03,
+ SFP_PHYS_EXT_ID_SFP = 0x04,
+ SFP_CONNECTOR_UNSPEC = 0x00,
+ /* codes 01-05 not supportable on SFP, but some modules have single SC */
+ SFP_CONNECTOR_SC = 0x01,
+ SFP_CONNECTOR_FIBERJACK = 0x06,
+ SFP_CONNECTOR_LC = 0x07,
+ SFP_CONNECTOR_MT_RJ = 0x08,
+ SFP_CONNECTOR_MU = 0x09,
+ SFP_CONNECTOR_SG = 0x0a,
+ SFP_CONNECTOR_OPTICAL_PIGTAIL = 0x0b,
+ SFP_CONNECTOR_MPO_1X12 = 0x0c,
+ SFP_CONNECTOR_MPO_2X16 = 0x0d,
+ SFP_CONNECTOR_HSSDC_II = 0x20,
+ SFP_CONNECTOR_COPPER_PIGTAIL = 0x21,
+ SFP_CONNECTOR_RJ45 = 0x22,
+ SFP_CONNECTOR_NOSEPARATE = 0x23,
+ SFP_CONNECTOR_MXC_2X16 = 0x24,
+ SFP_ENCODING_UNSPEC = 0x00,
+ SFP_ENCODING_8B10B = 0x01,
+ SFP_ENCODING_4B5B = 0x02,
+ SFP_ENCODING_NRZ = 0x03,
+ SFP_ENCODING_8472_MANCHESTER = 0x04,
+ SFP_ENCODING_8472_SONET = 0x05,
+ SFP_ENCODING_8472_64B66B = 0x06,
+ SFP_ENCODING_256B257B = 0x07,
+ SFP_ENCODING_PAM4 = 0x08,
+ SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13),
+ SFP_OPTIONS_PAGING_A2 = BIT(12),
+ SFP_OPTIONS_RETIMER = BIT(11),
+ SFP_OPTIONS_COOLED_XCVR = BIT(10),
+ SFP_OPTIONS_POWER_DECL = BIT(9),
+ SFP_OPTIONS_RX_LINEAR_OUT = BIT(8),
+ SFP_OPTIONS_RX_DECISION_THRESH = BIT(7),
+ SFP_OPTIONS_TUNABLE_TX = BIT(6),
+ SFP_OPTIONS_RATE_SELECT = BIT(5),
+ SFP_OPTIONS_TX_DISABLE = BIT(4),
+ SFP_OPTIONS_TX_FAULT = BIT(3),
+ SFP_OPTIONS_LOS_INVERTED = BIT(2),
+ SFP_OPTIONS_LOS_NORMAL = BIT(1),
+ SFP_DIAGMON_DDM = BIT(6),
+ SFP_DIAGMON_INT_CAL = BIT(5),
+ SFP_DIAGMON_EXT_CAL = BIT(4),
+ SFP_DIAGMON_RXPWR_AVG = BIT(3),
+ SFP_DIAGMON_ADDRMODE = BIT(2),
+ SFP_ENHOPTS_ALARMWARN = BIT(7),
+ SFP_ENHOPTS_SOFT_TX_DISABLE = BIT(6),
+ SFP_ENHOPTS_SOFT_TX_FAULT = BIT(5),
+ SFP_ENHOPTS_SOFT_RX_LOS = BIT(4),
+ SFP_ENHOPTS_SOFT_RATE_SELECT = BIT(3),
+ SFP_ENHOPTS_APP_SELECT_SFF8079 = BIT(2),
+ SFP_ENHOPTS_SOFT_RATE_SFF8431 = BIT(1),
+ SFP_SFF8472_COMPLIANCE_NONE = 0x00,
+ SFP_SFF8472_COMPLIANCE_REV9_3 = 0x01,
+ SFP_SFF8472_COMPLIANCE_REV9_5 = 0x02,
+ SFP_SFF8472_COMPLIANCE_REV10_2 = 0x03,
+ SFP_SFF8472_COMPLIANCE_REV10_4 = 0x04,
+ SFP_SFF8472_COMPLIANCE_REV11_0 = 0x05,
+ SFP_SFF8472_COMPLIANCE_REV11_3 = 0x06,
+ SFP_SFF8472_COMPLIANCE_REV11_4 = 0x07,
+ SFP_SFF8472_COMPLIANCE_REV12_0 = 0x08,
+};
+
+/* SFP Diagnostics */
+enum {
+	/* Alarms and warnings are stored MSB at the lower address, then LSB */
+ SFP_TEMP_HIGH_ALARM = 0x00,
+ SFP_TEMP_LOW_ALARM = 0x02,
+ SFP_TEMP_HIGH_WARN = 0x04,
+ SFP_TEMP_LOW_WARN = 0x06,
+ SFP_VOLT_HIGH_ALARM = 0x08,
+ SFP_VOLT_LOW_ALARM = 0x0a,
+ SFP_VOLT_HIGH_WARN = 0x0c,
+ SFP_VOLT_LOW_WARN = 0x0e,
+ SFP_BIAS_HIGH_ALARM = 0x10,
+ SFP_BIAS_LOW_ALARM = 0x12,
+ SFP_BIAS_HIGH_WARN = 0x14,
+ SFP_BIAS_LOW_WARN = 0x16,
+ SFP_TXPWR_HIGH_ALARM = 0x18,
+ SFP_TXPWR_LOW_ALARM = 0x1a,
+ SFP_TXPWR_HIGH_WARN = 0x1c,
+ SFP_TXPWR_LOW_WARN = 0x1e,
+ SFP_RXPWR_HIGH_ALARM = 0x20,
+ SFP_RXPWR_LOW_ALARM = 0x22,
+ SFP_RXPWR_HIGH_WARN = 0x24,
+ SFP_RXPWR_LOW_WARN = 0x26,
+ SFP_LASER_TEMP_HIGH_ALARM = 0x28,
+ SFP_LASER_TEMP_LOW_ALARM = 0x2a,
+ SFP_LASER_TEMP_HIGH_WARN = 0x2c,
+ SFP_LASER_TEMP_LOW_WARN = 0x2e,
+ SFP_TEC_CUR_HIGH_ALARM = 0x30,
+ SFP_TEC_CUR_LOW_ALARM = 0x32,
+ SFP_TEC_CUR_HIGH_WARN = 0x34,
+ SFP_TEC_CUR_LOW_WARN = 0x36,
+ SFP_CAL_RXPWR4 = 0x38,
+ SFP_CAL_RXPWR3 = 0x3c,
+ SFP_CAL_RXPWR2 = 0x40,
+ SFP_CAL_RXPWR1 = 0x44,
+ SFP_CAL_RXPWR0 = 0x48,
+ SFP_CAL_TXI_SLOPE = 0x4c,
+ SFP_CAL_TXI_OFFSET = 0x4e,
+ SFP_CAL_TXPWR_SLOPE = 0x50,
+ SFP_CAL_TXPWR_OFFSET = 0x52,
+ SFP_CAL_T_SLOPE = 0x54,
+ SFP_CAL_T_OFFSET = 0x56,
+ SFP_CAL_V_SLOPE = 0x58,
+ SFP_CAL_V_OFFSET = 0x5a,
+ SFP_CHKSUM = 0x5f,
+
+ SFP_TEMP = 0x60,
+ SFP_VCC = 0x62,
+ SFP_TX_BIAS = 0x64,
+ SFP_TX_POWER = 0x66,
+ SFP_RX_POWER = 0x68,
+ SFP_LASER_TEMP = 0x6a,
+ SFP_TEC_CUR = 0x6c,
+
+ SFP_STATUS = 0x6e,
+ SFP_ALARM = 0x70,
+
+ SFP_EXT_STATUS = 0x76,
+ SFP_VSL = 0x78,
+ SFP_PAGE = 0x7f,
+};
+
+struct device_node;
+struct ethtool_eeprom;
+struct ethtool_modinfo;
+struct net_device;
+struct sfp_bus;
+
+struct sfp_upstream_ops {
+ int (*module_insert)(void *, const struct sfp_eeprom_id *id);
+ void (*module_remove)(void *);
+ void (*link_down)(void *);
+ void (*link_up)(void *);
+ int (*connect_phy)(void *, struct phy_device *);
+ void (*disconnect_phy)(void *);
+};
+
+#if IS_ENABLED(CONFIG_SFP)
+int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ unsigned long *support);
+phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id);
+void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ unsigned long *support);
+
+int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
+int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
+ u8 *data);
+void sfp_upstream_start(struct sfp_bus *bus);
+void sfp_upstream_stop(struct sfp_bus *bus);
+struct sfp_bus *sfp_register_upstream(struct device_node *np,
+ struct net_device *ndev, void *upstream,
+ const struct sfp_upstream_ops *ops);
+void sfp_unregister_upstream(struct sfp_bus *bus);
+#else
+static inline int sfp_parse_port(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *support)
+{
+ return PORT_OTHER;
+}
+
+static inline phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
+{
+ return PHY_INTERFACE_MODE_NA;
+}
+
+static inline void sfp_parse_support(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *support)
+{
+}
+
+static inline int sfp_get_module_info(struct sfp_bus *bus,
+ struct ethtool_modinfo *modinfo)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int sfp_get_module_eeprom(struct sfp_bus *bus,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void sfp_upstream_start(struct sfp_bus *bus)
+{
+}
+
+static inline void sfp_upstream_stop(struct sfp_bus *bus)
+{
+}
+
+static inline struct sfp_bus *sfp_register_upstream(struct device_node *np,
+ struct net_device *ndev, void *upstream,
+ const struct sfp_upstream_ops *ops)
+{
+ return (struct sfp_bus *)-1;
+}
+
+static inline void sfp_unregister_upstream(struct sfp_bus *bus)
+{
+}
+#endif
+
+#endif
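A hedged sketch of how a MAC driver might sit upstream of an SFP cage using the API declared above; the ops, callbacks, and device-node lookup are hypothetical:

static const struct sfp_upstream_ops demo_sfp_ops = {
	.module_insert	= demo_module_insert,	/* parse EEPROM id, pick mode */
	.module_remove	= demo_module_remove,
	.link_up	= demo_link_up,
	.link_down	= demo_link_down,
};

/* in probe, with 'sfp_np' pointing at the cage's device node: */
priv->sfp_bus = sfp_register_upstream(sfp_np, ndev, priv, &demo_sfp_ops);
if (!priv->sfp_bus)
	return -ENOMEM;	/* illustrative error handling */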
diff --git a/include/linux/shm.h b/include/linux/shm.h
index 0fb7061ec54c..74a4b3b64352 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -12,9 +12,9 @@ struct shmid_kernel /* private to the kernel */
struct file *shm_file;
unsigned long shm_nattch;
unsigned long shm_segsz;
- time_t shm_atim;
- time_t shm_dtim;
- time_t shm_ctim;
+ time64_t shm_atim;
+ time64_t shm_dtim;
+ time64_t shm_ctim;
pid_t shm_cprid;
pid_t shm_lprid;
struct user_struct *mlock_user;
@@ -27,23 +27,6 @@ struct shmid_kernel /* private to the kernel */
/* shm_mode upper byte flags */
#define SHM_DEST 01000 /* segment will be destroyed on last detach */
#define SHM_LOCKED 02000 /* segment will not be swapped */
-#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
-#define SHM_NORESERVE 010000 /* don't check for reservations */
-
-/* Bits [26:31] are reserved */
-
-/*
- * When SHM_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
- * This gives us 6 bits, which is enough until someone invents 128 bit address
- * spaces.
- *
- * Assume these are all power of twos.
- * When 0 use the default page size.
- */
-#define SHM_HUGE_SHIFT 26
-#define SHM_HUGE_MASK 0x3f
-#define SHM_HUGE_2MB (21 << SHM_HUGE_SHIFT)
-#define SHM_HUGE_1GB (30 << SHM_HUGE_SHIFT)
#ifdef CONFIG_SYSVIPC
struct sysv_shm {
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a7d6bd2a918f..b6c3540e07bc 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -137,9 +137,15 @@ extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
unsigned long dst_addr,
unsigned long src_addr,
struct page **pagep);
+extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
+ pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr);
#else
#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
src_addr, pagep) ({ BUG(); 0; })
+#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
+ dst_addr) ({ BUG(); 0; })
#endif
#endif
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 4fcacd915d45..51d189615bda 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -18,6 +18,13 @@ struct shrink_control {
*/
unsigned long nr_to_scan;
+ /*
+ * How many objects did scan_objects process?
+ * This defaults to nr_to_scan before every call, but the callee
+ * should track its actual progress.
+ */
+ unsigned long nr_scanned;
+
/* current node being shrunk (for NUMA aware shrinkers) */
int nid;
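A sketch of how a scan callback is expected to use the new field; the shrinker and counters here are hypothetical, only sc->nr_scanned comes from this change:

static unsigned long demo_scan_objects(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	unsigned long freed = 0, scanned = 0;

	/* ... examine up to sc->nr_to_scan objects, freeing what we can
	 * and counting every object actually visited in 'scanned' ... */

	sc->nr_scanned = scanned;	/* real progress, may be < nr_to_scan */
	return freed;
}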
diff --git a/include/linux/signal.h b/include/linux/signal.h
index e2678b5dbb21..38564e3e54c7 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -21,6 +21,20 @@ static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
+enum siginfo_layout {
+ SIL_KILL,
+ SIL_TIMER,
+ SIL_POLL,
+ SIL_FAULT,
+ SIL_CHLD,
+ SIL_RT,
+#ifdef __ARCH_SIGSYS
+ SIL_SYS,
+#endif
+};
+
+enum siginfo_layout siginfo_layout(int sig, int si_code);
+
/*
* Define some primitives to manipulate sigset_t.
*/
@@ -380,10 +394,18 @@ int unhandled_signal(struct task_struct *tsk, int sig);
rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \
rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) )
+#define SIG_SPECIFIC_SICODES_MASK (\
+ rt_sigmask(SIGILL) | rt_sigmask(SIGFPE) | \
+ rt_sigmask(SIGSEGV) | rt_sigmask(SIGBUS) | \
+ rt_sigmask(SIGTRAP) | rt_sigmask(SIGCHLD) | \
+ rt_sigmask(SIGPOLL) | rt_sigmask(SIGSYS) | \
+ SIGEMT_MASK )
+
#define sig_kernel_only(sig) siginmask(sig, SIG_KERNEL_ONLY_MASK)
#define sig_kernel_coredump(sig) siginmask(sig, SIG_KERNEL_COREDUMP_MASK)
#define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK)
#define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK)
+#define sig_specific_sicodes(sig) siginmask(sig, SIG_SPECIFIC_SICODES_MASK)
#define sig_fatal(t, signr) \
(!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dbe29b6c9bd6..72299ef00061 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -22,6 +22,7 @@
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
+#include <linux/refcount.h>
#include <linux/atomic.h>
#include <asm/types.h>
@@ -345,6 +346,42 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
frag->size -= delta;
}
+static inline bool skb_frag_must_loop(struct page *p)
+{
+#if defined(CONFIG_HIGHMEM)
+ if (PageHighMem(p))
+ return true;
+#endif
+ return false;
+}
+
+/**
+ * skb_frag_foreach_page - loop over pages in a fragment
+ *
+ * @f: skb frag to operate on
+ * @f_off: offset from start of f->page.p
+ * @f_len: length from f_off to loop over
+ * @p: (temp var) current page
+ * @p_off: (temp var) offset from start of current page,
+ * non-zero only on first page.
+ * @p_len: (temp var) length in current page,
+ * < PAGE_SIZE only on first and last page.
+ * @copied: (temp var) length so far, excluding current p_len.
+ *
+ * A fragment can hold a compound page, in which case per-page
+ * operations, notably kmap_atomic, must be called for each
+ * regular page.
+ */
+#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \
+ for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \
+ p_off = (f_off) & (PAGE_SIZE - 1), \
+ p_len = skb_frag_must_loop(p) ? \
+ min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
+ copied = 0; \
+ copied < f_len; \
+ copied += p_len, p++, p_off = 0, \
+ p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
+
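A sketch of the intended use: copying out a fragment that may live in a highmem compound page, mapping each constituent page on its own (variable names assumed; frag->page_offset is the fragment-offset field of this era):

struct page *p;
u32 p_off, p_len, copied;

skb_frag_foreach_page(frag, frag->page_offset, skb_frag_size(frag),
		      p, p_off, p_len, copied) {
	void *vaddr = kmap_atomic(p);	/* map one regular page at a time */

	memcpy(dst + copied, vaddr + p_off, p_len);
	kunmap_atomic(vaddr);
}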
#define HAVE_HW_TIME_STAMP
/**
@@ -393,6 +430,7 @@ enum {
SKBTX_SCHED_TSTAMP = 1 << 6,
};
+#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
@@ -407,10 +445,46 @@ enum {
*/
struct ubuf_info {
void (*callback)(struct ubuf_info *, bool zerocopy_success);
- void *ctx;
- unsigned long desc;
+ union {
+ struct {
+ unsigned long desc;
+ void *ctx;
+ };
+ struct {
+ u32 id;
+ u16 len;
+ u16 zerocopy:1;
+ u32 bytelen;
+ };
+ };
+ refcount_t refcnt;
+
+ struct mmpin {
+ struct user_struct *user;
+ unsigned int num_pg;
+ } mmp;
};
+#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
+
+struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
+struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
+ struct ubuf_info *uarg);
+
+static inline void sock_zerocopy_get(struct ubuf_info *uarg)
+{
+ refcount_inc(&uarg->refcnt);
+}
+
+void sock_zerocopy_put(struct ubuf_info *uarg);
+void sock_zerocopy_put_abort(struct ubuf_info *uarg);
+
+void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
+
+int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
+ struct msghdr *msg, int len,
+ struct ubuf_info *uarg);
+
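For orientation, the userspace side this plumbing serves looks roughly as follows (hedged sketch; SO_ZEROCOPY and the error-queue completion model belong to the same MSG_ZEROCOPY series):

int one = 1;

setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));	/* opt in once */
send(fd, buf, len, MSG_ZEROCOPY);				/* per-call request */

/* completion notifications arrive out of band on the error queue */
recvmsg(fd, &msg, MSG_ERRQUEUE);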
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
*/
@@ -463,39 +537,38 @@ enum {
enum {
SKB_GSO_TCPV4 = 1 << 0,
- SKB_GSO_UDP = 1 << 1,
/* This indicates the skb is from an untrusted source. */
- SKB_GSO_DODGY = 1 << 2,
+ SKB_GSO_DODGY = 1 << 1,
/* This indicates the tcp segment has CWR set. */
- SKB_GSO_TCP_ECN = 1 << 3,
+ SKB_GSO_TCP_ECN = 1 << 2,
- SKB_GSO_TCP_FIXEDID = 1 << 4,
+ SKB_GSO_TCP_FIXEDID = 1 << 3,
- SKB_GSO_TCPV6 = 1 << 5,
+ SKB_GSO_TCPV6 = 1 << 4,
- SKB_GSO_FCOE = 1 << 6,
+ SKB_GSO_FCOE = 1 << 5,
- SKB_GSO_GRE = 1 << 7,
+ SKB_GSO_GRE = 1 << 6,
- SKB_GSO_GRE_CSUM = 1 << 8,
+ SKB_GSO_GRE_CSUM = 1 << 7,
- SKB_GSO_IPXIP4 = 1 << 9,
+ SKB_GSO_IPXIP4 = 1 << 8,
- SKB_GSO_IPXIP6 = 1 << 10,
+ SKB_GSO_IPXIP6 = 1 << 9,
- SKB_GSO_UDP_TUNNEL = 1 << 11,
+ SKB_GSO_UDP_TUNNEL = 1 << 10,
- SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
+ SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
- SKB_GSO_PARTIAL = 1 << 13,
+ SKB_GSO_PARTIAL = 1 << 12,
- SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
+ SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
- SKB_GSO_SCTP = 1 << 15,
+ SKB_GSO_SCTP = 1 << 14,
- SKB_GSO_ESP = 1 << 16,
+ SKB_GSO_ESP = 1 << 15,
};
#if BITS_PER_LONG > 32
@@ -885,7 +958,7 @@ void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
-void consume_stateless_skb(struct sk_buff *skb);
+void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
@@ -945,12 +1018,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
-struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
-static inline struct sk_buff *alloc_skb_head(gfp_t priority)
-{
- return __alloc_skb_head(priority, -1);
-}
-
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
@@ -973,7 +1040,23 @@ int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
-int skb_pad(struct sk_buff *skb, int pad);
+int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
+
+/**
+ * skb_pad - zero pad the tail of an skb
+ * @skb: buffer to pad
+ * @pad: space to pad
+ *
+ * Ensure that a buffer is followed by a padding area that is zero
+ * filled. Used by network drivers which may DMA or transfer data
+ * beyond the buffer end onto the wire.
+ *
+ *	May return an error in out-of-memory cases. The skb is freed on error.
+ */
+static inline int skb_pad(struct sk_buff *skb, int pad)
+{
+ return __skb_pad(skb, pad, true);
+}
#define dev_kfree_skb(a) consume_skb(a)
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
@@ -1129,8 +1212,6 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
return skb->hash;
}
-__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
-
static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
if (!skb->l4_hash && !skb->sw_hash) {
@@ -1143,20 +1224,6 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6
return skb->hash;
}
-__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
-
-static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
-{
- if (!skb->l4_hash && !skb->sw_hash) {
- struct flow_keys keys;
- __u32 hash = __get_hash_from_flowi4(fl4, &keys);
-
- __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
- }
-
- return skb->hash;
-}
-
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
@@ -1201,6 +1268,50 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
return &skb_shinfo(skb)->hwtstamps;
}
+static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
+{
+ bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
+
+ return is_zcopy ? skb_uarg(skb) : NULL;
+}
+
+static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
+{
+ if (skb && uarg && !skb_zcopy(skb)) {
+ sock_zerocopy_get(uarg);
+ skb_shinfo(skb)->destructor_arg = uarg;
+ skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+ }
+}
+
+/* Release a reference on a zerocopy structure */
+static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
+{
+ struct ubuf_info *uarg = skb_zcopy(skb);
+
+ if (uarg) {
+ if (uarg->callback == sock_zerocopy_callback) {
+ uarg->zerocopy = uarg->zerocopy && zerocopy;
+ sock_zerocopy_put(uarg);
+ } else {
+ uarg->callback(uarg, zerocopy);
+ }
+
+ skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ }
+}
+
+/* Abort a zerocopy operation and revert zckey on error in send syscall */
+static inline void skb_zcopy_abort(struct sk_buff *skb)
+{
+ struct ubuf_info *uarg = skb_zcopy(skb);
+
+ if (uarg) {
+ sock_zerocopy_put_abort(uarg);
+ skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
+ }
+}
+
/**
* skb_queue_empty - check if a queue is empty
* @list: queue head
@@ -1783,13 +1894,18 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb)
return skb->len - skb->data_len;
}
-static inline unsigned int skb_pagelen(const struct sk_buff *skb)
+static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
unsigned int i, len = 0;
for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
- return len + skb_headlen(skb);
+ return len;
+}
+
+static inline unsigned int skb_pagelen(const struct sk_buff *skb)
+{
+ return skb_headlen(skb) + __skb_pagelen(skb);
}
/**
@@ -2434,7 +2550,17 @@ static inline void skb_orphan(struct sk_buff *skb)
*/
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
- if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
+ if (likely(!skb_zcopy(skb)))
+ return 0;
+ if (skb_uarg(skb)->callback == sock_zerocopy_callback)
+ return 0;
+ return skb_copy_ubufs(skb, gfp_mask);
+}
+
+/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
+static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
+{
+ if (likely(!skb_zcopy(skb)))
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
@@ -2825,25 +2951,42 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
* skb_put_padto - increase size and pad an skbuff up to a minimal size
* @skb: buffer to pad
* @len: minimal length
+ * @free_on_error: free buffer on error
*
* Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it
* is untouched. Otherwise it is extended. Returns zero on
- * success. The skb is freed on error.
+ * success. The skb is freed on error if @free_on_error is true.
*/
-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
+ bool free_on_error)
{
unsigned int size = skb->len;
if (unlikely(size < len)) {
len -= size;
- if (skb_pad(skb, len))
+ if (__skb_pad(skb, len, free_on_error))
return -ENOMEM;
__skb_put(skb, len);
}
return 0;
}
+/**
+ * skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */
+static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+ return __skb_put_padto(skb, len, true);
+}
+
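Typical driver usage of the wrapper, as a hypothetical transmit-path snippet: pad runt frames to the Ethernet minimum and rely on the helper freeing the skb on failure:

if (skb_put_padto(skb, ETH_ZLEN))
	return NETDEV_TX_OK;	/* skb already freed by the helper */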
static inline int skb_add_data(struct sk_buff *skb,
struct iov_iter *from, int copy)
{
@@ -2866,6 +3009,8 @@ static inline int skb_add_data(struct sk_buff *skb,
static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
const struct page *page, int off)
{
+ if (skb_zcopy(skb))
+ return false;
if (i) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -3120,6 +3265,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int len,
unsigned int flags);
+int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
+ int len);
+int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index cc0faf3a90be..0783b622311e 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -115,6 +115,10 @@ struct kmem_cache {
#endif
#endif
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+ unsigned long random;
+#endif
+
#ifdef CONFIG_NUMA
/*
* Defragmentation by allocating from a remote node.
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 68123c1fe549..98b1fe027fc9 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -14,13 +14,17 @@
#include <linux/llist.h>
typedef void (*smp_call_func_t)(void *info);
-struct call_single_data {
+struct __call_single_data {
struct llist_node llist;
smp_call_func_t func;
void *info;
unsigned int flags;
};
+/* Use __aligned() to avoid using 2 cache lines for 1 csd */
+typedef struct __call_single_data call_single_data_t
+ __aligned(sizeof(struct __call_single_data));
+
/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;
@@ -48,7 +52,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
smp_call_func_t func, void *info, bool wait,
gfp_t gfp_flags);
-int smp_call_function_single_async(int cpu, struct call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
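With the aligned typedef a static or per-cpu csd no longer straddles two cache lines. A hedged usage sketch with a hypothetical callback:

static void demo_csd_func(void *info)
{
	/* runs on the target CPU in IPI context */
}

static call_single_data_t demo_csd = { .func = demo_csd_func };

/* fire-and-forget cross-call; demo_csd must stay live until it runs */
smp_call_function_single_async(target_cpu, &demo_csd);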
#ifdef CONFIG_SMP
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index 12910cf19869..c149aa7bedf3 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
}
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *);
+void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
+ const struct cpumask *);
#endif
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index a5714e93fb34..a0182ec2a621 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -20,6 +20,13 @@
#define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22)
#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23)
+#define MT7622_TOP_AXI_PROT_EN_ETHSYS (BIT(3) | BIT(17))
+#define MT7622_TOP_AXI_PROT_EN_HIF0 (BIT(24) | BIT(25))
+#define MT7622_TOP_AXI_PROT_EN_HIF1 (BIT(26) | BIT(27) | \
+ BIT(28))
+#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \
+ BIT(7) | BIT(8))
+
int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask);
int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask);
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 2b7882666ef6..66693bc4c6ad 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -17,6 +17,8 @@
#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
#define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
+#include <linux/dmaengine.h>
+
/*
* PKTDMA descriptor manipulation macros for host packet descriptor
*/
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8b13db5163cc..8ad963cdc88c 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -287,6 +287,7 @@ struct ucred {
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
+#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
descriptor received through
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d9510e8522d4..69e079c5ff98 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -118,24 +118,41 @@ do { \
#endif
/*
- * Despite its name it doesn't necessarily has to be a full barrier.
- * It should only guarantee that a STORE before the critical section
- * can not be reordered with LOADs and STOREs inside this section.
- * spin_lock() is the one-way barrier, this LOAD can not escape out
- * of the region. So the default implementation simply ensures that
- * a STORE can not move into the critical section, smp_wmb() should
- * serialize it with another STORE done by spin_lock().
+ * This barrier must provide two things:
+ *
+ * - it must guarantee a STORE before the spin_lock() is ordered against a
+ * LOAD after it, see the comments at its two usage sites.
+ *
+ * - it must ensure the critical section is RCsc.
+ *
+ * The latter is important for cases where we observe values written by other
+ * CPUs in spin-loops, without barriers, while being subject to scheduling.
+ *
+ * CPU0 CPU1 CPU2
+ *
+ * for (;;) {
+ * if (READ_ONCE(X))
+ * break;
+ * }
+ * X=1
+ * <sched-out>
+ * <sched-in>
+ * r = X;
+ *
+ * Without transitivity it could be that CPU1 observes X!=0 and breaks the
+ * loop, we get migrated, and CPU2 then sees X==0.
+ *
+ * Since most load-store architectures implement ACQUIRE with an smp_mb() after
+ * the LL/SC loop, they need no further barriers. Similarly all our TSO
+ * architectures imply an smp_mb() for each atomic instruction and equally don't
+ * need more.
+ *
+ * Architectures that can implement ACQUIRE better need to take care.
*/
-#ifndef smp_mb__before_spinlock
-#define smp_mb__before_spinlock() smp_wmb()
+#ifndef smp_mb__after_spinlock
+#define smp_mb__after_spinlock() do { } while (0)
#endif
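The guarantee reads like this in use, sketched loosely on the wakeup path (names hypothetical): a store issued before taking the lock must be ordered against a load inside the critical section:

WRITE_ONCE(cond, 1);		/* STORE before the lock ... */
raw_spin_lock(&lock);
smp_mb__after_spinlock();	/* ... ordered against the LOAD below */
if (READ_ONCE(wake_flag))
	do_wakeup();		/* hypothetical consumer */
raw_spin_unlock(&lock);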
-/**
- * raw_spin_unlock_wait - wait until the spinlock gets unlocked
- * @lock: the spinlock in question.
- */
-#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
-
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
@@ -369,31 +386,6 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
-/**
- * spin_unlock_wait - Interpose between successive critical sections
- * @lock: the spinlock whose critical sections are to be interposed.
- *
- * Semantically this is equivalent to a spin_lock() immediately
- * followed by a spin_unlock(). However, most architectures have
- * more efficient implementations in which the spin_unlock_wait()
- * cannot block concurrent lock acquisition, and in some cases
- * where spin_unlock_wait() does not write to the lock variable.
- * Nevertheless, spin_unlock_wait() can have high overhead, so if
- * you feel the need to use it, please check to see if there is
- * a better way to get your job done.
- *
- * The ordering guarantees provided by spin_unlock_wait() are:
- *
- * 1. All accesses preceding the spin_unlock_wait() happen before
- * any accesses in later critical sections for this same lock.
- * 2. All accesses following the spin_unlock_wait() happen after
- * any accesses in earlier critical sections for this same lock.
- */
-static __always_inline void spin_unlock_wait(spinlock_t *lock)
-{
- raw_spin_unlock_wait(&lock->rlock);
-}
-
static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0d9848de677d..612fb530af41 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -26,11 +26,6 @@
#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x) ((x)->slock == 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->slock, VAL);
-}
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
@@ -73,7 +68,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
-#define arch_spin_unlock_wait(lock) do { barrier(); (void)(lock); } while (0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index cfbfc540cafc..261471f407a5 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -87,4 +87,17 @@ static inline void srcu_barrier(struct srcu_struct *sp)
synchronize_srcu(sp);
}
+/* Defined here to avoid size increase for non-torture kernels. */
+static inline void srcu_torture_stats_print(struct srcu_struct *sp,
+ char *tt, char *tf)
+{
+ int idx;
+
+ idx = READ_ONCE(sp->srcu_idx) & 0x1;
+ pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
+ tt, tf, idx,
+ READ_ONCE(sp->srcu_lock_nesting[!idx]),
+ READ_ONCE(sp->srcu_lock_nesting[idx]));
+}
+
#endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 42973f787e7e..a949f4f9e4d7 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -104,8 +104,6 @@ struct srcu_struct {
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
-void process_srcu(struct work_struct *work);
-
#define __SRCU_STRUCT_INIT(name) \
{ \
.sda = &name##_srcu_data, \
@@ -141,5 +139,6 @@ void process_srcu(struct work_struct *work);
void synchronize_srcu_expedited(struct srcu_struct *sp);
void srcu_barrier(struct srcu_struct *sp);
+void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf);
#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index a467e617eeb0..54d21783e18d 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -99,6 +99,36 @@ extern __kernel_size_t strcspn(const char *,const char *);
#ifndef __HAVE_ARCH_MEMSET
extern void * memset(void *,int,__kernel_size_t);
#endif
+
+#ifndef __HAVE_ARCH_MEMSET16
+extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET32
+extern void *memset32(uint32_t *, uint32_t, __kernel_size_t);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET64
+extern void *memset64(uint64_t *, uint64_t, __kernel_size_t);
+#endif
+
+static inline void *memset_l(unsigned long *p, unsigned long v,
+ __kernel_size_t n)
+{
+ if (BITS_PER_LONG == 32)
+ return memset32((uint32_t *)p, v, n);
+ else
+ return memset64((uint64_t *)p, v, n);
+}
+
+static inline void *memset_p(void **p, void *v, __kernel_size_t n)
+{
+ if (BITS_PER_LONG == 32)
+ return memset32((uint32_t *)p, (uintptr_t)v, n);
+ else
+ return memset64((uint64_t *)p, (uintptr_t)v, n);
+}
+
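Example uses of the sized fills, assuming hypothetical buffers: painting a 16bpp scanline, or resetting a pointer table, in one call instead of a byte-wise loop:

memset16(line, rgb565, width);			/* width 16-bit pixels */
memset_p((void **)table, sentinel, nr_entries);	/* nr_entries pointers */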
#ifndef __HAVE_ARCH_MEMCPY
extern void * memcpy(void *,const void *,__kernel_size_t);
#endif
@@ -200,6 +230,7 @@ static inline const char *kbasename(const char *path)
void fortify_panic(const char *name) __noreturn __cold;
void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
+void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
@@ -395,4 +426,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q)
#endif
+/**
+ * memcpy_and_pad - Copy one buffer to another with padding
+ * @dest: Where to copy to
+ * @dest_len: The destination buffer size
+ * @src: Where to copy from
+ * @count: The number of bytes to copy
+ * @pad: Character to use for padding if space is left in destination.
+ */
+static inline void memcpy_and_pad(void *dest, size_t dest_len,
+ const void *src, size_t count, int pad)
+{
+ if (dest_len > count) {
+ memcpy(dest, src, count);
+ memset(dest + count, pad, dest_len - count);
+ } else
+ memcpy(dest, src, dest_len);
+}
+
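Example with a hypothetical fixed-width field, as used by wire formats that require space padding:

char model[16];

/* copies the 6 bytes of "widget" and pads the remaining 10 with ' ' */
memcpy_and_pad(model, sizeof(model), "widget", 6, ' ');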
#endif /* _LINUX_STRING_H_ */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 50a99a117da7..c1768f9d993b 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -139,6 +139,8 @@ struct rpc_task_setup {
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
#define RPC_TASK_ACTIVE 2
+#define RPC_TASK_MSG_RECV 3
+#define RPC_TASK_MSG_RECV_WAIT 4
#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index a3f8af9bd543..38f561b2dda3 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -99,7 +99,7 @@ struct svc_serv {
unsigned int sv_nrpools; /* number of thread pools */
struct svc_pool * sv_pools; /* array of thread pools */
- struct svc_serv_ops *sv_ops; /* server operations */
+ const struct svc_serv_ops *sv_ops; /* server operations */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct list_head sv_cb_list; /* queue for callback requests
* that arrive over the same
@@ -465,7 +465,7 @@ int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
- struct svc_serv_ops *);
+ const struct svc_serv_ops *);
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
struct svc_pool *pool, int node);
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
@@ -475,7 +475,7 @@ void svc_exit_thread(struct svc_rqst *);
unsigned int svc_pool_map_get(void);
void svc_pool_map_put(void);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
- struct svc_serv_ops *);
+ const struct svc_serv_ops *);
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ddb7f94a9d06..6a2ad38f5458 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -31,7 +31,7 @@ struct svc_xprt_ops {
struct svc_xprt_class {
const char *xcl_name;
struct module *xcl_owner;
- struct svc_xprt_ops *xcl_ops;
+ const struct svc_xprt_ops *xcl_ops;
struct list_head xcl_list;
u32 xcl_max_payload;
int xcl_ident;
@@ -49,7 +49,7 @@ struct svc_xpt_user {
struct svc_xprt {
struct svc_xprt_class *xpt_class;
- struct svc_xprt_ops *xpt_ops;
+ const struct svc_xprt_ops *xpt_ops;
struct kref xpt_ref;
struct list_head xpt_list;
struct list_head xpt_ready;
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 261b48a2701d..86b59e3525a5 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -239,6 +239,19 @@ extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
+/**
+ * xdr_stream_remaining - Return the number of bytes remaining in the stream
+ * @xdr: pointer to struct xdr_stream
+ *
+ * Return value:
+ * Number of bytes remaining in @xdr before xdr->end
+ */
+static inline size_t
+xdr_stream_remaining(const struct xdr_stream *xdr)
+{
+ return xdr->nwords << 2;
+}
+
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
size_t maxlen, gfp_t gfp_flags);
/**
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index eab1c749e192..5a7bff41f6b7 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -174,7 +174,7 @@ enum xprt_transports {
struct rpc_xprt {
struct kref kref; /* Reference count */
- struct rpc_xprt_ops * ops; /* transport methods */
+ const struct rpc_xprt_ops *ops; /* transport methods */
const struct rpc_timeout *timeout; /* timeout parms */
struct sockaddr_storage addr; /* server address */
@@ -232,6 +232,7 @@ struct rpc_xprt {
*/
spinlock_t transport_lock; /* lock transport info */
spinlock_t reserve_lock; /* lock slot table */
+ spinlock_t recv_lock; /* lock receive list */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
@@ -372,6 +373,8 @@ void xprt_write_space(struct rpc_xprt *xprt);
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_rqst(struct rpc_task *task, int copied);
+void xprt_pin_rqst(struct rpc_rqst *req);
+void xprt_unpin_rqst(struct rpc_rqst *req);
void xprt_release_rqst_cong(struct rpc_task *task);
void xprt_disconnect_done(struct rpc_xprt *xprt);
void xprt_force_disconnect(struct rpc_xprt *xprt);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0b1cf32edfd7..d10b7980799d 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -33,10 +33,10 @@ static inline void pm_restore_console(void)
typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
-#define PM_SUSPEND_FREEZE ((__force suspend_state_t) 1)
+#define PM_SUSPEND_TO_IDLE ((__force suspend_state_t) 1)
#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2)
#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
-#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE
+#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
enum suspend_stat_step {
@@ -186,7 +186,7 @@ struct platform_suspend_ops {
void (*recover)(void);
};
-struct platform_freeze_ops {
+struct platform_s2idle_ops {
int (*begin)(void);
int (*prepare)(void);
void (*wake)(void);
@@ -196,6 +196,9 @@ struct platform_freeze_ops {
};
#ifdef CONFIG_SUSPEND
+extern suspend_state_t mem_sleep_current;
+extern suspend_state_t mem_sleep_default;
+
/**
* suspend_set_ops - set platform dependent suspend operations
* @ops: The new suspend operations to set.
@@ -234,22 +237,22 @@ static inline bool pm_resume_via_firmware(void)
}
/* Suspend-to-idle state machine. */
-enum freeze_state {
- FREEZE_STATE_NONE, /* Not suspended/suspending. */
- FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */
- FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */
+enum s2idle_states {
+ S2IDLE_STATE_NONE, /* Not suspended/suspending. */
+ S2IDLE_STATE_ENTER, /* Enter suspend-to-idle. */
+ S2IDLE_STATE_WAKE, /* Wake up from suspend-to-idle. */
};
-extern enum freeze_state __read_mostly suspend_freeze_state;
+extern enum s2idle_states __read_mostly s2idle_state;
-static inline bool idle_should_freeze(void)
+static inline bool idle_should_enter_s2idle(void)
{
- return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
+ return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
}
extern void __init pm_states_init(void);
-extern void freeze_set_ops(const struct platform_freeze_ops *ops);
-extern void freeze_wake(void);
+extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
+extern void s2idle_wake(void);
/**
* arch_suspend_disable_irqs - disable IRQs for suspend
@@ -281,10 +284,10 @@ static inline bool pm_resume_via_firmware(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
-static inline bool idle_should_freeze(void) { return false; }
+static inline bool idle_should_enter_s2idle(void) { return false; }
static inline void __init pm_states_init(void) {}
-static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
-static inline void freeze_wake(void) {}
+static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
+static inline void s2idle_wake(void) {}
#endif /* !CONFIG_SUSPEND */
/* struct pbe is used for creating lists of pages that should be restored
@@ -427,6 +430,7 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
extern unsigned int pm_wakeup_irq;
+extern suspend_state_t pm_suspend_target_state;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
@@ -491,10 +495,24 @@ static inline void unlock_system_sleep(void) {}
#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
+extern bool pm_debug_messages_on;
+extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
#else
#define pm_print_times_enabled (false)
+#define pm_debug_messages_on (false)
+
+#include <linux/printk.h>
+
+#define __pm_pr_dbg(defer, fmt, ...) \
+ no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#endif
+#define pm_pr_dbg(fmt, ...) \
+ __pm_pr_dbg(false, fmt, ##__VA_ARGS__)
+
+#define pm_deferred_pr_dbg(fmt, ...) \
+ __pm_pr_dbg(true, fmt, ##__VA_ARGS__)
+
#ifdef CONFIG_PM_AUTOSLEEP
/* kernel/power/autosleep.c */
diff --git a/include/linux/swait.h b/include/linux/swait.h
index c1f9c62a8a50..73e97a08d3d0 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -79,9 +79,63 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
DECLARE_SWAIT_QUEUE_HEAD(name)
#endif
-static inline int swait_active(struct swait_queue_head *q)
+/**
+ * swait_active -- locklessly test for waiters on the queue
+ * @wq: the waitqueue to test for waiters
+ *
+ * returns true if the wait list is not empty
+ *
+ * NOTE: this function is lockless and requires care, incorrect usage _will_
+ * lead to sporadic and non-obvious failure.
+ *
+ * NOTE2: the same caveats apply to waitqueue_active() on regular waitqueues.
+ *
+ * Use either while holding swait_queue_head::lock or when used for wakeups
+ * with an extra smp_mb() like:
+ *
+ * CPU0 - waker CPU1 - waiter
+ *
+ * for (;;) {
+ * @cond = true; prepare_to_swait(&wq_head, &wait, state);
+ * smp_mb(); // smp_mb() from set_current_state()
+ * if (swait_active(wq_head)) if (@cond)
+ * swake_up(wq_head); break;
+ * schedule();
+ * }
+ * finish_swait(&wq_head, &wait);
+ *
+ * Because without the explicit smp_mb() it's possible for the
+ * swait_active() load to get hoisted over the @cond store such that we'll
+ * observe an empty wait list while the waiter might not observe @cond.
+ * This, in turn, can trigger missing wakeups.
+ *
+ * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
+ * which are of roughly equal cost when the lock is uncontended.
+ */
+static inline int swait_active(struct swait_queue_head *wq)
{
- return !list_empty(&q->task_list);
+ return !list_empty(&wq->task_list);
+}
+
+/**
+ * swq_has_sleeper - check if there are any waiting processes
+ * @wq: the waitqueue to test for waiters
+ *
+ * Returns true if @wq has waiting processes
+ *
+ * Please refer to the comment for swait_active.
+ */
+static inline bool swq_has_sleeper(struct swait_queue_head *wq)
+{
+ /*
+ * We need to be sure we are in sync with the list_add()
+ * modifications to the wait queue (task_list).
+ *
+ * This memory barrier should be paired with one on the
+ * waiting side.
+ */
+ smp_mb();
+ return swait_active(wq);
}
extern void swake_up(struct swait_queue_head *q);
@@ -169,4 +223,59 @@ do { \
__ret; \
})
+#define __swait_event_idle(wq, condition) \
+ (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
+
+/**
+ * swait_event_idle - wait without system load contribution
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
+ * true. The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * This function is mostly used when a kthread or workqueue waits for some
+ * condition and doesn't want to contribute to system load. Signals are
+ * ignored.
+ */
+#define swait_event_idle(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __swait_event_idle(wq, condition); \
+} while (0)
+
+#define __swait_event_idle_timeout(wq, condition, timeout) \
+ ___swait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_IDLE, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * swait_event_idle_timeout - wait up to timeout without load contribution
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies, at which we'll give up
+ *
+ * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
+ * true. The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * This function is mostly used when a kthread or workqueue waits for some
+ * condition and doesn't want to contribute to system load. Signals are
+ * ignored.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
+ */
+#define swait_event_idle_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __swait_event_idle_timeout(wq, \
+ condition, timeout); \
+ __ret; \
+})
+
#endif /* _LINUX_SWAIT_H */
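A sketch of the kthread pattern these macros target (the worker and its wakeup flag are hypothetical): the thread sleeps in TASK_IDLE, so its wait time is ignored by the load average, and the producer wakes it with swake_up().

	#include <linux/kthread.h>
	#include <linux/swait.h>

	static DECLARE_SWAIT_QUEUE_HEAD(work_wq);
	static bool work_ready;

	static int worker_fn(void *unused)
	{
		while (!kthread_should_stop()) {
			swait_event_idle(work_wq, READ_ONCE(work_ready) ||
						  kthread_should_stop());
			WRITE_ONCE(work_ready, false);
			/* ... process the queued work ... */
		}
		return 0;
	}

	/* Producer side: publish the work, then wake the idle worker. */
	static void queue_work_item(void)
	{
		WRITE_ONCE(work_ready, true);
		swake_up(&work_wq);
	}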
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d83d28e53e62..8a807292037f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -51,6 +51,23 @@ static inline int current_is_kswapd(void)
*/
/*
+ * Unaddressable device memory support. See include/linux/hmm.h and
+ * Documentation/vm/hmm.txt. In short, we need struct pages for device
+ * memory that is unaddressable (inaccessible) by the CPU, so that we can
+ * migrate part of a process's memory to device memory.
+ *
+ * When a page is migrated from CPU to device, we set the CPU page table entry
+ * to a special SWP_DEVICE_* entry.
+ */
+#ifdef CONFIG_DEVICE_PRIVATE
+#define SWP_DEVICE_NUM 2
+#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
+#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
+#else
+#define SWP_DEVICE_NUM 0
+#endif
+
+/*
* NUMA node memory migration support
*/
#ifdef CONFIG_MIGRATION
@@ -72,7 +89,8 @@ static inline int current_is_kswapd(void)
#endif
#define MAX_SWAPFILES \
- ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
+ ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
+ SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
/*
* Magic header for a swap area. The first part of the union is
@@ -188,6 +206,7 @@ struct swap_cluster_info {
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
+#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
/*
* We assign a cluster to each CPU, so each CPU can allocate swap entry from
@@ -211,7 +230,7 @@ struct swap_info_struct {
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
- struct plist_node avail_list; /* entry in swap_avail_head */
+ struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
@@ -250,6 +269,25 @@ struct swap_info_struct {
struct swap_cluster_list discard_clusters; /* discard clusters list */
};
+#ifdef CONFIG_64BIT
+#define SWAP_RA_ORDER_CEILING 5
+#else
+/* Avoid stack overflow, because we need to save part of the page table */
+#define SWAP_RA_ORDER_CEILING 3
+#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING)
+#endif
+
+struct vma_swap_readahead {
+ unsigned short win;
+ unsigned short offset;
+ unsigned short nr_pte;
+#ifdef CONFIG_64BIT
+ pte_t *ptes;
+#else
+ pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
+#endif
+};
+
/* linux/mm/workingset.c */
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
@@ -262,8 +300,8 @@ extern unsigned long totalreserve_pages;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);
-/* Definition of global_page_state not available yet */
-#define nr_free_pages() global_page_state(NR_FREE_PAGES)
+/* Definition of global_zone_page_state not available yet */
+#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
@@ -349,6 +387,7 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *,
#define SWAP_ADDRESS_SPACE_SHIFT 14
#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
+extern bool swap_vma_readahead;
#define swap_address_space(entry) \
(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
>> SWAP_ADDRESS_SPACE_SHIFT])
@@ -361,7 +400,9 @@ extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
-extern struct page *lookup_swap_cache(swp_entry_t);
+extern struct page *lookup_swap_cache(swp_entry_t entry,
+ struct vm_area_struct *vma,
+ unsigned long addr);
extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr,
bool do_poll);
@@ -371,11 +412,23 @@ extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
extern struct page *swapin_readahead(swp_entry_t, gfp_t,
struct vm_area_struct *vma, unsigned long addr);
+extern struct page *swap_readahead_detect(struct vm_fault *vmf,
+ struct vma_swap_readahead *swap_ra);
+extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
+ struct vm_fault *vmf,
+ struct vma_swap_readahead *swap_ra);
+
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
+extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);
+static inline bool swap_use_vma_readahead(void)
+{
+ return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
+}
+
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
@@ -434,8 +487,8 @@ static inline void show_swap_cache_info(void)
{
}
-#define free_swap_and_cache(swp) is_migration_entry(swp)
-#define swapcache_prepare(swp) is_migration_entry(swp)
+#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
+#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
@@ -465,12 +518,32 @@ static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
return NULL;
}
+static inline bool swap_use_vma_readahead(void)
+{
+ return false;
+}
+
+static inline struct page *swap_readahead_detect(
+ struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
+{
+ return NULL;
+}
+
+static inline struct page *do_swap_page_readahead(
+ swp_entry_t fentry, gfp_t gfp_mask,
+ struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
+{
+ return NULL;
+}
+
static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
return 0;
}
-static inline struct page *lookup_swap_cache(swp_entry_t swp)
+static inline struct page *lookup_swap_cache(swp_entry_t swp,
+ struct vm_area_struct *vma,
+ unsigned long addr)
{
return NULL;
}
@@ -509,8 +582,8 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-#define reuse_swap_page(page, total_mapcount) \
- (page_trans_huge_mapcount(page, total_mapcount) == 1)
+#define reuse_swap_page(page, total_map_swapcount) \
+ (page_trans_huge_mapcount(page, total_map_swapcount) == 1)
static inline int try_to_free_swap(struct page *page)
{
@@ -526,6 +599,15 @@ static inline swp_entry_t get_swap_page(struct page *page)
#endif /* CONFIG_SWAP */
+#ifdef CONFIG_THP_SWAP
+extern int split_swap_cluster(swp_entry_t entry);
+#else
+static inline int split_swap_cluster(swp_entry_t entry)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5ff7b217ee6..291c4b534658 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -100,10 +100,79 @@ static inline void *swp_to_radix_entry(swp_entry_t entry)
return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
+#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
+static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
+{
+ return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
+ page_to_pfn(page));
+}
+
+static inline bool is_device_private_entry(swp_entry_t entry)
+{
+ int type = swp_type(entry);
+ return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
+}
+
+static inline void make_device_private_entry_read(swp_entry_t *entry)
+{
+ *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
+}
+
+static inline bool is_write_device_private_entry(swp_entry_t entry)
+{
+ return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
+}
+
+static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+{
+ return pfn_to_page(swp_offset(entry));
+}
+
+int device_private_entry_fault(struct vm_area_struct *vma,
+ unsigned long addr,
+ swp_entry_t entry,
+ unsigned int flags,
+ pmd_t *pmdp);
+#else /* CONFIG_DEVICE_PRIVATE */
+static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
+{
+ return swp_entry(0, 0);
+}
+
+static inline void make_device_private_entry_read(swp_entry_t *entry)
+{
+}
+
+static inline bool is_device_private_entry(swp_entry_t entry)
+{
+ return false;
+}
+
+static inline bool is_write_device_private_entry(swp_entry_t entry)
+{
+ return false;
+}
+
+static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+{
+ return NULL;
+}
+
+static inline int device_private_entry_fault(struct vm_area_struct *vma,
+ unsigned long addr,
+ swp_entry_t entry,
+ unsigned int flags,
+ pmd_t *pmdp)
+{
+ return VM_FAULT_SIGBUS;
+}
+#endif /* CONFIG_DEVICE_PRIVATE */
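A short sketch of how these helpers compose under CONFIG_DEVICE_PRIVATE (it assumes 'page' is a locked device-private page): the swap entry round-trips back to the same struct page, and a write entry can be downgraded in place.

	#include <linux/swapops.h>

	static void devpriv_demo(struct page *page)
	{
		swp_entry_t entry = make_device_private_entry(page, true);

		if (is_write_device_private_entry(entry))
			make_device_private_entry_read(&entry); /* now read-only */

		VM_BUG_ON(!is_device_private_entry(entry));
		VM_BUG_ON(device_private_entry_to_page(entry) != page);
	}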
+
#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
- BUG_ON(!PageLocked(page));
+ BUG_ON(!PageLocked(compound_head(page)));
+
return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
page_to_pfn(page));
}
@@ -126,7 +195,7 @@ static inline struct page *migration_entry_to_page(swp_entry_t entry)
* Any use of migration entries may only occur while the
* corresponding page is locked
*/
- BUG_ON(!PageLocked(p));
+ BUG_ON(!PageLocked(compound_head(p)));
return p;
}
@@ -148,7 +217,11 @@ static inline int is_migration_entry(swp_entry_t swp)
{
return 0;
}
-#define migration_entry_to_page(swp) NULL
+static inline struct page *migration_entry_to_page(swp_entry_t entry)
+{
+ return NULL;
+}
+
static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl) { }
@@ -163,6 +236,70 @@ static inline int is_write_migration_entry(swp_entry_t entry)
#endif
+struct page_vma_mapped_walk;
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+ struct page *page);
+
+extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
+ struct page *new);
+
+extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
+
+static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
+{
+ swp_entry_t arch_entry;
+
+ if (pmd_swp_soft_dirty(pmd))
+ pmd = pmd_swp_clear_soft_dirty(pmd);
+ arch_entry = __pmd_to_swp_entry(pmd);
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
+{
+ swp_entry_t arch_entry;
+
+ arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
+ return __swp_entry_to_pmd(arch_entry);
+}
+
+static inline int is_pmd_migration_entry(pmd_t pmd)
+{
+ return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
+}
+#else
+static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+ struct page *page)
+{
+ BUILD_BUG();
+}
+
+static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
+ struct page *new)
+{
+ BUILD_BUG();
+}
+
+static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
+
+static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
+{
+ return swp_entry(0, 0);
+}
+
+static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
+{
+ return __pmd(0);
+}
+
+static inline int is_pmd_migration_entry(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_MEMORY_FAILURE
extern atomic_long_t num_poisoned_pages __read_mostly;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 3cb15ea48aee..a78186d826d7 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -100,11 +100,12 @@ union bpf_attr;
#define __MAP(n,...) __MAP##n(__VA_ARGS__)
#define __SC_DECL(t, a) t a
-#define __TYPE_IS_L(t) (__same_type((t)0, 0L))
-#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
-#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
+#define __TYPE_AS(t, v) __same_type((__force t)0, v)
+#define __TYPE_IS_L(t) (__TYPE_AS(t, 0L))
+#define __TYPE_IS_UL(t) (__TYPE_AS(t, 0UL))
+#define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
-#define __SC_CAST(t, a) (t) a
+#define __SC_CAST(t, a) (__force t) a
#define __SC_ARGS(t, a) a
#define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
@@ -172,8 +173,20 @@ extern struct trace_event_functions exit_syscall_print_funcs;
static struct syscall_metadata __used \
__attribute__((section("__syscalls_metadata"))) \
*__p_syscall_meta_##sname = &__syscall_meta_##sname;
+
+static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
+{
+ return tp_event->class == &event_class_syscall_enter ||
+ tp_event->class == &event_class_syscall_exit;
+}
+
#else
#define SYSCALL_METADATA(sname, nb, ...)
+
+static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
+{
+ return 0;
+}
#endif
#define SYSCALL_DEFINE0(sname) \
@@ -187,6 +200,8 @@ extern struct trace_event_functions exit_syscall_print_funcs;
#define SYSCALL_DEFINE5(name, ...) SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
#define SYSCALL_DEFINE6(name, ...) SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE_MAXARGS 6
+
#define SYSCALL_DEFINEx(x, sname, ...) \
SYSCALL_METADATA(sname, x, __VA_ARGS__) \
__SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
@@ -206,6 +221,26 @@ extern struct trace_event_functions exit_syscall_print_funcs;
} \
static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+/*
+ * Called before returning to user-mode. Returning to user-mode with an
+ * address limit different from USER_DS can allow kernel memory to be
+ * overwritten.
+ */
+static inline void addr_limit_user_check(void)
+{
+#ifdef TIF_FSCHECK
+ if (!test_thread_flag(TIF_FSCHECK))
+ return;
+#endif
+
+ if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS),
+ "Invalid address limit on user-mode return"))
+ force_sig(SIGKILL, current);
+
+#ifdef TIF_FSCHECK
+ clear_thread_flag(TIF_FSCHECK);
+#endif
+}
+
asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
qid_t id, void __user *addr);
asmlinkage long sys_time(time_t __user *tloc);
@@ -578,12 +613,12 @@ asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
- int flags);
+ rwf_t flags);
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
- int flags);
+ rwf_t flags);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
asmlinkage long sys_chdir(const char __user *filename);
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index c3a7f0cc3a27..e1c3632f4e81 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -49,13 +49,4 @@
int do_syslog(int type, char __user *buf, int count, int source);
-#ifdef CONFIG_PRINTK
-int check_syslog_permissions(int type, int source);
-#else
-static inline int check_syslog_permissions(int type, int source)
-{
- return 0;
-}
-#endif
-
#endif /* _LINUX_SYSLOG_H */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 542ca1ae02c4..4aa40ef02d32 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -192,15 +192,6 @@ struct tcp_sock {
struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
- /* Data for direct copy to user */
- struct {
- struct sk_buff_head prequeue;
- struct task_struct *task;
- struct msghdr *msg;
- int memory;
- int len;
- } ucopy;
-
u32 snd_wl1; /* Sequence for window update */
u32 snd_wnd; /* The window we expect to receive */
u32 max_window; /* Maximal window ever seen from peer */
@@ -273,7 +264,7 @@ struct tcp_sock {
u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
u32 snd_cwnd_used;
u32 snd_cwnd_stamp;
- u32 prior_cwnd; /* Congestion window at start of Recovery. */
+ u32 prior_cwnd; /* cwnd right before starting loss recovery */
u32 prr_delivered; /* Number of newly delivered packets to
* receiver in Recovery. */
u32 prr_out; /* Total number of pkts sent during Recovery. */
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index 0f175b8f6456..cb889afe576b 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -28,6 +28,7 @@
#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */
#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */
+struct device;
struct tee_device;
struct tee_shm;
struct tee_shm_pool;
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index dab11f97e1c6..fd5b959c753c 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -102,6 +102,7 @@ enum thermal_notify_event {
THERMAL_DEVICE_DOWN, /* Thermal device is down */
THERMAL_DEVICE_UP, /* Thermal device is up after a down event */
THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */
+ THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */
};
struct thermal_zone_device_ops {
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 250a27614328..5f7eeab990fe 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -38,7 +38,11 @@ enum {
#ifdef __KERNEL__
-#ifdef CONFIG_DEBUG_STACK_USAGE
+#ifndef THREAD_ALIGN
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
__GFP_ZERO)
#else
diff --git a/include/linux/time.h b/include/linux/time.h
index 4abb32d4c6b8..9bc1f945777c 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -178,7 +178,7 @@ extern int do_setitimer(int which, struct itimerval *value,
struct itimerval *ovalue);
extern int do_getitimer(int which, struct itimerval *value);
-extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
+extern long do_utimes(int dfd, const char __user *filename, struct timespec64 *times, int flags);
/*
* Similar to the struct tm in userspace <time.h>, but it needs to be here so
@@ -285,4 +285,19 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its)
return true;
}
+/**
+ * time_after32 - compare two 32-bit relative times
+ * @a: the time which may be after @b
+ * @b: the time which may be before @a
+ *
+ * time_after32(a, b) returns true if the time @a is after time @b.
+ * time_before32(b, a) returns true if the time @b is before time @a.
+ *
+ * Similar to time_after(), compare two 32-bit timestamps for relative
+ * times. This is useful for comparing 32-bit seconds values that can't
+ * be converted to 64-bit values (e.g. due to disk format or wire protocol
+ * issues) when it is known that the times are less than 68 years apart.
+ */
+#define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0)
+#define time_before32(b, a) time_after32(a, b)
#endif
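A self-contained sketch of the wraparound behaviour the comparison relies on (values chosen to straddle the 32-bit boundary):

	#include <linux/bug.h>
	#include <linux/time.h>

	static void time32_demo(void)
	{
		u32 t1 = 0xfffffff0;		/* just before the u32 wraps */
		u32 t2 = t1 + 0x20;		/* 0x00000010, just after the wrap */

		WARN_ON(!time_after32(t2, t1));	/* t2 is still "after" t1 */
		WARN_ON(!time_before32(t1, t2));
	}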
diff --git a/include/linux/timer.h b/include/linux/timer.h
index e6789b8757d5..6383c528b148 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -168,6 +168,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \
__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
+#define TIMER_DATA_TYPE unsigned long
+#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE)
+
+static inline void timer_setup(struct timer_list *timer,
+ void (*callback)(struct timer_list *),
+ unsigned int flags)
+{
+ __setup_timer(timer, (TIMER_FUNC_TYPE)callback,
+ (TIMER_DATA_TYPE)timer, flags);
+}
+
+#define from_timer(var, callback_timer, timer_fieldname) \
+ container_of(callback_timer, typeof(*var), timer_fieldname)
+
/**
* timer_pending - is a timer pending?
* @timer: the timer in question
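A sketch of the conversion pattern the new helpers enable (the struct and function names are hypothetical): the callback takes the timer_list pointer itself and recovers its containing structure with from_timer(), instead of receiving an unsigned long data argument.

	#include <linux/timer.h>

	struct foo_dev {
		struct timer_list timeout;	/* timer embedded in the device */
		/* ... */
	};

	static void foo_timeout_fn(struct timer_list *t)
	{
		struct foo_dev *foo = from_timer(foo, t, timeout);

		/* ... handle the timeout for 'foo' ... */
	}

	static void foo_start(struct foo_dev *foo)
	{
		timer_setup(&foo->timeout, foo_timeout_fn, 0);
		mod_timer(&foo->timeout, jiffies + HZ);
	}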
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
new file mode 100644
index 000000000000..0d2d3da46139
--- /dev/null
+++ b/include/linux/tnum.h
@@ -0,0 +1,81 @@
+/* tnum: tracked (or tristate) numbers
+ *
+ * A tnum tracks knowledge about the bits of a value. Each bit can be either
+ * known (0 or 1), or unknown (x). Arithmetic operations on tnums will
+ * propagate the unknown bits such that the tnum result represents all the
+ * possible results for possible values of the operands.
+ */
+#include <linux/types.h>
+
+struct tnum {
+ u64 value;
+ u64 mask;
+};
+
+/* Constructors */
+/* Represent a known constant as a tnum. */
+struct tnum tnum_const(u64 value);
+/* A completely unknown value */
+extern const struct tnum tnum_unknown;
+/* A value that's unknown except that @min <= value <= @max */
+struct tnum tnum_range(u64 min, u64 max);
+
+/* Arithmetic and logical ops */
+/* Shift a tnum left (by a fixed shift) */
+struct tnum tnum_lshift(struct tnum a, u8 shift);
+/* Shift a tnum right (by a fixed shift) */
+struct tnum tnum_rshift(struct tnum a, u8 shift);
+/* Add two tnums, return @a + @b */
+struct tnum tnum_add(struct tnum a, struct tnum b);
+/* Subtract two tnums, return @a - @b */
+struct tnum tnum_sub(struct tnum a, struct tnum b);
+/* Bitwise-AND, return @a & @b */
+struct tnum tnum_and(struct tnum a, struct tnum b);
+/* Bitwise-OR, return @a | @b */
+struct tnum tnum_or(struct tnum a, struct tnum b);
+/* Bitwise-XOR, return @a ^ @b */
+struct tnum tnum_xor(struct tnum a, struct tnum b);
+/* Multiply two tnums, return @a * @b */
+struct tnum tnum_mul(struct tnum a, struct tnum b);
+
+/* Return a tnum representing numbers satisfying both @a and @b */
+struct tnum tnum_intersect(struct tnum a, struct tnum b);
+
+/* Return @a with all but the lowest @size bytes cleared */
+struct tnum tnum_cast(struct tnum a, u8 size);
+
+/* Returns true if @a is a known constant */
+static inline bool tnum_is_const(struct tnum a)
+{
+ return !a.mask;
+}
+
+/* Returns true if @a == tnum_const(@b) */
+static inline bool tnum_equals_const(struct tnum a, u64 b)
+{
+ return tnum_is_const(a) && a.value == b;
+}
+
+/* Returns true if @a is completely unknown */
+static inline bool tnum_is_unknown(struct tnum a)
+{
+ return !~a.mask;
+}
+
+/* Returns true if @a is known to be a multiple of @size.
+ * @size must be a power of two.
+ */
+bool tnum_is_aligned(struct tnum a, u64 size);
+
+/* Returns true if @b represents a subset of @a. */
+bool tnum_in(struct tnum a, struct tnum b);
+
+/* Formatting functions. These have snprintf-like semantics: they will write
+ * up to @size bytes (including the terminating NUL byte), and return the number
+ * of bytes (excluding the terminating NUL) which would have been written had
+ * sufficient space been available. (Thus tnum_sbin always returns 64.)
+ */
+/* Format a tnum as a pair of hex numbers (value; mask) */
+int tnum_strn(char *str, size_t size, struct tnum a);
+/* Format a tnum as tristate binary expansion */
+int tnum_sbin(char *str, size_t size, struct tnum a);
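A brief sketch of how the tracked-number arithmetic composes (values chosen for illustration): intersecting a range with a left-shifted unknown pins the low bits, and subsequent arithmetic propagates that knowledge.

	#include <linux/bug.h>
	#include <linux/tnum.h>

	static void tnum_demo(void)
	{
		/* x: 4-byte aligned (low two bits known 0) and within [0, 12],
		 * i.e. one of {0, 4, 8, 12}.
		 */
		struct tnum x = tnum_intersect(tnum_range(0, 12),
					       tnum_lshift(tnum_unknown, 2));
		/* y = x + 1: the low two bits become known 01, so y is odd. */
		struct tnum y = tnum_add(x, tnum_const(1));

		WARN_ON(tnum_is_const(y));	/* y is 1, 5, 9 or 13 */
		WARN_ON(tnum_is_aligned(y, 4));	/* never 4-byte aligned */
	}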
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 536c80ff7ad9..2e0f22298fe9 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -217,7 +217,6 @@ enum {
TRACE_EVENT_FL_CAP_ANY_BIT,
TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
- TRACE_EVENT_FL_WAS_ENABLED_BIT,
TRACE_EVENT_FL_TRACEPOINT_BIT,
TRACE_EVENT_FL_KPROBE_BIT,
TRACE_EVENT_FL_UPROBE_BIT,
@@ -229,9 +228,6 @@ enum {
* CAP_ANY - Any user can enable for perf
* NO_SET_FILTER - Set when filter has error and is to be ignored
* IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
- * WAS_ENABLED - Set and stays set when an event was ever enabled
- * (used for module unloading, if a module event is enabled,
- * it is best to clear the buffers that used it).
* TRACEPOINT - Event is a tracepoint
* KPROBE - Event is a kprobe
* UPROBE - Event is a uprobe
@@ -241,7 +237,6 @@ enum {
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
- TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT),
@@ -277,6 +272,7 @@ struct trace_event_call {
int perf_refcount;
struct hlist_head __percpu *perf_events;
struct bpf_prog *prog;
+ struct perf_event *bpf_prog_owner;
int (*perf_perm)(struct trace_event_call *,
struct perf_event *);
@@ -306,6 +302,7 @@ enum {
EVENT_FILE_FL_TRIGGER_MODE_BIT,
EVENT_FILE_FL_TRIGGER_COND_BIT,
EVENT_FILE_FL_PID_FILTER_BIT,
+ EVENT_FILE_FL_WAS_ENABLED_BIT,
};
/*
@@ -321,6 +318,7 @@ enum {
* TRIGGER_MODE - When set, invoke the triggers associated with the event
* TRIGGER_COND - When set, one or more triggers has an associated filter
* PID_FILTER - When set, the event is filtered based on pid
+ * WAS_ENABLED - Set when enabled to know to clear trace on module removal
*/
enum {
EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
@@ -333,6 +331,7 @@ enum {
EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
+ EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};
struct trace_event_file {
@@ -508,9 +507,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
u64 count, struct pt_regs *regs, void *head,
- struct task_struct *task)
+ struct task_struct *task, struct perf_event *event)
{
- perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
+ perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
}
#endif
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 79c30daf46a9..cf53eb539f6e 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -261,6 +261,8 @@ struct tty_port {
*/
#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */
#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */
+#define TTY_PORT_KOPENED 5 /* device exclusively opened by
+ kernel */
/*
* Where all of the state associated with a tty is kept while the tty
@@ -399,8 +401,8 @@ extern struct tty_struct *get_current_tty(void);
/* tty_io.c */
extern int __init tty_init(void);
extern const char *tty_name(const struct tty_struct *tty);
-extern struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
- struct file *filp);
+extern struct tty_struct *tty_kopen(dev_t device);
+extern void tty_kclose(struct tty_struct *tty);
extern int tty_dev_name_to_number(const char *name, dev_t *number);
#else
static inline void tty_kref_put(struct tty_struct *tty)
@@ -422,9 +424,10 @@ static inline int __init tty_init(void)
{ return 0; }
static inline const char *tty_name(const struct tty_struct *tty)
{ return "(none)"; }
-static inline struct tty_struct *tty_open_by_driver(dev_t device,
- struct inode *inode, struct file *filp)
-{ return NULL; }
+static inline struct tty_struct *tty_kopen(dev_t device)
+{ return ERR_PTR(-ENODEV); }
+static inline void tty_kclose(struct tty_struct *tty)
+{ }
static inline int tty_dev_name_to_number(const char *name, dev_t *number)
{ return -ENOTSUPP; }
#endif
@@ -652,6 +655,19 @@ static inline void tty_port_set_initialized(struct tty_port *port, bool val)
clear_bit(TTY_PORT_INITIALIZED, &port->iflags);
}
+static inline bool tty_port_kopened(struct tty_port *port)
+{
+ return test_bit(TTY_PORT_KOPENED, &port->iflags);
+}
+
+static inline void tty_port_set_kopened(struct tty_port *port, bool val)
+{
+ if (val)
+ set_bit(TTY_PORT_KOPENED, &port->iflags);
+ else
+ clear_bit(TTY_PORT_KOPENED, &port->iflags);
+}
+
extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
extern int tty_port_carrier_raised(struct tty_port *port);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 00b2213f6a35..fcdc0f5d9098 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -243,6 +243,7 @@
#include <linux/list.h>
#include <linux/cdev.h>
#include <linux/termios.h>
+#include <linux/seq_file.h>
struct tty_struct;
struct tty_driver;
@@ -285,6 +286,7 @@ struct tty_operations {
int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
int (*get_icount)(struct tty_struct *tty,
struct serial_icounter_struct *icount);
+ void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m);
#ifdef CONFIG_CONSOLE_POLL
int (*poll_init)(struct tty_driver *driver, int line, char *options);
int (*poll_get_char)(struct tty_driver *driver, int line);
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index c28dd523f96e..d43837f2ce3a 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port,
unsigned char **chars, size_t size);
extern void tty_flip_buffer_push(struct tty_port *port);
void tty_schedule_flip(struct tty_port *port);
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
static inline int tty_insert_flip_char(struct tty_port *port,
unsigned char ch, char flag)
@@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port,
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
- return tty_insert_flip_string_flags(port, &ch, &flag, 1);
+ return __tty_insert_flip_char(port, ch, flag);
}
static inline int tty_insert_flip_string(struct tty_port *port,
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index acdd6f915a8d..20ef8e6ec2db 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -156,7 +156,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
-copy_in_user(void __user *to, const void *from, unsigned long n)
+copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_fault();
if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
diff --git a/include/linux/umh.h b/include/linux/umh.h
new file mode 100644
index 000000000000..244aff638220
--- /dev/null
+++ b/include/linux/umh.h
@@ -0,0 +1,69 @@
+#ifndef __LINUX_UMH_H__
+#define __LINUX_UMH_H__
+
+#include <linux/gfp.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/workqueue.h>
+#include <linux/sysctl.h>
+
+struct cred;
+struct file;
+
+#define UMH_NO_WAIT 0 /* don't wait at all */
+#define UMH_WAIT_EXEC 1 /* wait for the exec, but not the process */
+#define UMH_WAIT_PROC 2 /* wait for the process to complete */
+#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
+
+struct subprocess_info {
+ struct work_struct work;
+ struct completion *complete;
+ const char *path;
+ char **argv;
+ char **envp;
+ int wait;
+ int retval;
+ int (*init)(struct subprocess_info *info, struct cred *new);
+ void (*cleanup)(struct subprocess_info *info);
+ void *data;
+} __randomize_layout;
+
+extern int
+call_usermodehelper(const char *path, char **argv, char **envp, int wait);
+
+extern struct subprocess_info *
+call_usermodehelper_setup(const char *path, char **argv, char **envp,
+ gfp_t gfp_mask,
+ int (*init)(struct subprocess_info *info, struct cred *new),
+ void (*cleanup)(struct subprocess_info *), void *data);
+
+extern int
+call_usermodehelper_exec(struct subprocess_info *info, int wait);
+
+extern struct ctl_table usermodehelper_table[];
+
+enum umh_disable_depth {
+ UMH_ENABLED = 0,
+ UMH_FREEZING,
+ UMH_DISABLED,
+};
+
+extern int __usermodehelper_disable(enum umh_disable_depth depth);
+extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
+
+static inline int usermodehelper_disable(void)
+{
+ return __usermodehelper_disable(UMH_DISABLED);
+}
+
+static inline void usermodehelper_enable(void)
+{
+ __usermodehelper_set_disable_depth(UMH_ENABLED);
+}
+
+extern int usermodehelper_read_trylock(void);
+extern long usermodehelper_read_lock_wait(long timeout);
+extern void usermodehelper_read_unlock(void);
+
+#endif /* __LINUX_UMH_H__ */
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index c5fdfcf99828..d725cff7268d 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -58,6 +58,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_OVERRIDE_TX_BURST BIT(10)
#define CI_HDRC_OVERRIDE_RX_BURST BIT(11)
#define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */
+#define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 1a4a4bacfae6..21468a722c4a 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -48,6 +48,7 @@ struct usb_ep;
* by adding a zero length packet as needed;
* @short_not_ok: When reading data, makes short packets be
* treated as errors (queue stops advancing till cleanup).
+ * @dma_mapped: Indicates if request has been mapped to DMA (internal)
* @complete: Function called when request completes, so this request and
* its buffer may be re-used. The function will always be called with
* interrupts disabled, and it must not sleep.
@@ -103,6 +104,7 @@ struct usb_request {
unsigned no_interrupt:1;
unsigned zero:1;
unsigned short_not_ok:1;
+ unsigned dma_mapped:1;
void (*complete)(struct usb_ep *ep,
struct usb_request *req);
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 299245105610..8c6914873a16 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -12,6 +12,7 @@
#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/usb.h>
+#include <uapi/linux/usb/charger.h>
enum usb_phy_interface {
USBPHY_INTERFACE_MODE_UNKNOWN,
@@ -72,6 +73,17 @@ struct usb_phy_io_ops {
int (*write)(struct usb_phy *x, u32 val, u32 reg);
};
+struct usb_charger_current {
+ unsigned int sdp_min;
+ unsigned int sdp_max;
+ unsigned int dcp_min;
+ unsigned int dcp_max;
+ unsigned int cdp_min;
+ unsigned int cdp_max;
+ unsigned int aca_min;
+ unsigned int aca_max;
+};
+
struct usb_phy {
struct device *dev;
const char *label;
@@ -91,6 +103,13 @@ struct usb_phy {
struct extcon_dev *id_edev;
struct notifier_block vbus_nb;
struct notifier_block id_nb;
+ struct notifier_block type_nb;
+
+ /* Support USB charger */
+ enum usb_charger_type chg_type;
+ enum usb_charger_state chg_state;
+ struct usb_charger_current chg_cur;
+ struct work_struct chg_work;
/* for notification of usb_phy_events */
struct atomic_notifier_head notifier;
@@ -129,6 +148,12 @@ struct usb_phy {
enum usb_device_speed speed);
int (*notify_disconnect)(struct usb_phy *x,
enum usb_device_speed speed);
+
+ /*
+ * Charger detection method can be implemented if you need to
+ * manually detect the charger type.
+ */
+ enum usb_charger_type (*charger_detect)(struct usb_phy *x);
};
/**
@@ -219,6 +244,12 @@ extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
extern int usb_bind_phy(const char *dev_name, u8 index,
const char *phy_dev_name);
extern void usb_phy_set_event(struct usb_phy *x, unsigned long event);
+extern void usb_phy_set_charger_current(struct usb_phy *usb_phy,
+ unsigned int mA);
+extern void usb_phy_get_charger_current(struct usb_phy *usb_phy,
+ unsigned int *min, unsigned int *max);
+extern void usb_phy_set_charger_state(struct usb_phy *usb_phy,
+ enum usb_charger_state state);
#else
static inline struct usb_phy *usb_get_phy(enum usb_phy_type type)
{
@@ -270,12 +301,33 @@ static inline int usb_bind_phy(const char *dev_name, u8 index,
static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event)
{
}
+
+static inline void usb_phy_set_charger_current(struct usb_phy *usb_phy,
+ unsigned int mA)
+{
+}
+
+static inline void usb_phy_get_charger_current(struct usb_phy *usb_phy,
+ unsigned int *min,
+ unsigned int *max)
+{
+}
+
+static inline void usb_phy_set_charger_state(struct usb_phy *usb_phy,
+ enum usb_charger_state state)
+{
+}
#endif
static inline int
usb_phy_set_power(struct usb_phy *x, unsigned mA)
{
- if (x && x->set_power)
+ if (!x)
+ return 0;
+
+ usb_phy_set_charger_current(x, mA);
+
+ if (x->set_power)
return x->set_power(x, mA);
return 0;
}
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index b3575ce29148..c18e01252346 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -112,8 +112,9 @@ extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t,
extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
extern int proc_setgroups_show(struct seq_file *m, void *v);
extern bool userns_may_setgroups(const struct user_namespace *ns);
+extern bool in_userns(const struct user_namespace *ancestor,
+ const struct user_namespace *child);
extern bool current_in_userns(const struct user_namespace *target_ns);
-
struct ns_common *ns_get_owner(struct ns_common *ns);
#else
@@ -144,6 +145,12 @@ static inline bool userns_may_setgroups(const struct user_namespace *ns)
return true;
}
+static inline bool in_userns(const struct user_namespace *ancestor,
+ const struct user_namespace *child)
+{
+ return true;
+}
+
static inline bool current_in_userns(const struct user_namespace *target_ns)
{
return true;
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 5209b5ed2a64..32fb046f2173 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -18,9 +18,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
case VIRTIO_NET_HDR_GSO_TCPV6:
gso_type = SKB_GSO_TCPV6;
break;
- case VIRTIO_NET_HDR_GSO_UDP:
- gso_type = SKB_GSO_UDP;
- break;
default:
return -EINVAL;
}
@@ -73,8 +70,6 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (sinfo->gso_type & SKB_GSO_TCPV6)
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
- else if (sinfo->gso_type & SKB_GSO_UDP)
- hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
else
return -EINVAL;
if (sinfo->gso_type & SKB_GSO_TCP_ECN)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 37e8d31a4632..d77bc35278b0 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -85,6 +85,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#endif
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
+ THP_SWPOUT,
+ THP_SWPOUT_FALLBACK,
#endif
#ifdef CONFIG_MEMORY_BALLOON
BALLOON_INFLATE,
@@ -104,6 +106,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
VMACACHE_FIND_HITS,
VMACACHE_FULL_FLUSHES,
#endif
+#ifdef CONFIG_SWAP
+ SWAP_RA,
+ SWAP_RA_HIT,
+#endif
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index b3d85f30d424..ade7cb5f1359 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -107,8 +107,37 @@ static inline void vm_events_fold_cpu(int cpu)
* Zone and node-based page accounting with per cpu differentials.
*/
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
+extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
+#ifdef CONFIG_NUMA
+static inline void zone_numa_state_add(long x, struct zone *zone,
+ enum numa_stat_item item)
+{
+ atomic_long_add(x, &zone->vm_numa_stat[item]);
+ atomic_long_add(x, &vm_numa_stat[item]);
+}
+
+static inline unsigned long global_numa_state(enum numa_stat_item item)
+{
+ long x = atomic_long_read(&vm_numa_stat[item]);
+
+ return x;
+}
+
+static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
+ enum numa_stat_item item)
+{
+ long x = atomic_long_read(&zone->vm_numa_stat[item]);
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];
+
+ return x;
+}
+#endif /* CONFIG_NUMA */
+
static inline void zone_page_state_add(long x, struct zone *zone,
enum zone_stat_item item)
{
@@ -123,7 +152,7 @@ static inline void node_page_state_add(long x, struct pglist_data *pgdat,
atomic_long_add(x, &vm_node_stat[item]);
}
-static inline unsigned long global_page_state(enum zone_stat_item item)
+static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
@@ -194,12 +223,14 @@ static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
#ifdef CONFIG_NUMA
+extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
- enum zone_stat_item item);
+ enum zone_stat_item item);
+extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
enum node_stat_item item);
#else
-#define sum_zone_node_page_state(node, item) global_page_state(item)
+#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
index f38c10ba3ff5..30b6e0d2a942 100644
--- a/include/linux/vt_buffer.h
+++ b/include/linux/vt_buffer.h
@@ -13,6 +13,7 @@
#ifndef _LINUX_VT_BUFFER_H_
#define _LINUX_VT_BUFFER_H_
+#include <linux/string.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
#include <asm/vga.h>
@@ -26,24 +27,33 @@
#ifndef VT_BUF_HAVE_MEMSETW
static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
{
+#ifdef VT_BUF_HAVE_RW
count /= 2;
while (count--)
scr_writew(c, s++);
+#else
+ memset16(s, c, count / 2);
+#endif
}
#endif
#ifndef VT_BUF_HAVE_MEMCPYW
static inline void scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
{
+#ifdef VT_BUF_HAVE_RW
count /= 2;
while (count--)
scr_writew(scr_readw(s++), d++);
+#else
+ memcpy(d, s, count);
+#endif
}
#endif
#ifndef VT_BUF_HAVE_MEMMOVEW
static inline void scr_memmovew(u16 *d, const u16 *s, unsigned int count)
{
+#ifdef VT_BUF_HAVE_RW
if (d < s)
scr_memcpyw(d, s, count);
else {
@@ -53,6 +63,9 @@ static inline void scr_memmovew(u16 *d, const u16 *s, unsigned int count)
while (count--)
scr_writew(scr_readw(--s), --d);
}
+#else
+ memmove(d, s, count);
+#endif
}
#endif
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 90cbe7e65059..5b2972946dda 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -68,6 +68,7 @@ struct w1_reg_num {
* @family: module for device family type
* @family_data: pointer for use by the family module
* @dev: kernel device identifier
+ * @hwmon: pointer to hwmon device
*
*/
struct w1_slave {
@@ -83,6 +84,7 @@ struct w1_slave {
struct w1_family *family;
void *family_data;
struct device dev;
+ struct device *hwmon;
};
typedef void (*w1_slave_found_callback)(struct w1_master *, u64);
@@ -250,11 +252,13 @@ void w1_remove_master_device(struct w1_bus_master *master);
* @add_slave: add_slave
* @remove_slave: remove_slave
* @groups: sysfs group
+ * @chip_info: pointer to struct hwmon_chip_info
*/
struct w1_family_ops {
int (*add_slave)(struct w1_slave *sl);
void (*remove_slave)(struct w1_slave *sl);
const struct attribute_group **groups;
+ const struct hwmon_chip_info *chip_info;
};
/**
diff --git a/include/linux/wait.h b/include/linux/wait.h
index dc19880c02f5..87c4641023fb 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -18,6 +18,7 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
+#define WQ_FLAG_BOOKMARK 0x04
/*
* A single wait-queue entry structure:
@@ -184,6 +185,8 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
+ unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index db6dc9dc0482..1c49431f3121 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -323,8 +323,8 @@ enum {
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
- __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
+ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h
new file mode 100644
index 000000000000..9e1f42cb57e9
--- /dev/null
+++ b/include/linux/xxhash.h
@@ -0,0 +1,236 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Copyright (C) 2012-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at:
+ * - xxHash homepage: http://cyan4973.github.io/xxHash/
+ * - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+/*
+ * Notice extracted from xxHash homepage:
+ *
+ * xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+ * It also successfully passes all tests from the SMHasher suite.
+ *
+ * Comparison (single thread, 32-bit Windows 7, using SMHasher on a Core 2
+ * Duo @ 3 GHz)
+ *
+ * Name Speed Q.Score Author
+ * xxHash 5.4 GB/s 10
+ * CrapWow 3.2 GB/s 2 Andrew
+ * MurmurHash 3a 2.7 GB/s 10 Austin Appleby
+ * SpookyHash 2.0 GB/s 10 Bob Jenkins
+ * SBox 1.4 GB/s 9 Bret Mulvey
+ * Lookup3 1.2 GB/s 9 Bob Jenkins
+ * SuperFastHash 1.2 GB/s 1 Paul Hsieh
+ * CityHash64 1.05 GB/s 10 Pike & Alakuijala
+ * FNV 0.55 GB/s 5 Fowler, Noll, Vo
+ * CRC32 0.43 GB/s 9
+ * MD5-32 0.33 GB/s 10 Ronald L. Rivest
+ * SHA1-32 0.28 GB/s 10
+ *
+ * Q.Score is a measure of quality of the hash function.
+ * It depends on successfully passing SMHasher test set.
+ * 10 is a perfect score.
+ *
+ * A 64-bit version, named xxh64, offers much better speed, but is for
+ * 64-bit applications only.
+ * Name Speed on 64 bits Speed on 32 bits
+ * xxh64 13.8 GB/s 1.9 GB/s
+ * xxh32 6.8 GB/s 6.0 GB/s
+ */
+
+#ifndef XXHASH_H
+#define XXHASH_H
+
+#include <linux/types.h>
+
+/*-****************************
+ * Simple Hash Functions
+ *****************************/
+
+/**
+ * xxh32() - calculate the 32-bit hash of the input with a given seed.
+ *
+ * @input: The data to hash.
+ * @length: The length of the data to hash.
+ * @seed: The seed can be used to alter the result predictably.
+ *
+ * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
+ *
+ * Return: The 32-bit hash of the data.
+ */
+uint32_t xxh32(const void *input, size_t length, uint32_t seed);
+
+/**
+ * xxh64() - calculate the 64-bit hash of the input with a given seed.
+ *
+ * @input: The data to hash.
+ * @length: The length of the data to hash.
+ * @seed: The seed can be used to alter the result predictably.
+ *
+ * This function runs about 2x faster than xxh32() on 64-bit systems, but
+ * slower than xxh32() on 32-bit systems.
+ *
+ * Return: The 64-bit hash of the data.
+ */
+uint64_t xxh64(const void *input, size_t length, uint64_t seed);
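+
+/*
+ * A minimal usage sketch of the one-shot interface; the buffer and the zero
+ * seed below are arbitrary:
+ *
+ *	const char buf[] = "xxhash example";
+ *	uint32_t h32 = xxh32(buf, sizeof(buf), 0);
+ *	uint64_t h64 = xxh64(buf, sizeof(buf), 0);
+ *
+ * The same data hashed with the same seed always produces the same value.
+ */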
+
+/*-****************************
+ * Streaming Hash Functions
+ *****************************/
+
+/*
+ * These definitions are only meant to allow allocation of XXH state
+ * statically, on stack, or in a struct for example.
+ * Do not use members directly.
+ */
+
+/**
+ * struct xxh32_state - private xxh32 state, do not use members directly
+ */
+struct xxh32_state {
+ uint32_t total_len_32;
+ uint32_t large_len;
+ uint32_t v1;
+ uint32_t v2;
+ uint32_t v3;
+ uint32_t v4;
+ uint32_t mem32[4];
+ uint32_t memsize;
+};
+
+/**
+ * struct xxh64_state - private xxh64 state, do not use members directly
+ */
+struct xxh64_state {
+ uint64_t total_len;
+ uint64_t v1;
+ uint64_t v2;
+ uint64_t v3;
+ uint64_t v4;
+ uint64_t mem64[4];
+ uint32_t memsize;
+};
+
+/**
+ * xxh32_reset() - reset the xxh32 state to start a new hashing operation
+ *
+ * @state: The xxh32 state to reset.
+ * @seed: Initialize the hash state with this seed.
+ *
+ * Call this function on any xxh32_state to prepare for a new hashing operation.
+ */
+void xxh32_reset(struct xxh32_state *state, uint32_t seed);
+
+/**
+ * xxh32_update() - hash the data given and update the xxh32 state
+ *
+ * @state: The xxh32 state to update.
+ * @input: The data to hash.
+ * @length: The length of the data to hash.
+ *
+ * After calling xxh32_reset() call xxh32_update() as many times as necessary.
+ *
+ * Return: Zero on success, otherwise an error code.
+ */
+int xxh32_update(struct xxh32_state *state, const void *input, size_t length);
+
+/**
+ * xxh32_digest() - produce the current xxh32 hash
+ *
+ * @state: Produce the current xxh32 hash of this state.
+ *
+ * A hash value can be produced at any time. It is still possible to continue
+ * inserting input into the hash state after a call to xxh32_digest(), and
+ * generate new hashes later on, by calling xxh32_digest() again.
+ *
+ * Return: The xxh32 hash stored in the state.
+ */
+uint32_t xxh32_digest(const struct xxh32_state *state);
+
+/**
+ * xxh64_reset() - reset the xxh64 state to start a new hashing operation
+ *
+ * @state: The xxh64 state to reset.
+ * @seed: Initialize the hash state with this seed.
+ */
+void xxh64_reset(struct xxh64_state *state, uint64_t seed);
+
+/**
+ * xxh64_update() - hash the data given and update the xxh64 state
+ * @state: The xxh64 state to update.
+ * @input: The data to hash.
+ * @length: The length of the data to hash.
+ *
+ * After calling xxh64_reset() call xxh64_update() as many times as necessary.
+ *
+ * Return: Zero on success, otherwise an error code.
+ */
+int xxh64_update(struct xxh64_state *state, const void *input, size_t length);
+
+/**
+ * xxh64_digest() - produce the current xxh64 hash
+ *
+ * @state: Produce the current xxh64 hash of this state.
+ *
+ * A hash value can be produced at any time. It is still possible to continue
+ * inserting input into the hash state after a call to xxh64_digest(), and
+ * generate new hashes later on, by calling xxh64_digest() again.
+ *
+ * Return: The xxh64 hash stored in the state.
+ */
+uint64_t xxh64_digest(const struct xxh64_state *state);
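+
+/*
+ * A minimal streaming sketch, hashing two fragments as one message; frag1
+ * and frag2 stand in for caller-provided buffers:
+ *
+ *	struct xxh64_state state;
+ *	uint64_t hash;
+ *
+ *	xxh64_reset(&state, 0);
+ *	if (xxh64_update(&state, frag1, frag1_len))
+ *		goto err;
+ *	if (xxh64_update(&state, frag2, frag2_len))
+ *		goto err;
+ *	hash = xxh64_digest(&state);
+ *
+ * The result equals xxh64() over the concatenated fragments.
+ */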
+
+/*-**************************
+ * Utils
+ ***************************/
+
+/**
+ * xxh32_copy_state() - copy the source state into the destination state
+ *
+ * @dst: The destination xxh32 state.
+ * @src: The source xxh32 state.
+ */
+void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src);
+
+/**
+ * xxh64_copy_state() - copy the source state into the destination state
+ *
+ * @dst: The destination xxh64 state.
+ * @src: The source xxh64 state.
+ */
+void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src);
+
+#endif /* XXHASH_H */
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
new file mode 100644
index 000000000000..249575e2485f
--- /dev/null
+++ b/include/linux/zstd.h
@@ -0,0 +1,1157 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of https://github.com/facebook/zstd.
+ * An additional grant of patent rights can be found in the PATENTS file in the
+ * same directory.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ */
+
+#ifndef ZSTD_H
+#define ZSTD_H
+
+/* ====== Dependency ======*/
+#include <linux/types.h> /* size_t */
+
+
+/*-*****************************************************************************
+ * Introduction
+ *
+ * zstd, short for Zstandard, is a fast lossless compression algorithm,
+ * targeting real-time compression scenarios at zlib-level and better
+ * compression ratios. The zstd compression library provides in-memory
+ * compression and decompression functions. The library supports compression
+ * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
+ * ultra, should be used with caution, as they require more memory.
+ * Compression can be done in:
+ * - a single step, reusing a context (described as Explicit memory management)
+ * - unbounded multiple steps (described as Streaming compression)
+ * The compression ratio achievable on small data can be highly improved using
+ * compression with a dictionary in:
+ * - a single step (described as Simple dictionary API)
+ * - a single step, reusing a dictionary (described as Fast dictionary API)
+ ******************************************************************************/
+
+/*====== Helper functions ======*/
+
+/**
+ * enum ZSTD_ErrorCode - zstd error codes
+ *
+ * Functions that return size_t can be checked for errors using ZSTD_isError()
+ * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
+ */
+typedef enum {
+ ZSTD_error_no_error,
+ ZSTD_error_GENERIC,
+ ZSTD_error_prefix_unknown,
+ ZSTD_error_version_unsupported,
+ ZSTD_error_parameter_unknown,
+ ZSTD_error_frameParameter_unsupported,
+ ZSTD_error_frameParameter_unsupportedBy32bits,
+ ZSTD_error_frameParameter_windowTooLarge,
+ ZSTD_error_compressionParameter_unsupported,
+ ZSTD_error_init_missing,
+ ZSTD_error_memory_allocation,
+ ZSTD_error_stage_wrong,
+ ZSTD_error_dstSize_tooSmall,
+ ZSTD_error_srcSize_wrong,
+ ZSTD_error_corruption_detected,
+ ZSTD_error_checksum_wrong,
+ ZSTD_error_tableLog_tooLarge,
+ ZSTD_error_maxSymbolValue_tooLarge,
+ ZSTD_error_maxSymbolValue_tooSmall,
+ ZSTD_error_dictionary_corrupted,
+ ZSTD_error_dictionary_wrong,
+ ZSTD_error_dictionaryCreation_failed,
+ ZSTD_error_maxCode
+} ZSTD_ErrorCode;
+
+/**
+ * ZSTD_maxCLevel() - maximum compression level available
+ *
+ * Return: Maximum compression level available.
+ */
+int ZSTD_maxCLevel(void);
+/**
+ * ZSTD_compressBound() - maximum compressed size in worst case scenario
+ * @srcSize: The size of the data to compress.
+ *
+ * Return: The maximum compressed size in the worst case scenario.
+ */
+size_t ZSTD_compressBound(size_t srcSize);
+/**
+ * ZSTD_isError() - tells if a size_t function result is an error code
+ * @code: The function result to check for error.
+ *
+ * Return: Non-zero iff the code is an error.
+ */
+static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
+{
+ return code > (size_t)-ZSTD_error_maxCode;
+}
+/**
+ * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
+ * @functionResult: The result of a function for which ZSTD_isError() is true.
+ *
+ * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0
+ * if the functionResult isn't an error.
+ */
+static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
+ size_t functionResult)
+{
+ if (!ZSTD_isError(functionResult))
+ return (ZSTD_ErrorCode)0;
+ return (ZSTD_ErrorCode)(0 - functionResult);
+}
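+
+/*
+ * A minimal sketch of the error-handling convention shared by the size_t
+ * returning functions below; `ret` stands in for any such result:
+ *
+ *	if (ZSTD_isError(ret)) {
+ *		ZSTD_ErrorCode err = ZSTD_getErrorCode(ret);
+ *		handle err, e.g. ZSTD_error_dstSize_tooSmall
+ *	}
+ */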
+
+/**
+ * enum ZSTD_strategy - zstd compression search strategy
+ *
+ * From faster to stronger.
+ */
+typedef enum {
+ ZSTD_fast,
+ ZSTD_dfast,
+ ZSTD_greedy,
+ ZSTD_lazy,
+ ZSTD_lazy2,
+ ZSTD_btlazy2,
+ ZSTD_btopt,
+ ZSTD_btopt2
+} ZSTD_strategy;
+
+/**
+ * struct ZSTD_compressionParameters - zstd compression parameters
+ * @windowLog: Log of the largest match distance. Larger means more
+ * compression, and more memory needed during decompression.
+ * @chainLog: Fully searched segment. Larger means more compression, slower,
+ * and more memory (useless for fast).
+ * @hashLog: Dispatch table. Larger means more compression,
+ * slower, and more memory.
+ * @searchLog: Number of searches. Larger means more compression and slower.
+ * @searchLength: Match length searched. Larger means faster decompression,
+ * sometimes less compression.
+ * @targetLength: Acceptable match size for optimal parser (only). Larger means
+ * more compression, and slower.
+ * @strategy: The zstd compression strategy.
+ */
+typedef struct {
+ unsigned int windowLog;
+ unsigned int chainLog;
+ unsigned int hashLog;
+ unsigned int searchLog;
+ unsigned int searchLength;
+ unsigned int targetLength;
+ ZSTD_strategy strategy;
+} ZSTD_compressionParameters;
+
+/**
+ * struct ZSTD_frameParameters - zstd frame parameters
+ * @contentSizeFlag: Controls whether content size will be present in the frame
+ * header (when known).
+ * @checksumFlag: Controls whether a 32-bit checksum is generated at the end
+ * of the frame for error detection.
+ * @noDictIDFlag: Controls whether dictID will be saved into the frame header
+ * when using dictionary compression.
+ *
+ * The default value is all fields set to 0.
+ */
+typedef struct {
+ unsigned int contentSizeFlag;
+ unsigned int checksumFlag;
+ unsigned int noDictIDFlag;
+} ZSTD_frameParameters;
+
+/**
+ * struct ZSTD_parameters - zstd parameters
+ * @cParams: The compression parameters.
+ * @fParams: The frame parameters.
+ */
+typedef struct {
+ ZSTD_compressionParameters cParams;
+ ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+/**
+ * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level
+ * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
+ * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
+ * @dictSize: The dictionary size or 0 if a dictionary isn't being used.
+ *
+ * Return: The selected ZSTD_compressionParameters.
+ */
+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel,
+ unsigned long long estimatedSrcSize, size_t dictSize);
+
+/**
+ * ZSTD_getParams() - returns ZSTD_parameters for selected level
+ * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
+ * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
+ * @dictSize: The dictionary size or 0 if a dictionary isn't being used.
+ *
+ * The same as ZSTD_getCParams() except also selects the default frame
+ * parameters (all zero).
+ *
+ * Return: The selected ZSTD_parameters.
+ */
+ZSTD_parameters ZSTD_getParams(int compressionLevel,
+ unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*-*************************************
+ * Explicit memory management
+ **************************************/
+
+/**
+ * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx
+ * @cParams: The compression parameters to be used for compression.
+ *
+ * If multiple compression parameters might be used, the caller must call
+ * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum
+ * size.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ * ZSTD_initCCtx().
+ */
+size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams);
+
+/**
+ * struct ZSTD_CCtx - the zstd compression context
+ *
+ * When compressing many times it is recommended to allocate a context just once
+ * and reuse it for each successive compression operation.
+ */
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
+/**
+ * ZSTD_initCCtx() - initialize a zstd compression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to
+ * determine how large the workspace must be.
+ *
+ * Return: A compression context emplaced into workspace.
+ */
+ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize);
+
+/**
+ * ZSTD_compressCCtx() - compress src into dst
+ * @ctx: The context. Must have been initialized with a workspace at
+ * least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
+ * @dst: The buffer to compress src into.
+ * @dstCapacity: The size of the destination buffer. May be any size, but
+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @srcSize: The size of the data to compress.
+ * @params: The parameters to use for compression. See ZSTD_getParams().
+ *
+ * Return: The compressed size or an error, which can be checked using
+ * ZSTD_isError().
+ */
+size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
+ const void *src, size_t srcSize, ZSTD_parameters params);
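+
+/*
+ * A minimal one-shot compression sketch, assuming a kernel context where
+ * kvmalloc() is available; compression level 3 is an arbitrary choice:
+ *
+ *	ZSTD_parameters params = ZSTD_getParams(3, srcSize, 0);
+ *	size_t wkspSize = ZSTD_CCtxWorkspaceBound(params.cParams);
+ *	void *wksp = kvmalloc(wkspSize, GFP_KERNEL);
+ *	ZSTD_CCtx *cctx = wksp ? ZSTD_initCCtx(wksp, wkspSize) : NULL;
+ *	size_t cSize;
+ *
+ *	if (!cctx)
+ *		goto err;
+ *	cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize,
+ *		params);
+ *	if (ZSTD_isError(cSize))
+ *		goto err;
+ */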
+
+/**
+ * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ * ZSTD_initDCtx().
+ */
+size_t ZSTD_DCtxWorkspaceBound(void);
+
+/**
+ * struct ZSTD_DCtx - the zstd decompression context
+ *
+ * When decompressing many times it is recommended to allocate a context just
+ * once and reuse it for each successive decompression operation.
+ */
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+/**
+ * ZSTD_initDCtx() - initialize a zstd decompression context
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to
+ * determine how large the workspace must be.
+ *
+ * Return: A decompression context emplaced into workspace.
+ */
+ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize);
+
+/**
+ * ZSTD_decompressDCtx() - decompress zstd compressed src into dst
+ * @ctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dstCapacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @srcSize: The exact size of the data to decompress.
+ *
+ * Return: The decompressed size or an error, which can be checked using
+ * ZSTD_isError().
+ */
+size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
+ const void *src, size_t srcSize);
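+
+/*
+ * A minimal one-shot decompression sketch; dstCapacity must be an upper
+ * bound on the decompressed size, e.g. taken from
+ * ZSTD_getFrameContentSize() below:
+ *
+ *	size_t wkspSize = ZSTD_DCtxWorkspaceBound();
+ *	void *wksp = kvmalloc(wkspSize, GFP_KERNEL);
+ *	ZSTD_DCtx *dctx = wksp ? ZSTD_initDCtx(wksp, wkspSize) : NULL;
+ *	size_t dSize;
+ *
+ *	if (!dctx)
+ *		goto err;
+ *	dSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+ *	if (ZSTD_isError(dSize))
+ *		goto err;
+ */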
+
+/*-************************
+ * Simple dictionary API
+ **************************/
+
+/**
+ * ZSTD_compress_usingDict() - compress src into dst using a dictionary
+ * @ctx: The context. Must have been initialized with a workspace at
+ * least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
+ * @dst: The buffer to compress src into.
+ * @dstCapacity: The size of the destination buffer. May be any size, but
+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @srcSize: The size of the data to compress.
+ * @dict: The dictionary to use for compression.
+ * @dictSize: The size of the dictionary.
+ * @params: The parameters to use for compression. See ZSTD_getParams().
+ *
+ * Compression using a predefined dictionary. The same dictionary must be used
+ * during decompression.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ * ZSTD_isError().
+ */
+size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
+ const void *src, size_t srcSize, const void *dict, size_t dictSize,
+ ZSTD_parameters params);
+
+/**
+ * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary
+ * @ctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dstCapacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @srcSize: The exact size of the data to decompress.
+ * @dict: The dictionary to use for decompression. The same dictionary
+ * must've been used to compress the data.
+ * @dictSize: The size of the dictionary.
+ *
+ * Return: The decompressed size or an error, which can be checked using
+ * ZSTD_isError().
+ */
+size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
+ const void *src, size_t srcSize, const void *dict, size_t dictSize);
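+
+/*
+ * A minimal round-trip sketch with a predefined dictionary; cctx and dctx
+ * are contexts initialized as above, and both results should be checked
+ * with ZSTD_isError():
+ *
+ *	size_t cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity, src,
+ *		srcSize, dict, dictSize, params);
+ *	size_t dSize = ZSTD_decompress_usingDict(dctx, out, outCapacity, dst,
+ *		cSize, dict, dictSize);
+ */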
+
+/*-**************************
+ * Fast dictionary API
+ ***************************/
+
+/**
+ * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict
+ * @cParams: The compression parameters to be used for compression.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ * ZSTD_initCDict().
+ */
+size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams);
+
+/**
+ * struct ZSTD_CDict - a digested dictionary to be used for compression
+ */
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/**
+ * ZSTD_initCDict() - initialize a digested dictionary for compression
+ * @dictBuffer: The dictionary to digest. The buffer is referenced by the
+ * ZSTD_CDict so it must outlive the returned ZSTD_CDict.
+ * @dictSize: The size of the dictionary.
+ * @params: The parameters to use for compression. See ZSTD_getParams().
+ * @workspace: The workspace. It must outlive the returned ZSTD_CDict.
+ * @workspaceSize: The workspace size. Must be at least
+ * ZSTD_CDictWorkspaceBound(params.cParams).
+ *
+ * When compressing multiple messages / blocks with the same dictionary it is
+ * recommended to load it just once. The ZSTD_CDict merely references the
+ * dictBuffer, so it must outlive the returned ZSTD_CDict.
+ *
+ * Return: The digested dictionary emplaced into workspace.
+ */
+ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
+ ZSTD_parameters params, void *workspace, size_t workspaceSize);
+
+/**
+ * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
+ * @cctx: The context. Must have been initialized with a workspace at
+ * least as large as ZSTD_CCtxWorkspaceBound(cParams) where
+ * cParams are the compression parameters used to initialize the
+ * cdict.
+ * @dst: The buffer to compress src into.
+ * @dstCapacity: The size of the destination buffer. May be any size, but
+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @srcSize: The size of the data to compress.
+ * @cdict: The digested dictionary to use for compression.
+ *
+ * Compression using a digested dictionary. The same dictionary must be used
+ * during decompression.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ * ZSTD_isError().
+ */
+size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
+ const void *src, size_t srcSize, const ZSTD_CDict *cdict);
+
+
+/**
+ * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ * ZSTD_initDDict().
+ */
+size_t ZSTD_DDictWorkspaceBound(void);
+
+/**
+ * struct ZSTD_DDict - a digested dictionary to be used for decompression
+ */
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/**
+ * ZSTD_initDDict() - initialize a digested dictionary for decompression
+ * @dictBuffer: The dictionary to digest. The buffer is referenced by the
+ * ZSTD_DDict so it must outlive the returned ZSTD_DDict.
+ * @dictSize: The size of the dictionary.
+ * @workspace: The workspace. It must outlive the returned ZSTD_DDict.
+ * @workspaceSize: The workspace size. Must be at least
+ * ZSTD_DDictWorkspaceBound().
+ *
+ * When decompressing multiple messages / blocks with the same dictionary it is
+ * recommended to load it just once. The ZSTD_DDict merely references the
+ * dictBuffer, so it must outlive the returned ZSTD_DDict.
+ *
+ * Return: The digested dictionary emplaced into workspace.
+ */
+ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
+ void *workspace, size_t workspaceSize);
+
+/**
+ * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dstCapacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @srcSize: The exact size of the data to decompress.
+ * @ddict: The digested dictionary to use for decompression. The same
+ * dictionary must've been used to compress the data.
+ *
+ * Return: The decompressed size or an error, which can be checked using
+ * ZSTD_isError().
+ */
+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
+ size_t dstCapacity, const void *src, size_t srcSize,
+ const ZSTD_DDict *ddict);
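+
+/*
+ * A minimal sketch of reusing one digested dictionary across many messages;
+ * the workspaces follow the same allocation pattern as the contexts above:
+ *
+ *	ZSTD_CDict *cdict = ZSTD_initCDict(dict, dictSize, params,
+ *		cdictWksp, ZSTD_CDictWorkspaceBound(params.cParams));
+ *	ZSTD_DDict *ddict = ZSTD_initDDict(dict, dictSize,
+ *		ddictWksp, ZSTD_DDictWorkspaceBound());
+ *
+ *	for each message:
+ *		cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
+ *			msg, msgSize, cdict);
+ *		dSize = ZSTD_decompress_usingDDict(dctx, out, outCapacity,
+ *			dst, cSize, ddict);
+ *
+ * Digesting the dictionary once amortizes its load cost over all messages.
+ */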
+
+
+/*-**************************
+ * Streaming
+ ***************************/
+
+/**
+ * struct ZSTD_inBuffer - input buffer for streaming
+ * @src: Start of the input buffer.
+ * @size: Size of the input buffer.
+ * @pos: Position where reading stopped. Will be updated.
+ * Necessarily 0 <= pos <= size.
+ */
+typedef struct ZSTD_inBuffer_s {
+ const void *src;
+ size_t size;
+ size_t pos;
+} ZSTD_inBuffer;
+
+/**
+ * struct ZSTD_outBuffer - output buffer for streaming
+ * @dst: Start of the output buffer.
+ * @size: Size of the output buffer.
+ * @pos: Position where writing stopped. Will be updated.
+ * Necessarily 0 <= pos <= size.
+ */
+typedef struct ZSTD_outBuffer_s {
+ void *dst;
+ size_t size;
+ size_t pos;
+} ZSTD_outBuffer;
+
+
+
+/*-*****************************************************************************
+ * Streaming compression - HowTo
+ *
+ * A ZSTD_CStream object is required to track streaming operation.
+ * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
+ * ZSTD_CStream objects can be reused multiple times on consecutive
+ * compression operations. It is recommended to reuse a ZSTD_CStream in
+ * situations where many streaming operations will be performed
+ * consecutively. Use one separate ZSTD_CStream per thread for parallel
+ * execution.
+ *
+ * Use ZSTD_compressStream() repetitively to consume the input stream.
+ * The function will automatically update both `pos` fields.
+ * Note that it may not consume the entire input, in which case `pos < size`,
+ * and it's up to the caller to present the remaining data again.
+ * It returns a hint for the preferred number of bytes to use as an input for
+ * the next function call.
+ *
+ * At any moment, it's possible to flush whatever data remains within the
+ * internal buffer, using ZSTD_flushStream(). `output->pos` will be updated.
+ * There might still be some content left within the internal buffer if
+ * `output->size` is too small. It returns the number of bytes left in the
+ * internal buffer and must be called until it returns 0.
+ *
+ * ZSTD_endStream() instructs to finish a frame. It will perform a flush and
+ * write the frame epilogue. The epilogue is required for decoders to consider
+ * a frame completed. Similar to ZSTD_flushStream(), it may not be able to
+ * flush the full content if `output->size` is too small; in that case, call
+ * ZSTD_endStream() again to complete the flush. It returns the number of
+ * bytes left in the internal buffer and must be called until it returns 0.
+ ******************************************************************************/
+
+/**
+ * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
+ * @cParams: The compression parameters to be used for compression.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ * ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
+ */
+size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams);
+
+/**
+ * struct ZSTD_CStream - the zstd streaming compression context
+ */
+typedef struct ZSTD_CStream_s ZSTD_CStream;
+
+/*===== ZSTD_CStream management functions =====*/
+/**
+ * ZSTD_initCStream() - initialize a zstd streaming compression context
+ * @params: The zstd compression parameters.
+ * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must
+ * pass the source size (zero means empty source). Otherwise,
+ * the caller may optionally pass the source size, or zero if
+ * unknown.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspaceSize: The size of workspace.
+ * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine
+ * how large the workspace must be.
+ *
+ * Return: The zstd streaming compression context.
+ */
+ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params,
+ unsigned long long pledgedSrcSize, void *workspace,
+ size_t workspaceSize);
+
+/**
+ * ZSTD_initCStream_usingCDict() - initialize a streaming compression context
+ * @cdict: The digested dictionary to use for compression.
+ * @pledgedSrcSize: Optionally the source size, or zero if unknown.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound()
+ * with the cParams used to initialize the cdict to determine
+ * how large the workspace must be.
+ *
+ * Return: The zstd streaming compression context.
+ */
+ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict,
+ unsigned long long pledgedSrcSize, void *workspace,
+ size_t workspaceSize);
+
+/*===== Streaming compression functions =====*/
+/**
+ * ZSTD_resetCStream() - reset the context using parameters from creation
+ * @zcs: The zstd streaming compression context to reset.
+ * @pledgedSrcSize: Optionally the source size, or zero if unknown.
+ *
+ * Resets the context using the parameters from creation. Skips dictionary
+ * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame
+ * content size is always written into the frame header.
+ *
+ * Return: Zero or an error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize);
+/**
+ * ZSTD_compressStream() - streaming compress some of input into output
+ * @zcs: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
+ * @input: Source buffer. `input->pos` is updated to indicate how much data was
+ * read. Note that it may not consume the entire input, in which case
+ * `input->pos < input->size`, and it's up to the caller to present
+ * remaining data again.
+ *
+ * The `input` and `output` buffers may be any size. Guaranteed to make some
+ * forward progress if `input` and `output` are not empty.
+ *
+ * Return: A hint for the number of bytes to use as the input for the next
+ * function call or an error, which can be checked using
+ * ZSTD_isError().
+ */
+size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output,
+ ZSTD_inBuffer *input);
+/**
+ * ZSTD_flushStream() - flush internal buffers into output
+ * @zcs: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
+ *
+ * ZSTD_flushStream() must be called until it returns 0, meaning all the data
+ * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
+ * calling it too often will degrade the compression ratio.
+ *
+ * Return: The number of bytes still present within internal buffers or an
+ * error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
+/**
+ * ZSTD_endStream() - flush internal buffers into output and end the frame
+ * @zcs: The zstd streaming compression context.
+ * @output: Destination buffer. `output->pos` is updated to indicate how much
+ * compressed data was written.
+ *
+ * ZSTD_endStream() must be called until it returns 0, meaning all the data has
+ * been flushed and the frame epilogue has been written.
+ *
+ * Return: The number of bytes still present within internal buffers or an
+ * error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
+
+/**
+ * ZSTD_CStreamInSize() - recommended size for the input buffer
+ *
+ * Return: The recommended size for the input buffer.
+ */
+size_t ZSTD_CStreamInSize(void);
+/**
+ * ZSTD_CStreamOutSize() - recommended size for the output buffer
+ *
+ * When the output buffer is at least this large, it is guaranteed to be large
+ * enough to flush at least one complete compressed block.
+ *
+ * Return: The recommended size for the output buffer.
+ */
+size_t ZSTD_CStreamOutSize(void);
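+
+/*
+ * A minimal streaming compression sketch following the HowTo above;
+ * fill_input() and write_output() are stand-ins for however the caller
+ * moves data:
+ *
+ *	ZSTD_inBuffer in = { inBuf, 0, 0 };
+ *	ZSTD_outBuffer out = { outBuf, ZSTD_CStreamOutSize(), 0 };
+ *	size_t ret;
+ *
+ *	while ((in.size = fill_input(inBuf, inBufSize)) != 0) {
+ *		in.pos = 0;
+ *		while (in.pos < in.size) {
+ *			out.pos = 0;
+ *			ret = ZSTD_compressStream(zcs, &out, &in);
+ *			if (ZSTD_isError(ret))
+ *				goto err;
+ *			write_output(out.dst, out.pos);
+ *		}
+ *	}
+ *	do {
+ *		out.pos = 0;
+ *		ret = ZSTD_endStream(zcs, &out);
+ *		if (ZSTD_isError(ret))
+ *			goto err;
+ *		write_output(out.dst, out.pos);
+ *	} while (ret != 0);
+ */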
+
+
+
+/*-*****************************************************************************
+ * Streaming decompression - HowTo
+ *
+ * A ZSTD_DStream object is required to track streaming operations.
+ * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
+ * ZSTD_DStream objects can be re-used multiple times.
+ *
+ * Use ZSTD_decompressStream() repetitively to consume your input.
+ * The function will update both `pos` fields.
+ * If `input->pos < input->size`, some input has not been consumed.
+ * It's up to the caller to present the remaining data again.
+ * If `output->pos < output->size`, the decoder has flushed everything it
+ * could.
+ * Returns 0 iff a frame is completely decoded and fully flushed.
+ * Otherwise it returns a suggested next input size that will never load more
+ * than the current frame.
+ ******************************************************************************/
+
+/**
+ * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
+ * @maxWindowSize: The maximum window size allowed for compressed frames.
+ *
+ * Return: A lower bound on the size of the workspace that is passed to
+ * ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
+ */
+size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
+
+/**
+ * struct ZSTD_DStream - the zstd streaming decompression context
+ */
+typedef struct ZSTD_DStream_s ZSTD_DStream;
+/*===== ZSTD_DStream management functions =====*/
+/**
+ * ZSTD_initDStream() - initialize a zstd streaming decompression context
+ * @maxWindowSize: The maximum window size allowed for compressed frames.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspaceSize: The size of workspace.
+ * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
+ * how large the workspace must be.
+ *
+ * Return: The zstd streaming decompression context.
+ */
+ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
+ size_t workspaceSize);
+/**
+ * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
+ * @maxWindowSize: The maximum window size allowed for compressed frames.
+ * @ddict: The digested dictionary to use for decompression.
+ * @workspace: The workspace to emplace the context into. It must outlive
+ * the returned context.
+ * @workspaceSize: The size of workspace.
+ * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
+ * how large the workspace must be.
+ *
+ * Return: The zstd streaming decompression context.
+ */
+ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
+ const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
+
+/*===== Streaming decompression functions =====*/
+/**
+ * ZSTD_resetDStream() - reset the context using parameters from creation
+ * @zds: The zstd streaming decompression context to reset.
+ *
+ * Resets the context using the parameters from creation. Skips dictionary
+ * loading, since it can be reused.
+ *
+ * Return: Zero or an error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_resetDStream(ZSTD_DStream *zds);
+/**
+ * ZSTD_decompressStream() - streaming decompress some of input into output
+ * @zds: The zstd streaming decompression context.
+ * @output: Destination buffer. `output.pos` is updated to indicate how much
+ * decompressed data was written.
+ * @input: Source buffer. `input.pos` is updated to indicate how much data was
+ * read. Note that it may not consume the entire input, in which case
+ * `input.pos < input.size`, and it's up to the caller to present
+ * remaining data again.
+ *
+ * The `input` and `output` buffers may be any size. Guaranteed to make some
+ * forward progress if `input` and `output` are not empty.
+ * ZSTD_decompressStream() will not consume the last byte of the frame until
+ * the entire frame is flushed.
+ *
+ * Return: Returns 0 iff a frame is completely decoded and fully flushed.
+ * Otherwise returns a hint for the number of bytes to use as the input
+ * for the next function call or an error, which can be checked using
+ * ZSTD_isError(). The size hint will never load more than the frame.
+ */
+size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
+ ZSTD_inBuffer *input);
+
+/**
+ * ZSTD_DStreamInSize() - recommended size for the input buffer
+ *
+ * Return: The recommended size for the input buffer.
+ */
+size_t ZSTD_DStreamInSize(void);
+/**
+ * ZSTD_DStreamOutSize() - recommended size for the output buffer
+ *
+ * When the output buffer is at least this large, it is guaranteed to be large
+ * enough to flush at least one complete decompressed block.
+ *
+ * Return: The recommended size for the output buffer.
+ */
+size_t ZSTD_DStreamOutSize(void);
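+
+/*
+ * A minimal streaming decompression sketch; consume_output() is a stand-in
+ * for however the caller uses the decompressed data, and a return of 0
+ * marks a completely decoded and fully flushed frame:
+ *
+ *	ZSTD_inBuffer in = { inBuf, inSize, 0 };
+ *	ZSTD_outBuffer out = { outBuf, ZSTD_DStreamOutSize(), 0 };
+ *	size_t ret;
+ *
+ *	while (in.pos < in.size) {
+ *		out.pos = 0;
+ *		ret = ZSTD_decompressStream(zds, &out, &in);
+ *		if (ZSTD_isError(ret))
+ *			goto err;
+ *		consume_output(out.dst, out.pos);
+ *	}
+ */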
+
+
+/* --- Constants ---*/
+#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U
+
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
+
+#define ZSTD_WINDOWLOG_MAX_32 27
+#define ZSTD_WINDOWLOG_MAX_64 27
+#define ZSTD_WINDOWLOG_MAX \
+ ((unsigned int)(sizeof(size_t) == 4 \
+ ? ZSTD_WINDOWLOG_MAX_32 \
+ : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN 10
+#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
+#define ZSTD_HASHLOG_MIN 6
+#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
+#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN
+#define ZSTD_HASHLOG3_MAX 17
+#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+/* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_SEARCHLENGTH_MAX 7
+/* only for ZSTD_btopt, other strategies are limited to 4 */
+#define ZSTD_SEARCHLENGTH_MIN 3
+#define ZSTD_TARGETLENGTH_MIN 4
+#define ZSTD_TARGETLENGTH_MAX 999
+
+/* for static allocation */
+#define ZSTD_FRAMEHEADERSIZE_MAX 18
+#define ZSTD_FRAMEHEADERSIZE_MIN 6
+static const size_t ZSTD_frameHeaderSize_prefix = 5;
+static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
+static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
+/* magic number + skippable frame length */
+static const size_t ZSTD_skippableHeaderSize = 8;
+
+
+/*-*************************************
+ * Compressed size functions
+ **************************************/
+
+/**
+ * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
+ * @src: Source buffer. It should point to the start of a zstd encoded frame
+ * or a skippable frame.
+ * @srcSize: The size of the source buffer. It must be at least as large as the
+ * size of the frame.
+ *
+ * Return: The compressed size of the frame pointed to by `src` or an error,
+ * which can be checked with ZSTD_isError().
+ * Suitable to pass to ZSTD_decompress() or similar functions.
+ */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
+
+/*-*************************************
+ * Decompressed size functions
+ **************************************/
+/**
+ * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
+ * @src: It should point to the start of a zstd encoded frame.
+ * @srcSize: The size of the source buffer. It must be at least as large as the
+ * frame header. `ZSTD_frameHeaderSize_max` is always large enough.
+ *
+ * Return: The frame content size stored in the frame header if known.
+ * `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
+ * frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
+ */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
+
+/**
+ * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
+ * @src: It should point to the start of a series of zstd encoded and/or
+ * skippable frames.
+ * @srcSize: The exact size of the series of frames.
+ *
+ * If any zstd encoded frame in the series doesn't have the frame content size
+ * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned; note that the frame content
+ * size is always set when using ZSTD_compressCCtx(). The decompressed size
+ * can be very large. If the source is untrusted, the decompressed size could
+ * be wrong or intentionally modified. Always ensure the result fits within
+ * the application's authorized limits. ZSTD_findDecompressedSize() handles
+ * multiple frames, and so it must traverse the input to read each frame
+ * header. This is efficient, as most of the data is skipped; however, it
+ * does mean that all frame data must be present and valid.
+ *
+ * Return: Decompressed size of all the data contained in the frames if known.
+ * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
+ * `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
+ */
+unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
+
+/*-*************************************
+ * Advanced compression functions
+ **************************************/
+/**
+ * ZSTD_checkCParams() - ensure parameter values remain within authorized range
+ * @cParams: The zstd compression parameters.
+ *
+ * Return: Zero or an error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
+
+/**
+ * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
+ * @cParams: The compression parameters to adjust.
+ * @srcSize: Optionally the estimated source size, or zero if unknown.
+ * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
+ *
+ * Return: The optimized parameters.
+ */
+ZSTD_compressionParameters ZSTD_adjustCParams(
+ ZSTD_compressionParameters cParams, unsigned long long srcSize,
+ size_t dictSize);
+
+/*--- Advanced decompression functions ---*/
+
+/**
+ * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
+ * @buffer: The source buffer to check.
+ * @size: The size of the source buffer, must be at least 4 bytes.
+ *
+ * Return: True iff the buffer starts with a zstd or skippable frame identifier.
+ */
+unsigned int ZSTD_isFrame(const void *buffer, size_t size);
+
+/**
+ * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
+ * @dict: The dictionary buffer.
+ * @dictSize: The size of the dictionary buffer.
+ *
+ * Return: The dictionary id stored within the dictionary or 0 if the
+ * dictionary is not a zstd dictionary. If it returns 0 the
+ * dictionary can still be loaded as a content-only dictionary.
+ */
+unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
+
+/**
+ * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
+ * @ddict: The ddict to find the id of.
+ *
+ * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
+ * a zstd dictionary. If it returns 0 `ddict` will be loaded as a
+ * content-only dictionary.
+ */
+unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
+
+/**
+ * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
+ * @src: Source buffer. It must be a zstd encoded frame.
+ * @srcSize: The size of the source buffer. It must be at least as large as the
+ * frame header. `ZSTD_frameHeaderSize_max` is always large enough.
+ *
+ * Return: The dictionary id required to decompress the frame stored within
+ * `src` or 0 if the dictionary id could not be decoded. It can return
+ * 0 if the frame does not require a dictionary, the dictionary id
+ * wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
+ * is too small.
+ */
+unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
+
+/**
+ * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
+ * @frameContentSize: The frame content size, or 0 if not present.
+ * @windowSize: The window size, or 0 if the frame is a skippable frame.
+ * @dictID: The dictionary id, or 0 if not present.
+ * @checksumFlag: Whether a checksum was used.
+ */
+typedef struct {
+ unsigned long long frameContentSize;
+ unsigned int windowSize;
+ unsigned int dictID;
+ unsigned int checksumFlag;
+} ZSTD_frameParams;
+
+/**
+ * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
+ * @fparamsPtr: On success the frame parameters are written here.
+ * @src: The source buffer. It must point to a zstd or skippable frame.
+ * @srcSize: The size of the source buffer. `ZSTD_frameHeaderSize_max` is
+ * always large enough to succeed.
+ *
+ * Return: 0 on success. If more data is required it returns how many bytes
+ * must be provided to make forward progress. Otherwise it returns
+ * an error, which can be checked using ZSTD_isError().
+ */
+size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
+ size_t srcSize);
+
+/*-*****************************************************************************
+ * Buffer-less and synchronous inner streaming functions
+ *
+ * This is an advanced API, giving full control over buffer management, for
+ * users who need direct control over memory.
+ * But it's also a complex one, with many restrictions (documented below).
+ * Prefer using the normal streaming API for an easier experience.
+ ******************************************************************************/
+
+/*-*****************************************************************************
+ * Buffer-less streaming compression (synchronous mode)
+ *
+ * A ZSTD_CCtx object is required to track streaming operations.
+ * Use ZSTD_initCCtx() to initialize a context.
+ * ZSTD_CCtx object can be re-used multiple times within successive compression
+ * operations.
+ *
+ * Start by initializing a context.
+ * Use ZSTD_compressBegin(), ZSTD_compressBegin_usingDict() for dictionary
+ * compression, or ZSTD_compressBegin_advanced() for finer parameter control.
+ * It's also possible to duplicate a reference context which has already been
+ * initialized, using ZSTD_copyCCtx().
+ *
+ * Then, consume your input using ZSTD_compressContinue().
+ * There are some important considerations to keep in mind when using this
+ * advanced function :
+ * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
+ * buffer only.
+ * - The interface is synchronous: input is consumed entirely and produces
+ * one or more compressed blocks.
+ * - Caller must ensure there is enough space in `dst` to store compressed data
+ * under worst case scenario. Worst case evaluation is provided by
+ * ZSTD_compressBound().
+ * ZSTD_compressContinue() doesn't guarantee recovery after a failed
+ * compression.
+ * - ZSTD_compressContinue() presumes prior input ***is still accessible and
+ * unmodified*** (up to maximum distance size, see WindowLog).
+ * It remembers all previous contiguous blocks, plus one separated memory
+ * segment (which can itself consist of multiple contiguous blocks).
+ * - ZSTD_compressContinue() detects that prior input has been overwritten
+ * when the `src` buffer overlaps it; in that case, it will "discard" the
+ * relevant memory section from its history.
+ *
+ * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
+ * and an optional checksum. It's possible to use srcSize == 0, in which case
+ * it will write a final empty block to end the frame. Without the last block
+ * mark, frames will be considered unfinished (corrupted) by decoders.
+ *
+ * A `ZSTD_CCtx` object can be reused (via ZSTD_compressBegin()) to compress
+ * a new frame.
+ ******************************************************************************/
+
+/*===== Buffer-less streaming compression functions =====*/
+size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
+ size_t dictSize, int compressionLevel);
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
+ size_t dictSize, ZSTD_parameters params,
+ unsigned long long pledgedSrcSize);
+size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
+ unsigned long long pledgedSrcSize);
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
+ unsigned long long pledgedSrcSize);
+size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
+ const void *src, size_t srcSize);
+size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
+ const void *src, size_t srcSize);
+
+
+
+/*-*****************************************************************************
+ * Buffer-less streaming decompression (synchronous mode)
+ *
+ * A ZSTD_DCtx object is required to track streaming operations.
+ * Use ZSTD_initDCtx() to initialize a context.
+ * A ZSTD_DCtx object can be re-used multiple times.
+ *
+ * The first typical operation is to retrieve frame parameters, using
+ * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provides
+ * important information to correctly decode the frame, such as the minimum
+ * rolling buffer size to allocate to decompress data (`windowSize`), and the
+ * dictionary ID used.
+ * Note: content size is optional; it may not be present, and 0 means unknown.
+ * Note that these values could be wrong, either because of data malformation,
+ * or because an attacker is spoofing deliberate false information. As a
+ * consequence, check that values remain within valid application range,
+ * especially `windowSize`, before allocation. Each application can set its own
+ * limit, depending on local restrictions. For extended interoperability, it is
+ * recommended to support at least 8 MB.
+ * Frame parameters are extracted from the beginning of the compressed frame.
+ * The data fragment must be large enough to ensure successful decoding,
+ * typically `ZSTD_frameHeaderSize_max` bytes.
+ * Result: 0 on successful decoding (the `ZSTD_frameParams` structure is
+ * filled); >0 if `srcSize` is too small (provide at least this many bytes);
+ * or an error code, which can be tested using ZSTD_isError().
+ *
+ * Start decompression, with ZSTD_decompressBegin() or
+ * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
+ * context, using ZSTD_copyDCtx().
+ *
+ * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
+ * alternately.
+ * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
+ * to ZSTD_decompressContinue().
+ * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
+ * fail.
+ *
+ * The result of ZSTD_decompressContinue() is the number of bytes regenerated
+ * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
+ * error; it just means ZSTD_decompressContinue() has decoded some metadata
+ * item. It can also be an error code, which can be tested with ZSTD_isError().
+ *
+ * ZSTD_decompressContinue() needs previous data blocks during decompression, up
+ * to `windowSize`. They should preferably be located contiguously, prior to
+ * the current block. Alternatively, a round buffer of sufficient size (as
+ * determined by the frame parameters) can be used.
+ * ZSTD_decompressContinue() is very sensitive to contiguity; if two blocks
+ * don't follow each other, make sure that either the compressor breaks
+ * contiguity at the same place, or that the previous contiguous segment is
+ * large enough to properly handle the maximum back-reference.
+ *
+ * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ * Context can then be reset to start a new decompression.
+ *
+ * Note: it's possible to know if next input to present is a header or a block,
+ * using ZSTD_nextInputType(). This information is not required to properly
+ * decode a frame.
+ *
+ * == Special case: skippable frames ==
+ *
+ * Skippable frames allow integration of user-defined data into a flow of
+ * concatenated frames. Skippable frames will be ignored (skipped) by a
+ * decompressor. The format of skippable frames is as follows:
+ * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
+ * 0x184D2A50 to 0x184D2A5F
+ * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ * c) Frame Content - any content (User Data) of length equal to Frame Size
+ * For skippable frames ZSTD_decompressContinue() always returns 0.
+ * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowSize==0,
+ * which means that the frame is skippable, and it returns the frame size as
+ * fparamsPtr->frameContentSize.
+ * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
+ * actually be a zstd encoded frame with no content. For purposes of
+ * decompression, it is valid in both cases to skip the frame using
+ * ZSTD_findFrameCompressedSize() to find its size in bytes.
+ ******************************************************************************/
+
+/*===== Buffer-less streaming decompression functions =====*/
+size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
+ size_t dictSize);
+void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
+size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
+ const void *src, size_t srcSize);
+typedef enum {
+ ZSTDnit_frameHeader,
+ ZSTDnit_blockHeader,
+ ZSTDnit_block,
+ ZSTDnit_lastBlock,
+ ZSTDnit_checksum,
+ ZSTDnit_skippableFrame
+} ZSTD_nextInputType_e;
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
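+
+/*
+ * A minimal sketch of the buffer-less decompression loop described above;
+ * next_src() is a stand-in for however the caller supplies exactly the
+ * requested number of source bytes:
+ *
+ *	size_t srcSize, n;
+ *
+ *	ZSTD_decompressBegin(dctx);
+ *	while ((srcSize = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
+ *		n = ZSTD_decompressContinue(dctx, dst, dstCapacity,
+ *			next_src(srcSize), srcSize);
+ *		if (ZSTD_isError(n))
+ *			goto err;
+ *		n bytes (possibly zero) were regenerated into dst
+ *	}
+ */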
+
+/*-*****************************************************************************
+ * Block functions
+ *
+ * Block functions produce and decode raw zstd blocks, without frame metadata.
+ * Frame metadata cost is typically ~18 bytes, which can be non-negligible
+ * for very small blocks (< 100 bytes). The user is responsible for saving
+ * the information required to regenerate the data, such as the compressed
+ * and content sizes.
+ *
+ * A few rules to respect:
+ * - Compressing and decompressing require a context structure
+ * + Use ZSTD_initCCtx() and ZSTD_initDCtx()
+ * - It is necessary to init context before starting
+ * + compression : ZSTD_compressBegin()
+ * + decompression : ZSTD_decompressBegin()
+ * + variants _usingDict() are also allowed
+ * + copyCCtx() and copyDCtx() work too
+ * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
+ * + If you need to compress more, cut data into multiple blocks
+ * + Consider using the regular ZSTD_compress() instead, as frame metadata
+ * costs become negligible when source size is large.
+ * - When a block is considered not compressible enough, the
+ * ZSTD_compressBlock() result will be zero; in that case, nothing is
+ * produced into `dst`.
+ * + The user must test for this outcome and handle the uncompressed data
+ * directly.
+ * + ZSTD_decompressBlock() doesn't accept uncompressed data as input!
+ * + In case of multiple successive blocks, decoder must be informed of
+ * uncompressed block existence to follow proper history. Use
+ * ZSTD_insertBlock() in such a case.
+ ******************************************************************************/
+
+/* Define for static allocation */
+#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
+/*===== Raw zstd block functions =====*/
+size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
+size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
+ const void *src, size_t srcSize);
+size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
+ const void *src, size_t srcSize);
+size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
+ size_t blockSize);
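+
+/*
+ * A minimal sketch of the non-compressible-block rule above; when
+ * ZSTD_compressBlock() returns 0 the block must be stored raw, and the
+ * decoder must be told about it with ZSTD_insertBlock():
+ *
+ *	size_t cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src,
+ *		srcSize);
+ *	if (ZSTD_isError(cSize))
+ *		goto err;
+ *	if (cSize == 0) {
+ *		store src uncompressed; on the decoder side call
+ *		ZSTD_insertBlock(dctx, src, srcSize);
+ *	}
+ */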
+
+#endif /* ZSTD_H */