Diffstat (limited to 'include/linux')
209 files changed, 4392 insertions, 1249 deletions
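Before the per-file hunks, a short standalone sketch of the function-ID encoding introduced by the new include/linux/arm-smccc.h further down; the ARM_SMCCC_* macro definitions are copied from that header, while the SiP function number 0x20 is an arbitrary example value, not something defined by the patch.

#include <stdio.h>
#include <stdint.h>

/* Macros copied from the new include/linux/arm-smccc.h in this series. */
#define ARM_SMCCC_STD_CALL		0
#define ARM_SMCCC_FAST_CALL		1
#define ARM_SMCCC_TYPE_SHIFT		31
#define ARM_SMCCC_SMC_32		0
#define ARM_SMCCC_SMC_64		1
#define ARM_SMCCC_CALL_CONV_SHIFT	30
#define ARM_SMCCC_OWNER_MASK		0x3F
#define ARM_SMCCC_OWNER_SHIFT		24
#define ARM_SMCCC_FUNC_MASK		0xFFFF
#define ARM_SMCCC_OWNER_SIP		2

#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \
	(((type) << ARM_SMCCC_TYPE_SHIFT) | \
	((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \
	(((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \
	((func_num) & ARM_SMCCC_FUNC_MASK))

int main(void)
{
	/* Fast SMC32 call to a (hypothetical) SiP service, function 0x20. */
	uint32_t id = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32,
					 ARM_SMCCC_OWNER_SIP, 0x20);

	printf("function id = 0x%08x\n", (unsigned)id);	/* prints 0x82000020 */
	return 0;
}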
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 1991aea2ec4c..06ed7e54033e 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -37,6 +37,8 @@ #include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/dynamic_debug.h> +#include <linux/module.h> +#include <linux/mutex.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> @@ -119,6 +121,75 @@ typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header, const unsigned long end); +/* Debugger support */ + +struct acpi_debugger_ops { + int (*create_thread)(acpi_osd_exec_callback function, void *context); + ssize_t (*write_log)(const char *msg); + ssize_t (*read_cmd)(char *buffer, size_t length); + int (*wait_command_ready)(bool single_step, char *buffer, size_t length); + int (*notify_command_complete)(void); +}; + +struct acpi_debugger { + const struct acpi_debugger_ops *ops; + struct module *owner; + struct mutex lock; +}; + +#ifdef CONFIG_ACPI_DEBUGGER +int __init acpi_debugger_init(void); +int acpi_register_debugger(struct module *owner, + const struct acpi_debugger_ops *ops); +void acpi_unregister_debugger(const struct acpi_debugger_ops *ops); +int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context); +ssize_t acpi_debugger_write_log(const char *msg); +ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length); +int acpi_debugger_wait_command_ready(void); +int acpi_debugger_notify_command_complete(void); +#else +static inline int acpi_debugger_init(void) +{ + return -ENODEV; +} + +static inline int acpi_register_debugger(struct module *owner, + const struct acpi_debugger_ops *ops) +{ + return -ENODEV; +} + +static inline void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) +{ +} + +static inline int acpi_debugger_create_thread(acpi_osd_exec_callback function, + void *context) +{ + return -ENODEV; +} + +static inline int acpi_debugger_write_log(const char *msg) +{ + return -ENODEV; +} + +static inline int acpi_debugger_read_cmd(char *buffer, u32 buffer_length) +{ + return -ENODEV; +} + +static inline int acpi_debugger_wait_command_ready(void) +{ + return -ENODEV; +} + +static inline int acpi_debugger_notify_command_complete(void) +{ + return -ENODEV; +} +#endif + #ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE void acpi_initrd_override(void *data, size_t size); #else @@ -318,6 +389,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares, bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, struct resource_win *win); unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); +unsigned int acpi_dev_get_irq_type(int triggering, int polarity); bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, struct resource *res); diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index 0ddb5c02ad8b..d76a19ba2cff 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h @@ -65,6 +65,24 @@ #define ST_UART011_ABCR 0x100 /* Autobaud control register. */ #define ST_UART011_ABIMSC 0x15C /* Autobaud interrupt mask/clear register. */ +/* + * ZTE UART register offsets. This UART has a radically different address + * allocation from the ARM and ST variants, so we list all registers here. + * We assume unlisted registers do not exist. 
+ */ +#define ZX_UART011_DR 0x04 +#define ZX_UART011_FR 0x14 +#define ZX_UART011_IBRD 0x24 +#define ZX_UART011_FBRD 0x28 +#define ZX_UART011_LCRH 0x30 +#define ZX_UART011_CR 0x34 +#define ZX_UART011_IFLS 0x38 +#define ZX_UART011_IMSC 0x40 +#define ZX_UART011_RIS 0x44 +#define ZX_UART011_MIS 0x48 +#define ZX_UART011_ICR 0x4c +#define ZX_UART011_DMACR 0x50 + #define UART011_DR_OE (1 << 11) #define UART011_DR_BE (1 << 10) #define UART011_DR_PE (1 << 9) diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h new file mode 100644 index 000000000000..b5abfda80465 --- /dev/null +++ b/include/linux/arm-smccc.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2015, Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __LINUX_ARM_SMCCC_H +#define __LINUX_ARM_SMCCC_H + +#include <linux/linkage.h> +#include <linux/types.h> + +/* + * This file provides common defines for ARM SMC Calling Convention as + * specified in + * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + */ + +#define ARM_SMCCC_STD_CALL 0 +#define ARM_SMCCC_FAST_CALL 1 +#define ARM_SMCCC_TYPE_SHIFT 31 + +#define ARM_SMCCC_SMC_32 0 +#define ARM_SMCCC_SMC_64 1 +#define ARM_SMCCC_CALL_CONV_SHIFT 30 + +#define ARM_SMCCC_OWNER_MASK 0x3F +#define ARM_SMCCC_OWNER_SHIFT 24 + +#define ARM_SMCCC_FUNC_MASK 0xFFFF + +#define ARM_SMCCC_IS_FAST_CALL(smc_val) \ + ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT)) +#define ARM_SMCCC_IS_64(smc_val) \ + ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT)) +#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK) +#define ARM_SMCCC_OWNER_NUM(smc_val) \ + (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK) + +#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \ + (((type) << ARM_SMCCC_TYPE_SHIFT) | \ + ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \ + (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \ + ((func_num) & ARM_SMCCC_FUNC_MASK)) + +#define ARM_SMCCC_OWNER_ARCH 0 +#define ARM_SMCCC_OWNER_CPU 1 +#define ARM_SMCCC_OWNER_SIP 2 +#define ARM_SMCCC_OWNER_OEM 3 +#define ARM_SMCCC_OWNER_STANDARD 4 +#define ARM_SMCCC_OWNER_TRUSTED_APP 48 +#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 +#define ARM_SMCCC_OWNER_TRUSTED_OS 50 +#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 + +/** + * struct arm_smccc_res - Result from SMC/HVC call + * @a0-a3 result values from registers 0 to 3 + */ +struct arm_smccc_res { + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; +}; + +/** + * arm_smccc_smc() - make SMC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This function is used to make SMC calls following SMC Calling Convention. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction. 
+ */ +asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res); + +/** + * arm_smccc_hvc() - make HVC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * + * This function is used to make HVC calls following SMC Calling + * Convention. The content of the supplied param are copied to registers 0 + * to 7 prior to the HVC instruction. The return values are updated with + * the content from register 0 to 3 on return from the HVC instruction. + */ +asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res); + +#endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h new file mode 100644 index 000000000000..c3bdf8c59480 --- /dev/null +++ b/include/linux/badblocks.h @@ -0,0 +1,65 @@ +#ifndef _LINUX_BADBLOCKS_H +#define _LINUX_BADBLOCKS_H + +#include <linux/seqlock.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/stddef.h> +#include <linux/types.h> + +#define BB_LEN_MASK (0x00000000000001FFULL) +#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL) +#define BB_ACK_MASK (0x8000000000000000ULL) +#define BB_MAX_LEN 512 +#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9) +#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1) +#define BB_ACK(x) (!!((x) & BB_ACK_MASK)) +#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63)) + +/* Bad block numbers are stored sorted in a single page. + * 64bits is used for each block or extent. + * 54 bits are sector number, 9 bits are extent size, + * 1 bit is an 'acknowledged' flag. + */ +#define MAX_BADBLOCKS (PAGE_SIZE/8) + +struct badblocks { + struct device *dev; /* set by devm_init_badblocks */ + int count; /* count of bad blocks */ + int unacked_exist; /* there probably are unacknowledged + * bad blocks. 
This is only cleared + * when a read discovers none + */ + int shift; /* shift from sectors to block size + * a -ve shift means badblocks are + * disabled.*/ + u64 *page; /* badblock list */ + int changed; + seqlock_t lock; + sector_t sector; + sector_t size; /* in sectors */ +}; + +int badblocks_check(struct badblocks *bb, sector_t s, int sectors, + sector_t *first_bad, int *bad_sectors); +int badblocks_set(struct badblocks *bb, sector_t s, int sectors, + int acknowledged); +int badblocks_clear(struct badblocks *bb, sector_t s, int sectors); +void ack_all_badblocks(struct badblocks *bb); +ssize_t badblocks_show(struct badblocks *bb, char *page, int unack); +ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len, + int unack); +int badblocks_init(struct badblocks *bb, int enable); +void badblocks_exit(struct badblocks *bb); +struct device; +int devm_init_badblocks(struct device *dev, struct badblocks *bb); +static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb) +{ + if (bb->dev != dev) { + dev_WARN_ONCE(dev, 1, "%s: badblocks instance not associated\n", + __func__); + return; + } + badblocks_exit(bb); +} +#endif diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index cf038431a5cc..db51a6ffb7d6 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -579,6 +579,8 @@ struct bcma_pflash { }; #ifdef CONFIG_BCMA_SFLASH +struct mtd_info; + struct bcma_sflash { bool present; u32 window; @@ -592,13 +594,9 @@ struct bcma_sflash { #endif #ifdef CONFIG_BCMA_NFLASH -struct mtd_info; - struct bcma_nflash { bool present; bool boot; /* This is the flash the SoC boots from */ - - struct mtd_info *mtd; }; #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0169ba2e2e64..c70e3588a48c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -797,6 +797,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, extern int blk_queue_enter(struct request_queue *q, gfp_t gfp); extern void blk_queue_exit(struct request_queue *q); extern void blk_start_queue(struct request_queue *q); +extern void blk_start_queue_async(struct request_queue *q); extern void blk_stop_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); extern void __blk_stop_queue(struct request_queue *q); diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index f589222bfa87..35b22f94d2d2 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -19,6 +19,10 @@ extern unsigned long min_low_pfn; * highest page */ extern unsigned long max_pfn; +/* + * highest possible page + */ +extern unsigned long long max_possible_pfn; #ifndef CONFIG_NO_BOOTMEM /* diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 59f4a7304419..f0ba9c2ec639 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -26,6 +26,7 @@ #define PHY_ID_BCM7366 0x600d8490 #define PHY_ID_BCM7425 0x600d86b0 #define PHY_ID_BCM7429 0x600d8730 +#define PHY_ID_BCM7435 0x600d8750 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7439_2 0xae025080 #define PHY_ID_BCM7445 0x600d8510 diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 06b77f9dd3f2..7f540f7f588d 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -34,17 +34,12 @@ struct seq_file; /* define the enumeration of all cgroup subsystems */ #define SUBSYS(_x) _x ## _cgrp_id, -#define SUBSYS_TAG(_t) 
CGROUP_ ## _t, \ - __unused_tag_ ## _t = CGROUP_ ## _t - 1, enum cgroup_subsys_id { #include <linux/cgroup_subsys.h> CGROUP_SUBSYS_COUNT, }; -#undef SUBSYS_TAG #undef SUBSYS -#define CGROUP_CANFORK_COUNT (CGROUP_CANFORK_END - CGROUP_CANFORK_START) - /* bits in struct cgroup_subsys_state flags field */ enum { CSS_NO_REF = (1 << 0), /* no reference counting for this css */ @@ -66,7 +61,6 @@ enum { /* cgroup_root->flags */ enum { - CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */ CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ }; @@ -231,6 +225,14 @@ struct cgroup { int id; /* + * The depth this cgroup is at. The root is at depth zero and each + * step down the hierarchy increments the level. This along with + * ancestor_ids[] can determine whether a given cgroup is a + * descendant of another without traversing the hierarchy. + */ + int level; + + /* * Each non-empty css_set associated with this cgroup contributes * one to populated_cnt. All children with non-zero popuplated_cnt * of their own contribute one. The count is zero iff there's no @@ -285,6 +287,9 @@ struct cgroup { /* used to schedule release agent */ struct work_struct release_agent_work; + + /* ids of the ancestors at each level including self */ + int ancestor_ids[]; }; /* @@ -304,6 +309,9 @@ struct cgroup_root { /* The root cgroup. Root is destroyed on its release. */ struct cgroup cgrp; + /* for cgrp->ancestor_ids[0] */ + int cgrp_ancestor_id_storage; + /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ atomic_t nr_cgrps; @@ -425,9 +433,9 @@ struct cgroup_subsys { int (*can_attach)(struct cgroup_taskset *tset); void (*cancel_attach)(struct cgroup_taskset *tset); void (*attach)(struct cgroup_taskset *tset); - int (*can_fork)(struct task_struct *task, void **priv_p); - void (*cancel_fork)(struct task_struct *task, void *priv); - void (*fork)(struct task_struct *task, void *priv); + int (*can_fork)(struct task_struct *task); + void (*cancel_fork)(struct task_struct *task); + void (*fork)(struct task_struct *task); void (*exit)(struct task_struct *task); void (*free)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); @@ -513,7 +521,6 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) #else /* CONFIG_CGROUPS */ -#define CGROUP_CANFORK_COUNT 0 #define CGROUP_SUBSYS_COUNT 0 static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {} @@ -521,4 +528,116 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} #endif /* CONFIG_CGROUPS */ +#ifdef CONFIG_SOCK_CGROUP_DATA + +/* + * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains + * per-socket cgroup information except for memcg association. + * + * On legacy hierarchies, net_prio and net_cls controllers directly set + * attributes on each sock which can then be tested by the network layer. + * On the default hierarchy, each sock is associated with the cgroup it was + * created in and the networking layer can match the cgroup directly. + * + * To avoid carrying all three cgroup related fields separately in sock, + * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer. + * On boot, sock_cgroup_data records the cgroup that the sock was created + * in so that cgroup2 matches can be made; however, once either net_prio or + * net_cls starts being used, the area is overriden to carry prioidx and/or + * classid. 
The two modes are distinguished by whether the lowest bit is + * set. Clear bit indicates cgroup pointer while set bit prioidx and + * classid. + * + * While userland may start using net_prio or net_cls at any time, once + * either is used, cgroup2 matching no longer works. There is no reason to + * mix the two and this is in line with how legacy and v2 compatibility is + * handled. On mode switch, cgroup references which are already being + * pointed to by socks may be leaked. While this can be remedied by adding + * synchronization around sock_cgroup_data, given that the number of leaked + * cgroups is bound and highly unlikely to be high, this seems to be the + * better trade-off. + */ +struct sock_cgroup_data { + union { +#ifdef __LITTLE_ENDIAN + struct { + u8 is_data; + u8 padding; + u16 prioidx; + u32 classid; + } __packed; +#else + struct { + u32 classid; + u16 prioidx; + u8 padding; + u8 is_data; + } __packed; +#endif + u64 val; + }; +}; + +/* + * There's a theoretical window where the following accessors race with + * updaters and return part of the previous pointer as the prioidx or + * classid. Such races are short-lived and the result isn't critical. + */ +static inline u16 sock_cgroup_prioidx(struct sock_cgroup_data *skcd) +{ + /* fallback to 1 which is always the ID of the root cgroup */ + return (skcd->is_data & 1) ? skcd->prioidx : 1; +} + +static inline u32 sock_cgroup_classid(struct sock_cgroup_data *skcd) +{ + /* fallback to 0 which is the unconfigured default classid */ + return (skcd->is_data & 1) ? skcd->classid : 0; +} + +/* + * If invoked concurrently, the updaters may clobber each other. The + * caller is responsible for synchronization. + */ +static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, + u16 prioidx) +{ + struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; + + if (sock_cgroup_prioidx(&skcd_buf) == prioidx) + return; + + if (!(skcd_buf.is_data & 1)) { + skcd_buf.val = 0; + skcd_buf.is_data = 1; + } + + skcd_buf.prioidx = prioidx; + WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ +} + +static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd, + u32 classid) +{ + struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }}; + + if (sock_cgroup_classid(&skcd_buf) == classid) + return; + + if (!(skcd_buf.is_data & 1)) { + skcd_buf.val = 0; + skcd_buf.is_data = 1; + } + + skcd_buf.classid = classid; + WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */ +} + +#else /* CONFIG_SOCK_CGROUP_DATA */ + +struct sock_cgroup_data { +}; + +#endif /* CONFIG_SOCK_CGROUP_DATA */ + #endif /* _LINUX_CGROUP_DEFS_H */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index cb91b44f5f78..2162dca88dc0 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -81,7 +81,8 @@ struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, struct cgroup_subsys *ss); -bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); +struct cgroup *cgroup_get_from_path(const char *path); + int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); @@ -96,12 +97,9 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); void cgroup_fork(struct task_struct *p); -extern int cgroup_can_fork(struct task_struct *p, - void *ss_priv[CGROUP_CANFORK_COUNT]); 
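The pointer-vs-data tagging that sock_cgroup_data uses in the cgroup-defs.h hunk above can be modelled outside the kernel. A minimal userspace sketch follows, assuming a little-endian host and mirroring the layout and the prioidx accessors from the patch; it drops the READ_ONCE()/WRITE_ONCE() copy dance since there is no concurrency here, and it relies on real struct cgroup pointers being word aligned, so a pointer-mode value always has its low bit clear.

#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of the little-endian sock_cgroup_data layout above. */
struct sock_cgroup_data {
	union {
		struct {
			uint8_t  is_data;	/* low bit set => prioidx/classid mode */
			uint8_t  padding;
			uint16_t prioidx;
			uint32_t classid;
		};
		uint64_t val;			/* doubles as the cgroup pointer when the bit is clear */
	};
};

static uint16_t sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
	/* fall back to 1, the ID of the root cgroup, while still in pointer mode */
	return (skcd->is_data & 1) ? skcd->prioidx : 1;
}

static void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, uint16_t prioidx)
{
	if (!(skcd->is_data & 1)) {
		skcd->val = 0;		/* drop the cgroup pointer ... */
		skcd->is_data = 1;	/* ... and switch to prioidx/classid mode */
	}
	skcd->prioidx = prioidx;
}

int main(void)
{
	struct sock_cgroup_data skcd = { .val = 0 };

	printf("before: prioidx=%u\n", (unsigned)sock_cgroup_prioidx(&skcd));	/* 1 (root) */
	sock_cgroup_set_prioidx(&skcd, 42);
	printf("after:  prioidx=%u\n", (unsigned)sock_cgroup_prioidx(&skcd));	/* 42 */
	return 0;
}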
-extern void cgroup_cancel_fork(struct task_struct *p, - void *ss_priv[CGROUP_CANFORK_COUNT]); -extern void cgroup_post_fork(struct task_struct *p, - void *old_ss_priv[CGROUP_CANFORK_COUNT]); +extern int cgroup_can_fork(struct task_struct *p); +extern void cgroup_cancel_fork(struct task_struct *p); +extern void cgroup_post_fork(struct task_struct *p); void cgroup_exit(struct task_struct *p); void cgroup_free(struct task_struct *p); @@ -364,6 +362,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) percpu_ref_put_many(&css->refcnt, n); } +static inline void cgroup_put(struct cgroup *cgrp) +{ + css_put(&cgrp->self); +} + /** * task_css_set_check - obtain a task's css_set with extra access conditions * @task: the task to obtain css_set for @@ -471,6 +474,23 @@ static inline struct cgroup *task_cgroup(struct task_struct *task, return task_css(task, subsys_id)->cgroup; } +/** + * cgroup_is_descendant - test ancestry + * @cgrp: the cgroup to be tested + * @ancestor: possible ancestor of @cgrp + * + * Test whether @cgrp is a descendant of @ancestor. It also returns %true + * if @cgrp == @ancestor. This function is safe to call as long as @cgrp + * and @ancestor are accessible. + */ +static inline bool cgroup_is_descendant(struct cgroup *cgrp, + struct cgroup *ancestor) +{ + if (cgrp->root != ancestor->root || cgrp->level < ancestor->level) + return false; + return cgrp->ancestor_ids[ancestor->level] == ancestor->id; +} + /* no synchronization, the result can only be used as a hint */ static inline bool cgroup_is_populated(struct cgroup *cgrp) { @@ -539,13 +559,9 @@ static inline int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) { return -EINVAL; } static inline void cgroup_fork(struct task_struct *p) {} -static inline int cgroup_can_fork(struct task_struct *p, - void *ss_priv[CGROUP_CANFORK_COUNT]) -{ return 0; } -static inline void cgroup_cancel_fork(struct task_struct *p, - void *ss_priv[CGROUP_CANFORK_COUNT]) {} -static inline void cgroup_post_fork(struct task_struct *p, - void *ss_priv[CGROUP_CANFORK_COUNT]) {} +static inline int cgroup_can_fork(struct task_struct *p) { return 0; } +static inline void cgroup_cancel_fork(struct task_struct *p) {} +static inline void cgroup_post_fork(struct task_struct *p) {} static inline void cgroup_exit(struct task_struct *p) {} static inline void cgroup_free(struct task_struct *p) {} @@ -554,4 +570,45 @@ static inline int cgroup_init(void) { return 0; } #endif /* !CONFIG_CGROUPS */ +/* + * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data + * definition in cgroup-defs.h. + */ +#ifdef CONFIG_SOCK_CGROUP_DATA + +#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) +extern spinlock_t cgroup_sk_update_lock; +#endif + +void cgroup_sk_alloc_disable(void); +void cgroup_sk_alloc(struct sock_cgroup_data *skcd); +void cgroup_sk_free(struct sock_cgroup_data *skcd); + +static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) +{ +#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID) + unsigned long v; + + /* + * @skcd->val is 64bit but the following is safe on 32bit too as we + * just need the lower ulong to be written and read atomically. 
+ */ + v = READ_ONCE(skcd->val); + + if (v & 1) + return &cgrp_dfl_root.cgrp; + + return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; +#else + return (struct cgroup *)(unsigned long)skcd->val; +#endif +} + +#else /* CONFIG_CGROUP_DATA */ + +static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} +static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} + +#endif /* CONFIG_CGROUP_DATA */ + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 1a96fdaa33d5..0df0336acee9 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -6,14 +6,8 @@ /* * This file *must* be included with SUBSYS() defined. - * SUBSYS_TAG() is a noop if undefined. */ -#ifndef SUBSYS_TAG -#define __TMP_SUBSYS_TAG -#define SUBSYS_TAG(_x) -#endif - #if IS_ENABLED(CONFIG_CPUSETS) SUBSYS(cpuset) #endif @@ -58,17 +52,10 @@ SUBSYS(net_prio) SUBSYS(hugetlb) #endif -/* - * Subsystems that implement the can_fork() family of callbacks. - */ -SUBSYS_TAG(CANFORK_START) - #if IS_ENABLED(CONFIG_CGROUP_PIDS) SUBSYS(pids) #endif -SUBSYS_TAG(CANFORK_END) - /* * The following subsystems are not supported on the default hierarchy. */ @@ -76,11 +63,6 @@ SUBSYS_TAG(CANFORK_END) SUBSYS(debug) #endif -#ifdef __TMP_SUBSYS_TAG -#undef __TMP_SUBSYS_TAG -#undef SUBSYS_TAG -#endif - /* * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. */ diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 7784b597e959..6013021a3b39 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -62,12 +62,18 @@ struct module; * @suspend: suspend function for the clocksource, if necessary * @resume: resume function for the clocksource, if necessary * @owner: module reference, must be set by clocksource in modules + * + * Note: This struct is not used in hotpathes of the timekeeping code + * because the timekeeper caches the hot path fields in its own data + * structure, so no line cache alignment is required, + * + * The pointer to the clocksource itself is handed to the read + * callback. If you need extra information there you can wrap struct + * clocksource into your own struct. Depending on the amount of + * information you need you should consider to cache line align that + * structure. */ struct clocksource { - /* - * Hotpath data, fits in a single cache line when the - * clocksource itself is cacheline aligned. - */ cycle_t (*read)(struct clocksource *cs); cycle_t mask; u32 mult; @@ -95,7 +101,7 @@ struct clocksource { cycle_t wd_last; #endif struct module *owner; -} ____cacheline_aligned; +}; /* * Clock source flags bits:: diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 4dac1036594f..00b042c49ccd 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -299,6 +299,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __u.__val; \ }) +/** + * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering + * @cond: boolean expression to wait for + * + * Equivalent to using smp_load_acquire() on the condition variable but employs + * the control dependency of the wait to reduce the barrier on many platforms. + * + * The control dependency provides a LOAD->STORE order, the additional RMB + * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, + * aka. ACQUIRE. 
+ */ +#define smp_cond_acquire(cond) do { \ + while (!(cond)) \ + cpu_relax(); \ + smp_rmb(); /* ctrl + rmb := acquire */ \ +} while (0) + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/include/linux/component.h b/include/linux/component.h index c00dcc302611..a559eebc0e0f 100644 --- a/include/linux/component.h +++ b/include/linux/component.h @@ -1,39 +1,48 @@ #ifndef COMPONENT_H #define COMPONENT_H +#include <linux/stddef.h> + struct device; struct component_ops { - int (*bind)(struct device *, struct device *, void *); - void (*unbind)(struct device *, struct device *, void *); + int (*bind)(struct device *comp, struct device *master, + void *master_data); + void (*unbind)(struct device *comp, struct device *master, + void *master_data); }; int component_add(struct device *, const struct component_ops *); void component_del(struct device *, const struct component_ops *); -int component_bind_all(struct device *, void *); -void component_unbind_all(struct device *, void *); +int component_bind_all(struct device *master, void *master_data); +void component_unbind_all(struct device *master, void *master_data); struct master; struct component_master_ops { - int (*add_components)(struct device *, struct master *); - int (*bind)(struct device *); - void (*unbind)(struct device *); + int (*bind)(struct device *master); + void (*unbind)(struct device *master); }; -int component_master_add(struct device *, const struct component_master_ops *); void component_master_del(struct device *, const struct component_master_ops *); -int component_master_add_child(struct master *master, - int (*compare)(struct device *, void *), void *compare_data); - struct component_match; int component_master_add_with_match(struct device *, const struct component_master_ops *, struct component_match *); -void component_match_add(struct device *, struct component_match **, +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void *compare_data); +static inline void component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), void *compare_data) +{ + component_match_add_release(master, matchptr, NULL, compare, + compare_data); +} + #endif diff --git a/include/linux/configfs.h b/include/linux/configfs.h index 758a029011b1..f7300d023dbe 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -51,6 +51,7 @@ struct module; struct configfs_item_operations; struct configfs_group_operations; struct configfs_attribute; +struct configfs_bin_attribute; struct configfs_subsystem; struct config_item { @@ -84,6 +85,7 @@ struct config_item_type { struct configfs_item_operations *ct_item_ops; struct configfs_group_operations *ct_group_ops; struct configfs_attribute **ct_attrs; + struct configfs_bin_attribute **ct_bin_attrs; }; /** @@ -154,6 +156,54 @@ static struct configfs_attribute _pfx##attr_##_name = { \ .store = _pfx##_name##_store, \ } +struct file; +struct vm_area_struct; + +struct configfs_bin_attribute { + struct configfs_attribute cb_attr; /* std. 
attribute */ + void *cb_private; /* for user */ + size_t cb_max_size; /* max core size */ + ssize_t (*read)(struct config_item *, void *, size_t); + ssize_t (*write)(struct config_item *, const void *, size_t); +}; + +#define CONFIGFS_BIN_ATTR(_pfx, _name, _priv, _maxsz) \ +static struct configfs_bin_attribute _pfx##attr_##_name = { \ + .cb_attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IRUGO | S_IWUSR, \ + .ca_owner = THIS_MODULE, \ + }, \ + .cb_private = _priv, \ + .cb_max_size = _maxsz, \ + .read = _pfx##_name##_read, \ + .write = _pfx##_name##_write, \ +} + +#define CONFIGFS_BIN_ATTR_RO(_pfx, _name, _priv, _maxsz) \ +static struct configfs_attribute _pfx##attr_##_name = { \ + .cb_attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IRUGO, \ + .ca_owner = THIS_MODULE, \ + }, \ + .cb_private = _priv, \ + .cb_max_size = _maxsz, \ + .read = _pfx##_name##_read, \ +} + +#define CONFIGFS_BIN_ATTR_WO(_pfx, _name, _priv, _maxsz) \ +static struct configfs_attribute _pfx##attr_##_name = { \ + .cb_attr = { \ + .ca_name = __stringify(_name), \ + .ca_mode = S_IWUSR, \ + .ca_owner = THIS_MODULE, \ + }, \ + .cb_private = _priv, \ + .cb_max_size = _maxsz, \ + .write = _pfx##_name##_write, \ +} + /* * If allow_link() exists, the item can symlink(2) out to other * items. If the item is a group, it may support mkdir(2). diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 68b575afe5f5..d259274238db 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -86,7 +86,7 @@ static inline void context_tracking_init(void) { } #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN static inline void guest_enter(void) { - if (vtime_accounting_enabled()) + if (vtime_accounting_cpu_enabled()) vtime_guest_enter(current); else current->flags |= PF_VCPU; @@ -100,7 +100,7 @@ static inline void guest_exit(void) if (context_tracking_is_enabled()) __context_tracking_exit(CONTEXT_GUEST); - if (vtime_accounting_enabled()) + if (vtime_accounting_cpu_enabled()) vtime_guest_exit(current); else current->flags &= ~PF_VCPU; diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index ee956c528fab..1d34fe68f48a 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h @@ -22,12 +22,12 @@ struct context_tracking { }; #ifdef CONFIG_CONTEXT_TRACKING -extern struct static_key context_tracking_enabled; +extern struct static_key_false context_tracking_enabled; DECLARE_PER_CPU(struct context_tracking, context_tracking); static inline bool context_tracking_is_enabled(void) { - return static_key_false(&context_tracking_enabled); + return static_branch_unlikely(&context_tracking_enabled); } static inline bool context_tracking_cpu_is_enabled(void) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 177c7680c1a8..88a4215125bc 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -278,7 +278,6 @@ struct cpufreq_driver { struct freq_attr **attr; /* platform specific boost support code */ - bool boost_supported; bool boost_enabled; int (*set_boost)(int state); }; @@ -574,7 +573,6 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf); #ifdef CONFIG_CPU_FREQ int cpufreq_boost_trigger_state(int state); -int cpufreq_boost_supported(void); int cpufreq_boost_enabled(void); int cpufreq_enable_boost_support(void); bool policy_has_boost_freq(struct cpufreq_policy *policy); @@ -583,10 +581,6 @@ static inline int cpufreq_boost_trigger_state(int state) { return 0; } 
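The smp_cond_acquire() helper added to linux/compiler.h above targets the classic flag-then-data handshake. A minimal sketch of that usage follows; shared_data, data_ready and the two functions are hypothetical, only smp_cond_acquire(), smp_store_release() and the ONCE accessors are existing kernel primitives.

#include <linux/compiler.h>
#include <asm/barrier.h>

static int shared_data;
static int data_ready;

/* Producer: publish the data, then set the flag with release semantics. */
static void publish(int v)
{
	WRITE_ONCE(shared_data, v);
	smp_store_release(&data_ready, 1);
}

/* Consumer: spin until the flag is observed, then read the data.
 * The control dependency plus the smp_rmb() inside smp_cond_acquire()
 * order the flag load before the shared_data load.
 */
static int consume(void)
{
	smp_cond_acquire(READ_ONCE(data_ready));
	return READ_ONCE(shared_data);
}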
-static inline int cpufreq_boost_supported(void) -{ - return 0; -} static inline int cpufreq_boost_enabled(void) { return 0; diff --git a/include/linux/dca.h b/include/linux/dca.h index d27a7a05718d..ad956c2e07a8 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -34,7 +34,7 @@ void dca_unregister_notify(struct notifier_block *nb); struct dca_provider { struct list_head node; - struct dca_ops *ops; + const struct dca_ops *ops; struct device *cd; int id; }; @@ -53,7 +53,8 @@ struct dca_ops { int (*dev_managed) (struct dca_provider *, struct device *); }; -struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); +struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, + int priv_size); void free_dca_provider(struct dca_provider *dca); int register_dca_provider(struct dca_provider *dca, struct device *dev); void unregister_dca_provider(struct dca_provider *dca, struct device *dev); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index d67ae119cf4e..7781ce110503 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -27,10 +27,10 @@ struct vfsmount; /* The hash is always the low bits of hash_len */ #ifdef __LITTLE_ENDIAN - #define HASH_LEN_DECLARE u32 hash; u32 len; + #define HASH_LEN_DECLARE u32 hash; u32 len #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8)) #else - #define HASH_LEN_DECLARE u32 len; u32 hash; + #define HASH_LEN_DECLARE u32 len; u32 hash #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8)) #endif diff --git a/include/linux/delayed_call.h b/include/linux/delayed_call.h new file mode 100644 index 000000000000..f7fa76ae1a9b --- /dev/null +++ b/include/linux/delayed_call.h @@ -0,0 +1,34 @@ +#ifndef _DELAYED_CALL_H +#define _DELAYED_CALL_H + +/* + * Poor man's closures; I wish we could've done them sanely polymorphic, + * but... + */ + +struct delayed_call { + void (*fn)(void *); + void *arg; +}; + +#define DEFINE_DELAYED_CALL(name) struct delayed_call name = {NULL, NULL} + +/* I really wish we had closures with sane typechecking... 
*/ +static inline void set_delayed_call(struct delayed_call *call, + void (*fn)(void *), void *arg) +{ + call->fn = fn; + call->arg = arg; +} + +static inline void do_delayed_call(struct delayed_call *call) +{ + if (call->fn) + call->fn(call->arg); +} + +static inline void clear_delayed_call(struct delayed_call *call) +{ + call->fn = NULL; +} +#endif diff --git a/include/linux/device.h b/include/linux/device.h index b8f411b57dcb..f627ba20a46c 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -191,6 +191,7 @@ extern int bus_unregister_notifier(struct bus_type *bus, unbound */ #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound from the device */ +#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */ extern struct kset *bus_get_kset(struct bus_type *bus); extern struct klist *bus_get_device_klist(struct bus_type *bus); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c47c68e535e8..16a1cad30c33 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -607,11 +607,38 @@ enum dmaengine_alignment { }; /** + * struct dma_slave_map - associates slave device and it's slave channel with + * parameter to be used by a filter function + * @devname: name of the device + * @slave: slave channel name + * @param: opaque parameter to pass to struct dma_filter.fn + */ +struct dma_slave_map { + const char *devname; + const char *slave; + void *param; +}; + +/** + * struct dma_filter - information for slave device/channel to filter_fn/param + * mapping + * @fn: filter function callback + * @mapcnt: number of slave device/channel in the map + * @map: array of channel to filter mapping data + */ +struct dma_filter { + dma_filter_fn fn; + int mapcnt; + const struct dma_slave_map *map; +}; + +/** * struct dma_device - info on the entity supplying DMA services * @chancnt: how many DMA channels are supported * @privatecnt: how many DMA channels are requested by dma_request_channel * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list + * @filter: information for device/slave to filter function/param mapping * @cap_mask: one or more dma_capability flags * @max_xor: maximum number of xor sources, 0 if no capability * @max_pq: maximum number of PQ sources and PQ-continue capability @@ -654,11 +681,14 @@ enum dmaengine_alignment { * paused. Returns 0 or an error code * @device_terminate_all: Aborts all transfers on a channel. Returns 0 * or an error code + * @device_synchronize: Synchronizes the termination of a transfers to the + * current context. 
* @device_tx_status: poll for transaction completion, the optional * txstate parameter can be supplied with a pointer to get a * struct with auxiliary transfer status information, otherwise the call * will just return a simple status code * @device_issue_pending: push pending transactions to hardware + * @descriptor_reuse: a submitted transfer can be resubmitted after completion */ struct dma_device { @@ -666,6 +696,7 @@ struct dma_device { unsigned int privatecnt; struct list_head channels; struct list_head global_node; + struct dma_filter filter; dma_cap_mask_t cap_mask; unsigned short max_xor; unsigned short max_pq; @@ -681,6 +712,7 @@ struct dma_device { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + bool descriptor_reuse; enum dma_residue_granularity residue_granularity; int (*device_alloc_chan_resources)(struct dma_chan *chan); @@ -737,6 +769,7 @@ struct dma_device { int (*device_pause)(struct dma_chan *chan); int (*device_resume)(struct dma_chan *chan); int (*device_terminate_all)(struct dma_chan *chan); + void (*device_synchronize)(struct dma_chan *chan); enum dma_status (*device_tx_status)(struct dma_chan *chan, dma_cookie_t cookie, @@ -828,6 +861,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( src_sg, src_nents, flags); } +/** + * dmaengine_terminate_all() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * This function is DEPRECATED use either dmaengine_terminate_sync() or + * dmaengine_terminate_async() instead. + */ static inline int dmaengine_terminate_all(struct dma_chan *chan) { if (chan->device->device_terminate_all) @@ -836,6 +876,88 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) return -ENOSYS; } +/** + * dmaengine_terminate_async() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * Calling this function will terminate all active and pending descriptors + * that have previously been submitted to the channel. It is not guaranteed + * though that the transfer for the active descriptor has stopped when the + * function returns. Furthermore it is possible the complete callback of a + * submitted transfer is still running when this function returns. + * + * dmaengine_synchronize() needs to be called before it is safe to free + * any memory that is accessed by previously submitted descriptors or before + * freeing any resources accessed from within the completion callback of any + * perviously submitted descriptors. + * + * This function can be called from atomic context as well as from within a + * complete callback of a descriptor submitted on the same channel. + * + * If none of the two conditions above apply consider using + * dmaengine_terminate_sync() instead. + */ +static inline int dmaengine_terminate_async(struct dma_chan *chan) +{ + if (chan->device->device_terminate_all) + return chan->device->device_terminate_all(chan); + + return -EINVAL; +} + +/** + * dmaengine_synchronize() - Synchronize DMA channel termination + * @chan: The channel to synchronize + * + * Synchronizes to the DMA channel termination to the current context. When this + * function returns it is guaranteed that all transfers for previously issued + * descriptors have stopped and and it is safe to free the memory assoicated + * with them. 
Furthermore it is guaranteed that all complete callback functions + * for a previously submitted descriptor have finished running and it is safe to + * free resources accessed from within the complete callbacks. + * + * The behavior of this function is undefined if dma_async_issue_pending() has + * been called between dmaengine_terminate_async() and this function. + * + * This function must only be called from non-atomic context and must not be + * called from within a complete callback of a descriptor submitted on the same + * channel. + */ +static inline void dmaengine_synchronize(struct dma_chan *chan) +{ + might_sleep(); + + if (chan->device->device_synchronize) + chan->device->device_synchronize(chan); +} + +/** + * dmaengine_terminate_sync() - Terminate all active DMA transfers + * @chan: The channel for which to terminate the transfers + * + * Calling this function will terminate all active and pending transfers + * that have previously been submitted to the channel. It is similar to + * dmaengine_terminate_async() but guarantees that the DMA transfer has actually + * stopped and that all complete callbacks have finished running when the + * function returns. + * + * This function must only be called from non-atomic context and must not be + * called from within a complete callback of a descriptor submitted on the same + * channel. + */ +static inline int dmaengine_terminate_sync(struct dma_chan *chan) +{ + int ret; + + ret = dmaengine_terminate_async(chan); + if (ret) + return ret; + + dmaengine_synchronize(chan); + + return 0; +} + static inline int dmaengine_pause(struct dma_chan *chan) { if (chan->device->device_pause) @@ -1140,9 +1262,11 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); void dma_issue_pending_all(void); struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); -struct dma_chan *dma_request_slave_channel_reason(struct device *dev, - const char *name); struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); + +struct dma_chan *dma_request_chan(struct device *dev, const char *name); +struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask); + void dma_release_channel(struct dma_chan *chan); int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); #else @@ -1166,16 +1290,21 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, { return NULL; } -static inline struct dma_chan *dma_request_slave_channel_reason( - struct device *dev, const char *name) -{ - return ERR_PTR(-ENODEV); -} static inline struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name) { return NULL; } +static inline struct dma_chan *dma_request_chan(struct device *dev, + const char *name) +{ + return ERR_PTR(-ENODEV); +} +static inline struct dma_chan *dma_request_chan_by_mask( + const dma_cap_mask_t *mask) +{ + return ERR_PTR(-ENODEV); +} static inline void dma_release_channel(struct dma_chan *chan) { } @@ -1186,6 +1315,8 @@ static inline int dma_get_slave_caps(struct dma_chan *chan, } #endif +#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name) + static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) { struct dma_slave_caps caps; diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h index 82a16527b367..ff8b55359648 100644 --- a/include/linux/dqblk_qtree.h +++ b/include/linux/dqblk_qtree.h @@ -34,7 +34,7 @@ struct qtree_mem_dqinfo { unsigned int 
dqi_entry_size; /* Size of quota entry in quota file */ unsigned int dqi_usable_bs; /* Space usable in block for quota data */ unsigned int dqi_qtree_depth; /* Precomputed depth of quota tree */ - struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */ + const struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */ }; int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); diff --git a/include/linux/edac.h b/include/linux/edac.h index 4fe67b853de0..9e0d78966552 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -28,12 +28,10 @@ struct device; extern int edac_op_state; extern int edac_err_assert; extern atomic_t edac_handlers; -extern struct bus_type edac_subsys; extern int edac_handler_set(void); extern void edac_atomic_assert_error(void); extern struct bus_type *edac_get_sysfs_subsys(void); -extern void edac_put_sysfs_subsys(void); enum { EDAC_REPORTING_ENABLED, @@ -237,8 +235,10 @@ enum mem_type { #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) #define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) #define MEM_FLAG_XDR BIT(MEM_XDR) -#define MEM_FLAG_DDR3 BIT(MEM_DDR3) -#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) +#define MEM_FLAG_DDR3 BIT(MEM_DDR3) +#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) +#define MEM_FLAG_DDR4 BIT(MEM_DDR4) +#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4) /** * enum edac-type - Error Detection and Correction capabilities and mode diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index eb049c622208..37ff4a6faa9a 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -29,6 +29,9 @@ #include <asm/bitsperlong.h> #ifdef __KERNEL__ +struct device; +int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); +unsigned char *arch_get_platform_get_mac_address(void); u32 eth_get_headlen(void *data, unsigned int max_len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 25c6324a0dd0..e59c3be92106 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -51,6 +51,7 @@ #define MAX_ACTIVE_DATA_LOGS 8 #define VERSION_LEN 256 +#define MAX_VOLUME_NAME 512 /* * For superblock @@ -84,7 +85,7 @@ struct f2fs_super_block { __le32 node_ino; /* node inode number */ __le32 meta_ino; /* meta inode number */ __u8 uuid[16]; /* 128-bit uuid for volume */ - __le16 volume_name[512]; /* volume name */ + __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */ __le32 extension_count; /* # of extensions below */ __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ __le32 cp_payload; diff --git a/include/linux/filter.h b/include/linux/filter.h index 4165e9ac9e36..43aa1f8855c7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -350,25 +350,43 @@ struct sk_filter { #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) +#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN + +static inline u8 *bpf_skb_cb(struct sk_buff *skb) +{ + /* eBPF programs may read/write skb->cb[] area to transfer meta + * data between tail calls. Since this also needs to work with + * tc, that scratch memory is mapped to qdisc_skb_cb's data area. + * + * In some socket filter cases, the cb unfortunately needs to be + * saved/restored so that protocol specific skb->cb[] data won't + * be lost. In any case, due to unpriviledged eBPF programs + * attached to sockets, we need to clear the bpf_skb_cb() area + * to not leak previous contents to user space. 
+ */ + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN); + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != + FIELD_SIZEOF(struct qdisc_skb_cb, data)); + + return qdisc_skb_cb(skb)->data; +} + static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, struct sk_buff *skb) { - u8 *cb_data = qdisc_skb_cb(skb)->data; - u8 saved_cb[QDISC_CB_PRIV_LEN]; + u8 *cb_data = bpf_skb_cb(skb); + u8 cb_saved[BPF_SKB_CB_LEN]; u32 res; - BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != - QDISC_CB_PRIV_LEN); - if (unlikely(prog->cb_access)) { - memcpy(saved_cb, cb_data, sizeof(saved_cb)); - memset(cb_data, 0, sizeof(saved_cb)); + memcpy(cb_saved, cb_data, sizeof(cb_saved)); + memset(cb_data, 0, sizeof(cb_saved)); } res = BPF_PROG_RUN(prog, skb); if (unlikely(prog->cb_access)) - memcpy(cb_data, saved_cb, sizeof(saved_cb)); + memcpy(cb_data, cb_saved, sizeof(cb_saved)); return res; } @@ -376,10 +394,11 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, struct sk_buff *skb) { - u8 *cb_data = qdisc_skb_cb(skb)->data; + u8 *cb_data = bpf_skb_cb(skb); if (unlikely(prog->cb_access)) - memset(cb_data, 0, QDISC_CB_PRIV_LEN); + memset(cb_data, 0, BPF_SKB_CB_LEN); + return BPF_PROG_RUN(prog, skb); } @@ -447,6 +466,8 @@ void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_attach_bpf(u32 ufd, struct sock *sk); +int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); +int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); int sk_detach_filter(struct sock *sk); int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); @@ -493,6 +514,25 @@ static inline void bpf_jit_free(struct bpf_prog *fp) #define BPF_ANC BIT(15) +static inline bool bpf_needs_clear_a(const struct sock_filter *first) +{ + switch (first->code) { + case BPF_RET | BPF_K: + case BPF_LD | BPF_W | BPF_LEN: + return false; + + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: + if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X) + return true; + return false; + + default: + return true; + } +} + static inline u16 bpf_anc_helper(const struct sock_filter *ftest) { BUG_ON(ftest->code & BPF_ANC); diff --git a/include/linux/fs.h b/include/linux/fs.h index 3aa514254161..eb73d74ed992 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -31,6 +31,7 @@ #include <linux/blk_types.h> #include <linux/workqueue.h> #include <linux/percpu-rwsem.h> +#include <linux/delayed_call.h> #include <asm/byteorder.h> #include <uapi/linux/fs.h> @@ -482,6 +483,9 @@ struct block_device { int bd_fsfreeze_count; /* Mutex for freeze */ struct mutex bd_fsfreeze_mutex; +#ifdef CONFIG_FS_DAX + int bd_map_count; +#endif }; /* @@ -1042,7 +1046,7 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); extern int fcntl_getlease(struct file *filp); /* fs/locks.c */ -void locks_free_lock_context(struct file_lock_context *ctx); +void locks_free_lock_context(struct inode *inode); void locks_free_lock(struct file_lock *fl); extern void locks_init_lock(struct file_lock *); extern struct file_lock * locks_alloc_lock(void); @@ -1103,7 +1107,7 @@ static inline int fcntl_getlease(struct file *filp) } static inline void -locks_free_lock_context(struct file_lock_context *ctx) +locks_free_lock_context(struct inode *inode) { } @@ -1629,16 +1633,21 @@ struct file_operations { #ifndef CONFIG_MMU unsigned 
(*mmap_capabilities)(struct file *); #endif + ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, + loff_t, size_t, unsigned int); + int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, + u64); + ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *, + u64); }; struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); - const char * (*follow_link) (struct dentry *, void **); + const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); int (*permission) (struct inode *, int); struct posix_acl * (*get_acl)(struct inode *, int); int (*readlink) (struct dentry *, char __user *,int); - void (*put_link) (struct inode *, void *); int (*create) (struct inode *,struct dentry *, umode_t, bool); int (*link) (struct dentry *,struct inode *,struct dentry *); @@ -1680,6 +1689,12 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *, unsigned long, loff_t *); extern ssize_t vfs_writev(struct file *, const struct iovec __user *, unsigned long, loff_t *); +extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, + loff_t, size_t, unsigned int); +extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, u64 len); +extern int vfs_dedupe_file_range(struct file *file, + struct file_dedupe_range *same); struct super_operations { struct inode *(*alloc_inode)(struct super_block *sb); @@ -2027,12 +2042,9 @@ extern struct kobject *fs_kobj; #define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK) -#define FLOCK_VERIFY_READ 1 -#define FLOCK_VERIFY_WRITE 2 - -#ifdef CONFIG_FILE_LOCKING +#ifdef CONFIG_MANDATORY_FILE_LOCKING extern int locks_mandatory_locked(struct file *); -extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); +extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char); /* * Candidates for mandatory locking have the setgid bit set @@ -2062,19 +2074,59 @@ static inline int locks_verify_locked(struct file *file) } static inline int locks_verify_truncate(struct inode *inode, - struct file *filp, + struct file *f, loff_t size) { - if (inode->i_flctx && mandatory_lock(inode)) - return locks_mandatory_area( - FLOCK_VERIFY_WRITE, inode, filp, - size < inode->i_size ? size : inode->i_size, - (size < inode->i_size ? 
inode->i_size - size - : size - inode->i_size) - ); + if (!inode->i_flctx || !mandatory_lock(inode)) + return 0; + + if (size < inode->i_size) { + return locks_mandatory_area(inode, f, size, inode->i_size - 1, + F_WRLCK); + } else { + return locks_mandatory_area(inode, f, inode->i_size, size - 1, + F_WRLCK); + } +} + +#else /* !CONFIG_MANDATORY_FILE_LOCKING */ + +static inline int locks_mandatory_locked(struct file *file) +{ + return 0; +} + +static inline int locks_mandatory_area(struct inode *inode, struct file *filp, + loff_t start, loff_t end, unsigned char type) +{ return 0; } +static inline int __mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_locked(struct file *file) +{ + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, struct file *filp, + size_t size) +{ + return 0; +} + +#endif /* CONFIG_MANDATORY_FILE_LOCKING */ + + +#ifdef CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { /* @@ -2136,39 +2188,6 @@ static inline int break_layout(struct inode *inode, bool wait) } #else /* !CONFIG_FILE_LOCKING */ -static inline int locks_mandatory_locked(struct file *file) -{ - return 0; -} - -static inline int locks_mandatory_area(int rw, struct inode *inode, - struct file *filp, loff_t offset, - size_t count) -{ - return 0; -} - -static inline int __mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int locks_verify_locked(struct file *file) -{ - return 0; -} - -static inline int locks_verify_truncate(struct inode *inode, struct file *filp, - size_t size) -{ - return 0; -} - static inline int break_lease(struct inode *inode, unsigned int mode) { return 0; @@ -2264,6 +2283,14 @@ extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); +#ifdef CONFIG_FS_DAX +extern bool blkdev_dax_capable(struct block_device *bdev); +#else +static inline bool blkdev_dax_capable(struct block_device *bdev) +{ + return false; +} +#endif extern struct super_block *blockdev_superblock; @@ -2291,9 +2318,9 @@ static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void { } -static inline int sb_is_blkdev_sb(struct super_block *sb) +static inline bool sb_is_blkdev_sb(struct super_block *sb) { - return 0; + return false; } #endif extern int sync_filesystem(struct super_block *); @@ -2371,7 +2398,7 @@ extern void init_special_inode(struct inode *, umode_t, dev_t); /* Invalid inode operations -- fs/bad_inode.c */ extern void make_bad_inode(struct inode *); -extern int is_bad_inode(struct inode *); +extern bool is_bad_inode(struct inode *); #ifdef CONFIG_BLOCK /* @@ -2532,8 +2559,8 @@ extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); extern struct file * open_exec(const char *); /* fs/dcache.c -- generic fs support functions */ -extern int is_subdir(struct dentry *, struct dentry *); -extern int path_is_under(struct path *, struct path *); +extern bool is_subdir(struct dentry *, struct dentry *); +extern bool path_is_under(struct path *, struct path *); extern char *file_path(struct file *, char *, int); @@ -2660,6 +2687,8 @@ extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, int whence, loff_t maxsize, loff_t eof); extern 
loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size); +extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); +extern loff_t no_seek_end_llseek(struct file *, loff_t, int); extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); @@ -2736,14 +2765,14 @@ extern const struct file_operations generic_ro_fops; extern int readlink_copy(char __user *, int, const char *); extern int page_readlink(struct dentry *, char __user *, int); -extern const char *page_follow_link_light(struct dentry *, void **); -extern void page_put_link(struct inode *, void *); +extern const char *page_get_link(struct dentry *, struct inode *, + struct delayed_call *); +extern void page_put_link(void *); extern int __page_symlink(struct inode *inode, const char *symname, int len, int nofs); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; -extern void kfree_put_link(struct inode *, void *); -extern void free_page_put_link(struct inode *, void *); +extern void kfree_link(void *); extern int generic_readlink(struct dentry *, char __user *, int); extern void generic_fillattr(struct inode *, struct kstat *); int vfs_getattr_nosec(struct path *path, struct kstat *stat); @@ -2754,7 +2783,8 @@ void __inode_sub_bytes(struct inode *inode, loff_t bytes); void inode_sub_bytes(struct inode *inode, loff_t bytes); loff_t inode_get_bytes(struct inode *inode); void inode_set_bytes(struct inode *inode, loff_t bytes); -const char *simple_follow_link(struct dentry *, void **); +const char *simple_get_link(struct dentry *, struct inode *, + struct delayed_call *); extern const struct inode_operations simple_symlink_inode_operations; extern int iterate_dir(struct file *, struct dir_context *); @@ -2764,8 +2794,6 @@ extern int vfs_lstat(const char __user *, struct kstat *); extern int vfs_fstat(unsigned int, struct kstat *); extern int vfs_fstatat(int , const char __user *, struct kstat *, int); -extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, - unsigned long arg); extern int __generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, loff_t start, loff_t len, @@ -2963,7 +2991,7 @@ int __init get_filesystem_list(char *buf); #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ (flag & __FMODE_NONOTIFY))) -static inline int is_sxid(umode_t mode) +static inline bool is_sxid(umode_t mode) { return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); } @@ -3025,5 +3053,6 @@ static inline bool dir_relax(struct inode *inode) } extern bool path_noexec(const struct path *path); +extern void inode_nohighmem(struct inode *inode); #endif /* _LINUX_FS_H */ diff --git a/include/linux/fsl/edac.h b/include/linux/fsl/edac.h new file mode 100644 index 000000000000..90d64d4ec1a9 --- /dev/null +++ b/include/linux/fsl/edac.h @@ -0,0 +1,8 @@ +#ifndef FSL_EDAC_H +#define FSL_EDAC_H + +struct mpc85xx_edac_pci_plat_data { + struct device_node *of_node; +}; + +#endif diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 533c4408529a..6b7e89f45aa4 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -220,7 +220,10 @@ struct fsnotify_mark { /* List of marks by group->i_fsnotify_marks. 
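To illustrate the symlink helper rework above (page_get_link()/kfree_link() and the delayed_call-based put side), a minimal sketch of an in-memory filesystem wiring the renamed simple_get_link() into its symlink inode operations; this assumes the ->get_link() inode operation that this conversion targets, and the structure name is purely illustrative:

static const struct inode_operations my_symlink_iops = {
        .readlink = generic_readlink,
        .get_link = simple_get_link,    /* renamed from simple_follow_link() */
};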
Also reused for queueing * mark into destroy_list when it's waiting for the end of SRCU period * before it can be freed. [group->mark_mutex] */ - struct list_head g_list; + union { + struct list_head g_list; + struct rcu_head g_rcu; + }; /* Protects inode / mnt pointers, flags, masks */ spinlock_t lock; /* List of marks for inode / vfsmount [obj_lock] */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index eae6548efbf0..0639dcc98195 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -76,8 +76,8 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * ENABLED - set/unset when ftrace_ops is registered/unregistered * DYNAMIC - set when ftrace_ops is registered to denote dynamically * allocated ftrace_ops which need special care - * CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops - * could be controled by following calls: + * PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops + * could be controlled by following calls: * ftrace_function_local_enable * ftrace_function_local_disable * SAVE_REGS - The ftrace_ops wants regs saved at each function called @@ -121,7 +121,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); enum { FTRACE_OPS_FL_ENABLED = 1 << 0, FTRACE_OPS_FL_DYNAMIC = 1 << 1, - FTRACE_OPS_FL_CONTROL = 1 << 2, + FTRACE_OPS_FL_PER_CPU = 1 << 2, FTRACE_OPS_FL_SAVE_REGS = 1 << 3, FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4, FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5, @@ -134,6 +134,7 @@ enum { FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, FTRACE_OPS_FL_IPMODIFY = 1 << 13, FTRACE_OPS_FL_PID = 1 << 14, + FTRACE_OPS_FL_RCU = 1 << 15, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -146,11 +147,11 @@ struct ftrace_ops_hash { #endif /* - * Note, ftrace_ops can be referenced outside of RCU protection. - * (Although, for perf, the control ops prevent that). If ftrace_ops is - * allocated and not part of kernel core data, the unregistering of it will - * perform a scheduling on all CPUs to make sure that there are no more users. - * Depending on the load of the system that may take a bit of time. + * Note, ftrace_ops can be referenced outside of RCU protection, unless + * the RCU flag is set. If ftrace_ops is allocated and not part of kernel + * core data, the unregistering of it will perform a scheduling on all CPUs + * to make sure that there are no more users. Depending on the load of the + * system that may take a bit of time. * * Any private data added must also take care not to be freed and if private * data is added to a ftrace_ops that is in core code, the user of the @@ -196,34 +197,34 @@ int unregister_ftrace_function(struct ftrace_ops *ops); void clear_ftrace_function(void); /** - * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu + * ftrace_function_local_enable - enable ftrace_ops on current cpu * * This function enables tracing on current cpu by decreasing * the per cpu control variable. * It must be called with preemption disabled and only on ftrace_ops - * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. 
*/ static inline void ftrace_function_local_enable(struct ftrace_ops *ops) { - if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) + if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU))) return; (*this_cpu_ptr(ops->disabled))--; } /** - * ftrace_function_local_disable - enable controlled ftrace_ops on current cpu + * ftrace_function_local_disable - disable ftrace_ops on current cpu * - * This function enables tracing on current cpu by decreasing + * This function disables tracing on current cpu by increasing * the per cpu control variable. * It must be called with preemption disabled and only on ftrace_ops - * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. */ static inline void ftrace_function_local_disable(struct ftrace_ops *ops) { - if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL))) + if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU))) return; (*this_cpu_ptr(ops->disabled))++; @@ -235,12 +236,12 @@ static inline void ftrace_function_local_disable(struct ftrace_ops *ops) * * This function returns value of ftrace_ops::disabled on current cpu. * It must be called with preemption disabled and only on ftrace_ops - * registered with FTRACE_OPS_FL_CONTROL. If called without preemption + * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled. */ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) { - WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)); + WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)); return *this_cpu_ptr(ops->disabled); } @@ -296,6 +297,21 @@ int ftrace_arch_code_modify_post_process(void); struct dyn_ftrace; +enum ftrace_bug_type { + FTRACE_BUG_UNKNOWN, + FTRACE_BUG_INIT, + FTRACE_BUG_NOP, + FTRACE_BUG_CALL, + FTRACE_BUG_UPDATE, +}; +extern enum ftrace_bug_type ftrace_bug_type; + +/* + * Archs can set this to point to a variable that holds the value that was + * expected at the call site before calling ftrace_bug(). + */ +extern const void *ftrace_expected; + void ftrace_bug(int err, struct dyn_ftrace *rec); struct seq_file; @@ -341,6 +357,7 @@ bool is_ftrace_trampoline(unsigned long addr); * REGS - the record wants the function to save regs * REGS_EN - the function is set up to save regs. * IPMODIFY - the record allows for the IP address to be changed. + * DISABLED - the record is not ready to be touched yet * * When a new ftrace_ops is registered and wants a function to save * pt_regs, the rec->flag REGS is set. 
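To make the CONTROL -> PER_CPU rename above concrete, a minimal sketch (not part of the patch) of a per-CPU ftrace_ops whose tracing is paused on the local CPU; the callback and function names are invented, and the ops is assumed to have been registered elsewhere with register_ftrace_function():

static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs)
{
        /* per-CPU tracing work goes here */
}

static struct ftrace_ops my_ops = {
        .func  = my_trace_callback,
        .flags = FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void pause_tracing_on_this_cpu(void)
{
        preempt_disable();                      /* required by the helpers */
        ftrace_function_local_disable(&my_ops); /* bumps this CPU's disabled counter */
        preempt_enable();
}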
When the function has been @@ -355,10 +372,11 @@ enum { FTRACE_FL_TRAMP = (1UL << 28), FTRACE_FL_TRAMP_EN = (1UL << 27), FTRACE_FL_IPMODIFY = (1UL << 26), + FTRACE_FL_DISABLED = (1UL << 25), }; -#define FTRACE_REF_MAX_SHIFT 26 -#define FTRACE_FL_BITS 6 +#define FTRACE_REF_MAX_SHIFT 25 +#define FTRACE_FL_BITS 7 #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) @@ -586,6 +604,7 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size); extern int skip_trace(unsigned long ip); extern void ftrace_module_init(struct module *mod); +extern void ftrace_release_mod(struct module *mod); extern void ftrace_disable_daemon(void); extern void ftrace_enable_daemon(void); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 847cc1d91634..5c706765404a 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -162,6 +162,7 @@ struct disk_part_tbl { }; struct disk_events; +struct badblocks; #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -213,6 +214,7 @@ struct gendisk { struct kobject integrity_kobj; #endif /* CONFIG_BLK_DEV_INTEGRITY */ int node_id; + struct badblocks *bb; }; static inline struct gendisk *part_to_disk(struct hd_struct *part) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 8942af0813e3..28ad5f6494b0 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -30,7 +30,7 @@ struct vm_area_struct; #define ___GFP_HARDWALL 0x20000u #define ___GFP_THISNODE 0x40000u #define ___GFP_ATOMIC 0x80000u -#define ___GFP_NOACCOUNT 0x100000u +#define ___GFP_ACCOUNT 0x100000u #define ___GFP_NOTRACK 0x200000u #define ___GFP_DIRECT_RECLAIM 0x400000u #define ___GFP_OTHER_NODE 0x800000u @@ -73,11 +73,15 @@ struct vm_area_struct; * * __GFP_THISNODE forces the allocation to be satisified from the requested * node with no fallbacks or placement policy enforcements. + * + * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only relevant + * to kmem allocations). */ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) +#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) /* * Watermark modifiers -- controls access to emergency reserves @@ -104,7 +108,6 @@ struct vm_area_struct; #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) -#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* * Reclaim modifiers @@ -197,6 +200,9 @@ struct vm_area_struct; * GFP_KERNEL is typical for kernel-internal allocations. The caller requires * ZONE_NORMAL or a lower zone for direct access but can direct reclaim. * + * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is + * accounted to kmemcg. + * * GFP_NOWAIT is for kernel allocations that should not stall for direct * reclaim, start physical IO or use any filesystem callback. 
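A one-line illustration of the __GFP_ACCOUNT/GFP_KERNEL_ACCOUNT combination documented above (sketch only; the context structure is hypothetical) — the allocation is charged to the caller's kmem cgroup:

        struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);

        if (!ctx)
                return -ENOMEM;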
* @@ -236,6 +242,7 @@ struct vm_area_struct; */ #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) #define GFP_NOIO (__GFP_RECLAIM) #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) @@ -271,7 +278,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) { - return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM); + return !!(gfp_flags & __GFP_DIRECT_RECLAIM); } #ifdef CONFIG_HIGHMEM @@ -377,10 +384,11 @@ static inline enum zone_type gfp_zone(gfp_t flags) static inline int gfp_zonelist(gfp_t flags) { - if (IS_ENABLED(CONFIG_NUMA) && unlikely(flags & __GFP_THISNODE)) - return 1; - - return 0; +#ifdef CONFIG_NUMA + if (unlikely(flags & __GFP_THISNODE)) + return ZONELIST_NOFALLBACK; +#endif + return ZONELIST_FALLBACK; } /* diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h index 519b6e2d769e..661e5c2a8e2a 100644 --- a/include/linux/hashtable.h +++ b/include/linux/hashtable.h @@ -16,6 +16,10 @@ struct hlist_head name[1 << (bits)] = \ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + #define DECLARE_HASHTABLE(name, bits) \ struct hlist_head name[1 << (bits)] diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index 1acb1445e05f..e31bcd4c7859 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -101,7 +101,7 @@ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, size_t size); /* May be used by hardware driver to gain control over HDLC device */ -void detach_hdlc_protocol(struct net_device *dev); +int detach_hdlc_protocol(struct net_device *dev); static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb, struct net_device *dev) diff --git a/include/linux/hid.h b/include/linux/hid.h index 251a1d382e23..75b66eccc692 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -168,6 +168,8 @@ struct hid_item { #define HID_UP_MSVENDOR 0xff000000 #define HID_UP_CUSTOM 0x00ff0000 #define HID_UP_LOGIVENDOR 0xffbc0000 +#define HID_UP_LOGIVENDOR2 0xff090000 +#define HID_UP_LOGIVENDOR3 0xff430000 #define HID_UP_LNVENDOR 0xffa00000 #define HID_UP_SENSOR 0x00200000 @@ -563,6 +565,9 @@ struct hid_device { /* device report descriptor */ wait_queue_head_t debug_wait; }; +#define to_hid_device(pdev) \ + container_of(pdev, struct hid_device, dev) + static inline void *hid_get_drvdata(struct hid_device *hdev) { return dev_get_drvdata(&hdev->dev); @@ -712,6 +717,9 @@ struct hid_driver { struct device_driver driver; }; +#define to_hid_driver(pdrv) \ + container_of(pdrv, struct hid_driver, driver) + /** * hid_ll_driver - low level driver callbacks * @start: called on probe to start the device diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index 5dd60c2e120f..2790591c77cf 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -135,9 +135,6 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info, * @device: Driver model representation of the device * @tx_cfg: HSI TX configuration * @rx_cfg: HSI RX configuration - * @e_handler: Callback for handling port events (RX Wake High/Low) - * @pclaimed: Keeps tracks if the clients claimed its 
associated HSI port - * @nb: Notifier block for port events */ struct hsi_client { struct device device; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 685c262e0be8..e76574d8f9b5 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -96,9 +96,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct address_space *mapping, pgoff_t idx, unsigned long address); -#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); -#endif extern int hugepages_treat_as_movable; extern int sysctl_hugetlb_shm_group; @@ -265,20 +263,18 @@ struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, struct user_struct **user, int creat_flags, int page_size_log); -static inline int is_file_hugepages(struct file *file) +static inline bool is_file_hugepages(struct file *file) { if (file->f_op == &hugetlbfs_file_operations) - return 1; - if (is_file_shm_hugepages(file)) - return 1; + return true; - return 0; + return is_file_shm_hugepages(file); } #else /* !CONFIG_HUGETLBFS */ -#define is_file_hugepages(file) 0 +#define is_file_hugepages(file) false static inline struct file * hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, struct user_struct **user, int creat_flags, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 8fdc17b84739..753dbad0bf94 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -141,8 +141,6 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, { u32 read_loc, write_loc, dsize; - smp_read_barrier_depends(); - /* Capture the read/write indices before they changed */ read_loc = rbi->ring_buffer->read_index; write_loc = rbi->ring_buffer->write_index; @@ -630,6 +628,11 @@ struct hv_input_signal_event_buffer { struct hv_input_signal_event event; }; +enum hv_signal_policy { + HV_SIGNAL_POLICY_DEFAULT = 0, + HV_SIGNAL_POLICY_EXPLICIT, +}; + struct vmbus_channel { /* Unique channel id */ int id; @@ -757,8 +760,21 @@ struct vmbus_channel { * link up channels based on their CPU affinity. */ struct list_head percpu_list; + /* + * Host signaling policy: The default policy will be + * based on the ring buffer state. We will also support + * a policy where the client driver can have explicit + * signaling control. + */ + enum hv_signal_policy signal_policy; }; +static inline void set_channel_signal_state(struct vmbus_channel *c, + enum hv_signal_policy policy) +{ + c->signal_policy = policy; +} + static inline void set_channel_read_state(struct vmbus_channel *c, bool state) { c->batched_reading = state; @@ -983,16 +999,8 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, resource_size_t size, resource_size_t align, bool fb_overlap_ok); -/** - * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device - * - * This macro is used to create a struct hv_vmbus_device_id that matches a - * specific device. - */ -#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \ - g8, g9, ga, gb, gc, gd, ge, gf) \ - .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \ - g8, g9, ga, gb, gc, gd, ge, gf }, +int vmbus_cpu_number_to_vp_number(int cpu_number); +u64 hv_do_hypercall(u64 control, void *input, void *output); /* * GUID definitions of various offer types - services offered to the guest. 
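The new per-channel signaling policy above is opt-in; a hedged sketch of a VMBus driver requesting explicit host signaling before opening its channel (the probe function, callback, and ring sizes are illustrative, assuming the usual vmbus_open() signature):

static int my_vmbus_probe(struct hv_device *dev,
                          const struct hv_vmbus_device_id *dev_id)
{
        /* let the driver, not ring-buffer state, decide when to signal the host */
        set_channel_signal_state(dev->channel, HV_SIGNAL_POLICY_EXPLICIT);

        return vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
                          NULL, 0, my_onchannelcallback, dev->channel);
}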
@@ -1003,118 +1011,102 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, * {f8615163-df3e-46c5-913f-f2d2f965ed0e} */ #define HV_NIC_GUID \ - .guid = { \ - 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \ - 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \ - } + .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ + 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) /* * IDE GUID * {32412632-86cb-44a2-9b5c-50d1417354f5} */ #define HV_IDE_GUID \ - .guid = { \ - 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \ - 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \ - } + .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ + 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) /* * SCSI GUID * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ #define HV_SCSI_GUID \ - .guid = { \ - 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \ - 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \ - } + .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ + 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) /* * Shutdown GUID * {0e0b6031-5213-4934-818b-38d90ced39db} */ #define HV_SHUTDOWN_GUID \ - .guid = { \ - 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \ - 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \ - } + .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ + 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) /* * Time Synch GUID * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */ #define HV_TS_GUID \ - .guid = { \ - 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \ - 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \ - } + .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ + 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) /* * Heartbeat GUID * {57164f39-9115-4e78-ab55-382f3bd5422d} */ #define HV_HEART_BEAT_GUID \ - .guid = { \ - 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \ - 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \ - } + .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ + 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) /* * KVP GUID * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} */ #define HV_KVP_GUID \ - .guid = { \ - 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \ - 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6 \ - } + .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ + 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) /* * Dynamic memory GUID * {525074dc-8985-46e2-8057-a307dc18a502} */ #define HV_DM_GUID \ - .guid = { \ - 0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \ - 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \ - } + .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ + 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) /* * Mouse GUID * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} */ #define HV_MOUSE_GUID \ - .guid = { \ - 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \ - 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \ - } + .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ + 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) + +/* + * Keyboard GUID + * {f912ad6d-2b17-48ea-bd65-f927a61c7684} + */ +#define HV_KBD_GUID \ + .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ + 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) /* * VSS (Backup/Restore) GUID */ #define HV_VSS_GUID \ - .guid = { \ - 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, \ - 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 \ - } + .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ + 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) /* * Synthetic Video GUID * {DA0A7802-E377-4aac-8E77-0558EB1073F8} */ #define HV_SYNTHVID_GUID \ - .guid = { \ - 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, \ - 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 \ - } + .guid = UUID_LE(0xda0a7802, 0xe377, 
0x4aac, 0x8e, 0x77, \ + 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) /* * Synthetic FC GUID * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} */ #define HV_SYNTHFC_GUID \ - .guid = { \ - 0x4A, 0xCC, 0x9B, 0x2F, 0x69, 0x00, 0xF3, 0x4A, \ - 0xB7, 0x6B, 0x6F, 0xD0, 0xBE, 0x52, 0x8C, 0xDA \ - } + .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ + 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) /* * Guest File Copy Service @@ -1122,20 +1114,25 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj, */ #define HV_FCOPY_GUID \ - .guid = { \ - 0xE3, 0x4B, 0xD1, 0x34, 0xE4, 0xDE, 0xC8, 0x41, \ - 0x9A, 0xE7, 0x6B, 0x17, 0x49, 0x77, 0xC1, 0x92 \ - } + .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ + 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) /* * NetworkDirect. This is the guest RDMA service. * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} */ #define HV_ND_GUID \ - .guid = { \ - 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \ - 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \ - } + .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ + 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) + +/* + * PCI Express Pass Through + * {44C4F61D-4444-4400-9D52-802E27EDE19F} + */ + +#define HV_PCIE_GUID \ + .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ + 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) /* * Common header for Hyper-V ICs diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 768063baafbf..200cf13b00f6 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -414,6 +414,22 @@ struct i2c_algorithm { }; /** + * struct i2c_timings - I2C timing information + * @bus_freq_hz: the bus frequency in Hz + * @scl_rise_ns: time SCL signal takes to rise in ns; t(r) in the I2C specification + * @scl_fall_ns: time SCL signal takes to fall in ns; t(f) in the I2C specification + * @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns + * @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification + */ +struct i2c_timings { + u32 bus_freq_hz; + u32 scl_rise_ns; + u32 scl_fall_ns; + u32 scl_int_delay_ns; + u32 sda_fall_ns; +}; + +/** * struct i2c_bus_recovery_info - I2C bus recovery information * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery(). 
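The UUID_LE-based GUID macros above drop the hand-rolled byte arrays but are used exactly as before in a driver's match table; a short illustrative sketch:

static const struct hv_vmbus_device_id my_id_table[] = {
        { HV_PCIE_GUID, },      /* the new PCI Express pass-through channel */
        { },
};
MODULE_DEVICE_TABLE(vmbus, my_id_table);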
@@ -493,6 +509,8 @@ struct i2c_adapter_quirks { /* convenience macro for typical write-then read case */ #define I2C_AQ_COMB_WRITE_THEN_READ (I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | \ I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR) +/* clock stretching is not supported */ +#define I2C_AQ_NO_CLK_STRETCH BIT(4) /* * i2c_adapter is the structure used to identify a physical i2c bus along @@ -602,6 +620,7 @@ extern void i2c_clients_command(struct i2c_adapter *adap, extern struct i2c_adapter *i2c_get_adapter(int nr); extern void i2c_put_adapter(struct i2c_adapter *adap); +void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults); /* Return the functionality mask */ static inline u32 i2c_get_functionality(struct i2c_adapter *adap) @@ -615,6 +634,20 @@ static inline int i2c_check_functionality(struct i2c_adapter *adap, u32 func) return (func & i2c_get_functionality(adap)) == func; } +/** + * i2c_check_quirks() - Function for checking the quirk flags in an i2c adapter + * @adap: i2c adapter + * @quirks: quirk flags + * + * Return: true if the adapter has all the specified quirk flags, false if not + */ +static inline bool i2c_check_quirks(struct i2c_adapter *adap, u64 quirks) +{ + if (!adap->quirks) + return false; + return (adap->quirks->flags & quirks) == quirks; +} + /* Return the adapter number for a specific adapter */ static inline int i2c_adapter_id(struct i2c_adapter *adap) { @@ -622,7 +655,7 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) } /** - * module_i2c_driver() - Helper macro for registering a I2C driver + * module_i2c_driver() - Helper macro for registering a modular I2C driver * @__i2c_driver: i2c_driver struct * * Helper macro for I2C drivers which do not do anything special in module @@ -633,6 +666,17 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) module_driver(__i2c_driver, i2c_add_driver, \ i2c_del_driver) +/** + * builtin_i2c_driver() - Helper macro for registering a builtin I2C driver + * @__i2c_driver: i2c_driver struct + * + * Helper macro for I2C drivers which do not do anything special in their + * init. This eliminates a lot of boilerplate. Each driver may only + * use this macro once, and calling it replaces device_initcall(). 
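Pulling the new i2c helpers together, a hedged sketch of an adapter driver reading timing properties from firmware and of a client checking the new clock-stretching quirk (driver names and the register-programming step are hypothetical):

static int my_i2c_adap_probe(struct platform_device *pdev)
{
        struct i2c_timings t;

        /* fill from DT/ACPI properties, falling back to I2C spec defaults */
        i2c_parse_fw_timings(&pdev->dev, &t, true);
        /* ... program t.bus_freq_hz, t.scl_rise_ns, t.scl_fall_ns into the IP core ... */
        return 0;
}

static void my_client_check(struct i2c_client *client)
{
        if (i2c_check_quirks(client->adapter, I2C_AQ_NO_CLK_STRETCH))
                dev_warn(&client->dev, "adapter cannot stretch SCL\n");
}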
+ */ +#define builtin_i2c_driver(__i2c_driver) \ + builtin_driver(__i2c_driver, i2c_add_driver) + #endif /* I2C */ #if IS_ENABLED(CONFIG_OF) @@ -644,6 +688,7 @@ extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) /* must call i2c_put_adapter() when done with returned i2c_adapter device */ struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node); + #else static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index b49cf923becc..ba7a9b0c7c57 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -91,7 +91,6 @@ enum { PPPOX_CONNECTED = 1, /* connection established ==TCP_ESTABLISHED */ PPPOX_BOUND = 2, /* bound to ppp device */ PPPOX_RELAY = 4, /* forwarding is enabled */ - PPPOX_ZOMBIE = 8, /* dead, but still bound to ppp device */ PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/ }; diff --git a/include/linux/if_team.h b/include/linux/if_team.h index a6aa970758a2..b84e49c3a738 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -164,6 +164,7 @@ struct team_mode { size_t priv_size; size_t port_priv_size; const struct team_mode_ops *ops; + enum netdev_lag_tx_type lag_tx_type; }; #define TEAM_PORT_HASHBITS 4 diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 67ce5bd3b56a..a5f6ce6b578c 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) /* found in socket.c */ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); -static inline bool is_vlan_dev(struct net_device *dev) +static inline bool is_vlan_dev(const struct net_device *dev) { return dev->priv_flags & IFF_802_1Q_VLAN; } @@ -621,7 +621,7 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | - NETIF_F_GEN_CSUM | + NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h new file mode 100644 index 000000000000..767467d886de --- /dev/null +++ b/include/linux/iio/buffer-dma.h @@ -0,0 +1,152 @@ +/* + * Copyright 2013-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Licensed under the GPL-2. 
+ */ + +#ifndef __INDUSTRIALIO_DMA_BUFFER_H__ +#define __INDUSTRIALIO_DMA_BUFFER_H__ + +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/iio/buffer.h> + +struct iio_dma_buffer_queue; +struct iio_dma_buffer_ops; +struct device; + +struct iio_buffer_block { + u32 size; + u32 bytes_used; +}; + +/** + * enum iio_block_state - State of a struct iio_dma_buffer_block + * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued + * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue + * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA + * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue + * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed + */ +enum iio_block_state { + IIO_BLOCK_STATE_DEQUEUED, + IIO_BLOCK_STATE_QUEUED, + IIO_BLOCK_STATE_ACTIVE, + IIO_BLOCK_STATE_DONE, + IIO_BLOCK_STATE_DEAD, +}; + +/** + * struct iio_dma_buffer_block - IIO buffer block + * @head: List head + * @size: Total size of the block in bytes + * @bytes_used: Number of bytes that contain valid data + * @vaddr: Virutal address of the blocks memory + * @phys_addr: Physical address of the blocks memory + * @queue: Parent DMA buffer queue + * @kref: kref used to manage the lifetime of block + * @state: Current state of the block + */ +struct iio_dma_buffer_block { + /* May only be accessed by the owner of the block */ + struct list_head head; + size_t bytes_used; + + /* + * Set during allocation, constant thereafter. May be accessed read-only + * by anybody holding a reference to the block. + */ + void *vaddr; + dma_addr_t phys_addr; + size_t size; + struct iio_dma_buffer_queue *queue; + + /* Must not be accessed outside the core. */ + struct kref kref; + /* + * Must not be accessed outside the core. Access needs to hold + * queue->list_lock if the block is not owned by the core. + */ + enum iio_block_state state; +}; + +/** + * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer + * @blocks: Buffer blocks used for fileio + * @active_block: Block being used in read() + * @pos: Read offset in the active block + * @block_size: Size of each block + */ +struct iio_dma_buffer_queue_fileio { + struct iio_dma_buffer_block *blocks[2]; + struct iio_dma_buffer_block *active_block; + size_t pos; + size_t block_size; +}; + +/** + * struct iio_dma_buffer_queue - DMA buffer base structure + * @buffer: IIO buffer base structure + * @dev: Parent device + * @ops: DMA buffer callbacks + * @lock: Protects the incoming list, active and the fields in the fileio + * substruct + * @list_lock: Protects lists that contain blocks which can be modified in + * atomic context as well as blocks on those lists. 
This is the outgoing queue + * list and typically also a list of active blocks in the part that handles + * the DMA controller + * @incoming: List of buffers on the incoming queue + * @outgoing: List of buffers on the outgoing queue + * @active: Whether the buffer is currently active + * @fileio: FileIO state + */ +struct iio_dma_buffer_queue { + struct iio_buffer buffer; + struct device *dev; + const struct iio_dma_buffer_ops *ops; + + struct mutex lock; + spinlock_t list_lock; + struct list_head incoming; + struct list_head outgoing; + + bool active; + + struct iio_dma_buffer_queue_fileio fileio; +}; + +/** + * struct iio_dma_buffer_ops - DMA buffer callback operations + * @submit: Called when a block is submitted to the DMA controller + * @abort: Should abort all pending transfers + */ +struct iio_dma_buffer_ops { + int (*submit)(struct iio_dma_buffer_queue *queue, + struct iio_dma_buffer_block *block); + void (*abort)(struct iio_dma_buffer_queue *queue); +}; + +void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block); +void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, + struct list_head *list); + +int iio_dma_buffer_enable(struct iio_buffer *buffer, + struct iio_dev *indio_dev); +int iio_dma_buffer_disable(struct iio_buffer *buffer, + struct iio_dev *indio_dev); +int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, + char __user *user_buffer); +size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); +int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); +int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length); +int iio_dma_buffer_request_update(struct iio_buffer *buffer); + +int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, + struct device *dma_dev, const struct iio_dma_buffer_ops *ops); +void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue); +void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue); + +#endif diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h new file mode 100644 index 000000000000..5dcddf427bb0 --- /dev/null +++ b/include/linux/iio/buffer-dmaengine.h @@ -0,0 +1,18 @@ +/* + * Copyright 2014-2015 Analog Devices Inc. + * Author: Lars-Peter Clausen <lars@metafoo.de> + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __IIO_DMAENGINE_H__ +#define __IIO_DMAENGINE_H__ + +struct iio_buffer; +struct device; + +struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, + const char *channel); +void iio_dmaengine_buffer_free(struct iio_buffer *buffer); + +#endif diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index 1600c55828e0..2ec3ad58e8a0 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -18,6 +18,12 @@ struct iio_buffer; /** + * INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be + * configured. It has a fixed value which will be buffer specific. + */ +#define INDIO_BUFFER_FLAG_FIXED_WATERMARK BIT(0) + +/** * struct iio_buffer_access_funcs - access functions for buffers. * @store_to: actually store stuff to the buffer * @read_first_n: try to get a specified number of bytes (must exist) @@ -27,9 +33,15 @@ struct iio_buffer; * storage. * @set_bytes_per_datum:set number of bytes per datum * @set_length: set number of datums in buffer + * @enable: called if the buffer is attached to a device and the + * device starts sampling. Calls are balanced with + * @disable. 
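For the DMA buffer support above, a minimal sketch of how a converter driver might attach the generic dmaengine-backed buffer at probe time; it assumes iio_dmaengine_buffer_alloc() returns an ERR_PTR on failure and uses the existing iio_device_attach_buffer() helper, with all names illustrative:

static int my_adc_setup_buffer(struct platform_device *pdev,
                               struct iio_dev *indio_dev)
{
        struct iio_buffer *buf;

        buf = iio_dmaengine_buffer_alloc(&pdev->dev, "rx");
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        indio_dev->modes |= INDIO_BUFFER_HARDWARE;
        iio_device_attach_buffer(indio_dev, buf);
        return 0;
}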
+ * @disable: called if the buffer is attached to a device and the + * device stops sampling. Calles are balanced with @enable. * @release: called when the last reference to the buffer is dropped, * should free all resources allocated by the buffer. * @modes: Supported operating modes by this buffer type + * @flags: A bitmask combination of INDIO_BUFFER_FLAG_* * * The purpose of this structure is to make the buffer element * modular as event for a given driver, different usecases may require @@ -51,9 +63,13 @@ struct iio_buffer_access_funcs { int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); int (*set_length)(struct iio_buffer *buffer, int length); + int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); + void (*release)(struct iio_buffer *buffer); unsigned int modes; + unsigned int flags; }; /** diff --git a/include/linux/iio/configfs.h b/include/linux/iio/configfs.h new file mode 100644 index 000000000000..93befd67c15c --- /dev/null +++ b/include/linux/iio/configfs.h @@ -0,0 +1,15 @@ +/* + * Industrial I/O configfs support + * + * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + */ +#ifndef __IIO_CONFIGFS +#define __IIO_CONFIGFS + +extern struct configfs_subsystem iio_configfs_subsys; + +#endif /* __IIO_CONFIGFS */ diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 19c94c9acc81..b5894118755f 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -636,6 +636,8 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) } #endif +ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals); + int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, int *fract); diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h new file mode 100644 index 000000000000..5198f8ed08a4 --- /dev/null +++ b/include/linux/iio/sw_trigger.h @@ -0,0 +1,70 @@ +/* + * Industrial I/O software trigger interface + * + * Copyright (c) 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +#ifndef __IIO_SW_TRIGGER +#define __IIO_SW_TRIGGER + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/iio/iio.h> +#include <linux/configfs.h> + +#define module_iio_sw_trigger_driver(__iio_sw_trigger_type) \ + module_driver(__iio_sw_trigger_type, iio_register_sw_trigger_type, \ + iio_unregister_sw_trigger_type) + +struct iio_sw_trigger_ops; + +struct iio_sw_trigger_type { + const char *name; + struct module *owner; + const struct iio_sw_trigger_ops *ops; + struct list_head list; + struct config_group *group; +}; + +struct iio_sw_trigger { + struct iio_trigger *trigger; + struct iio_sw_trigger_type *trigger_type; + struct config_group group; +}; + +struct iio_sw_trigger_ops { + struct iio_sw_trigger* (*probe)(const char *); + int (*remove)(struct iio_sw_trigger *); +}; + +static inline +struct iio_sw_trigger *to_iio_sw_trigger(struct config_item *item) +{ + return container_of(to_config_group(item), struct iio_sw_trigger, + group); +} + +int iio_register_sw_trigger_type(struct iio_sw_trigger_type *tt); +void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt); + +struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *); +void iio_sw_trigger_destroy(struct iio_sw_trigger *); + +int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt); +void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt); + +static inline +void iio_swt_group_init_type_name(struct iio_sw_trigger *t, + const char *name, + struct config_item_type *type) +{ +#ifdef CONFIG_CONFIGFS_FS + config_group_init_type_name(&t->group, name, type); +#endif +} + +#endif /* __IIO_SW_TRIGGER */ diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 0e707f0c1a3e..7c27fa1030e8 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -3,6 +3,7 @@ #include <uapi/linux/inet_diag.h> +struct net; struct sock; struct inet_hashinfo; struct nlattr; @@ -23,6 +24,10 @@ struct inet_diag_handler { void (*idiag_get_info)(struct sock *sk, struct inet_diag_msg *r, void *info); + + int (*destroy)(struct sk_buff *in_skb, + const struct inet_diag_req_v2 *req); + __u16 idiag_type; __u16 idiag_info_size; }; @@ -41,6 +46,10 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb, const struct nlmsghdr *nlh, const struct inet_diag_req_v2 *req); +struct sock *inet_diag_find_one_icsk(struct net *net, + struct inet_hashinfo *hashinfo, + const struct inet_diag_req_v2 *req); + int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk); extern int inet_diag_register(const struct inet_diag_handler *handler); diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 1c1ff7e4faa4..f2cb8d45513d 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -150,7 +150,7 @@ extern struct task_group root_task_group; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN # define INIT_VTIME(tsk) \ - .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ + .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \ .vtime_snap = 0, \ .vtime_snap_whence = VTIME_SYS, #else diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index ad16809c8596..cb30edbfe9fc 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -195,6 +195,7 @@ extern void disable_irq(unsigned int irq); extern void disable_percpu_irq(unsigned int irq); extern void enable_irq(unsigned int irq); extern void enable_percpu_irq(unsigned int irq, unsigned int type); +extern bool irq_percpu_is_enabled(unsigned int irq); 
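The interrupt.h line just above adds irq_percpu_is_enabled(); a small hedged example of the check it enables in a per-CPU interrupt user (the virq variable is hypothetical):

        /* avoid double-enabling the per-CPU IRQ on this CPU */
        if (!irq_percpu_is_enabled(virq))
                enable_percpu_irq(virq, IRQ_TYPE_NONE);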
extern void irq_wake_thread(unsigned int irq, void *dev_id); /* The following three functions are for the core kernel use only. */ diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index bae69e5d693c..9c940263ca23 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -103,10 +103,21 @@ struct device_node; void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); int gic_cpu_if_down(unsigned int gic_nr); +/* + * Subdrivers that need some preparatory work can initialize their + * chips and call this to register their GICs. + */ +int gic_of_init(struct device_node *node, struct device_node *parent); + +/* + * Legacy platforms not converted to DT yet must use this to init + * their GIC + */ void gic_init(unsigned int nr, int start, void __iomem *dist , void __iomem *cpu); -int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); +int gicv2m_init(struct fwnode_handle *parent_handle, + struct irq_domain *parent); void gic_send_sgi(unsigned int cpu_id, unsigned int irq); int gic_get_cpu_id(unsigned int cpu); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index a587a33363c7..dcca77c4b9d2 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -1,6 +1,8 @@ #ifndef _LINUX_IRQDESC_H #define _LINUX_IRQDESC_H +#include <linux/rcupdate.h> + /* * Core internal functions to deal with irq descriptors */ @@ -40,6 +42,7 @@ struct pt_regs; * IRQF_NO_SUSPEND set * @force_resume_depth: number of irqactions on a irq descriptor with * IRQF_FORCE_RESUME set + * @rcu: rcu head for delayed free * @dir: /proc/irq/ procfs entry * @name: flow handler name for /proc/interrupts output */ @@ -82,6 +85,9 @@ struct irq_desc { #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif +#ifdef CONFIG_SPARSE_IRQ + struct rcu_head rcu; +#endif int parent_irq; struct module *owner; const char *name; diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d5e5c5bef28c..f64622ad02c1 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -211,6 +211,11 @@ static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) return node ? 
&node->fwnode : NULL; } +static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode) +{ + return fwnode && fwnode->type == FWNODE_IRQCHIP; +} + static inline struct irq_domain *irq_find_matching_host(struct device_node *node, enum irq_domain_bus_token bus_token) { @@ -367,6 +372,9 @@ static inline int irq_domain_alloc_irqs(struct irq_domain *domain, return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); } +extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs, void *arg); extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, @@ -410,6 +418,11 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) static inline void irq_dispose_mapping(unsigned int virq) { } static inline void irq_domain_activate_irq(struct irq_data *data) { } static inline void irq_domain_deactivate_irq(struct irq_data *data) { } +static inline struct irq_domain *irq_find_matching_fwnode( + struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) +{ + return NULL; +} #endif /* !CONFIG_IRQ_DOMAIN */ #endif /* _LINUX_IRQDOMAIN_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 350dfb08aee3..7311c3294e25 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -255,6 +255,7 @@ extern long (*panic_blink)(int state); __printf(1, 2) void panic(const char *fmt, ...) __noreturn __cold; +void nmi_panic_self_stop(struct pt_regs *); extern void oops_enter(void); extern void oops_exit(void); void print_oops_end_marker(void); @@ -446,6 +447,33 @@ extern int sysctl_panic_on_stackoverflow; extern bool crash_kexec_post_notifiers; /* + * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It + * holds a CPU number which is executing panic() currently. A value of + * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). + */ +extern atomic_t panic_cpu; +#define PANIC_CPU_INVALID -1 + +/* + * A variant of panic() called from NMI context. We return if we've already + * panicked on this CPU. If another CPU already panicked, loop in + * nmi_panic_self_stop() which can provide architecture dependent code such + * as saving register state for crash dump. + */ +#define nmi_panic(regs, fmt, ...) \ +do { \ + int old_cpu, cpu; \ + \ + cpu = raw_smp_processor_id(); \ + old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); \ + \ + if (old_cpu == PANIC_CPU_INVALID) \ + panic(fmt, ##__VA_ARGS__); \ + else if (old_cpu != cpu) \ + nmi_panic_self_stop(regs); \ +} while (0) + +/* * Only to be used by arch init code. If the user over-wrote the default * CONFIG_PANIC_TIMEOUT, honor it. 
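The nmi_panic() helper above is meant to replace direct panic() calls from NMI context; a hedged sketch of a handler using it (the handler and policy flag are hypothetical):

static void my_platform_nmi_handler(struct pt_regs *regs)
{
        if (my_panic_on_nmi)            /* hypothetical policy knob */
                nmi_panic(regs, "Fatal platform NMI\n");
        /*
         * nmi_panic() returns here only if this CPU had already entered
         * panic(); if another CPU panicked first, it loops in
         * nmi_panic_self_stop() instead.
         */
}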
*/ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 5d4e9c4b821d..af51df35d749 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -274,6 +274,8 @@ void pr_cont_kernfs_path(struct kernfs_node *kn); struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn); struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name, const void *ns); +struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent, + const char *path, const void *ns); void kernfs_get(struct kernfs_node *kn); void kernfs_put(struct kernfs_node *kn); @@ -350,6 +352,10 @@ static inline struct kernfs_node * kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name, const void *ns) { return NULL; } +static inline struct kernfs_node * +kernfs_walk_and_get_ns(struct kernfs_node *parent, const char *path, + const void *ns) +{ return NULL; } static inline void kernfs_get(struct kernfs_node *kn) { } static inline void kernfs_put(struct kernfs_node *kn) { } @@ -431,6 +437,12 @@ kernfs_find_and_get(struct kernfs_node *kn, const char *name) } static inline struct kernfs_node * +kernfs_walk_and_get(struct kernfs_node *kn, const char *path) +{ + return kernfs_walk_and_get_ns(kn, path, NULL); +} + +static inline struct kernfs_node * kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode, void *priv) { diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d140b1e9faa7..7b68d2788a56 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -237,6 +237,7 @@ extern int kexec_purgatory_get_set_symbol(struct kimage *image, unsigned int size, bool get_value); extern void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); +extern void __crash_kexec(struct pt_regs *); extern void crash_kexec(struct pt_regs *); int kexec_should_crash(struct task_struct *); void crash_save_cpu(struct pt_regs *regs, int cpu); @@ -332,6 +333,7 @@ int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; struct task_struct; +static inline void __crash_kexec(struct pt_regs *regs) { } static inline void crash_kexec(struct pt_regs *regs) { } static inline int kexec_should_crash(struct task_struct *p) { return 0; } #define kexec_in_progress false diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c923350ca20a..f707f74055c3 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -111,38 +111,13 @@ static inline bool is_error_page(struct page *page) } /* - * vcpu->requests bit members + * Architecture-independent vcpu->requests bit members + * Bits 4-7 are reserved for more arch-independent bits. 
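Back in the kernfs hunk above, the new kernfs_walk_and_get() resolves a path relative to an existing node in one call; a hedged usage sketch (the parent node and path are invented):

        struct kernfs_node *kn;

        kn = kernfs_walk_and_get(parent_kn, "child/grandchild");
        if (kn) {
                /* ... operate on the node ... */
                kernfs_put(kn);
        }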
*/ #define KVM_REQ_TLB_FLUSH 0 -#define KVM_REQ_MIGRATE_TIMER 1 -#define KVM_REQ_REPORT_TPR_ACCESS 2 -#define KVM_REQ_MMU_RELOAD 3 -#define KVM_REQ_TRIPLE_FAULT 4 -#define KVM_REQ_PENDING_TIMER 5 -#define KVM_REQ_UNHALT 6 -#define KVM_REQ_MMU_SYNC 7 -#define KVM_REQ_CLOCK_UPDATE 8 -#define KVM_REQ_KICK 9 -#define KVM_REQ_DEACTIVATE_FPU 10 -#define KVM_REQ_EVENT 11 -#define KVM_REQ_APF_HALT 12 -#define KVM_REQ_STEAL_UPDATE 13 -#define KVM_REQ_NMI 14 -#define KVM_REQ_PMU 15 -#define KVM_REQ_PMI 16 -#define KVM_REQ_WATCHDOG 17 -#define KVM_REQ_MASTERCLOCK_UPDATE 18 -#define KVM_REQ_MCLOCK_INPROGRESS 19 -#define KVM_REQ_EPR_EXIT 20 -#define KVM_REQ_SCAN_IOAPIC 21 -#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22 -#define KVM_REQ_ENABLE_IBS 23 -#define KVM_REQ_DISABLE_IBS 24 -#define KVM_REQ_APIC_PAGE_RELOAD 25 -#define KVM_REQ_SMI 26 -#define KVM_REQ_HV_CRASH 27 -#define KVM_REQ_IOAPIC_EOI_EXIT 28 -#define KVM_REQ_HV_RESET 29 +#define KVM_REQ_MMU_RELOAD 1 +#define KVM_REQ_PENDING_TIMER 2 +#define KVM_REQ_UNHALT 3 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 @@ -318,6 +293,11 @@ struct kvm_s390_adapter_int { u32 adapter_id; }; +struct kvm_hv_sint { + u32 vcpu; + u32 sint; +}; + struct kvm_kernel_irq_routing_entry { u32 gsi; u32 type; @@ -331,6 +311,7 @@ struct kvm_kernel_irq_routing_entry { } irqchip; struct msi_msg msi; struct kvm_s390_adapter_int adapter; + struct kvm_hv_sint hv_sint; }; struct hlist_node link; }; @@ -439,10 +420,13 @@ struct kvm { /* The guest did something we don't support. */ #define vcpu_unimpl(vcpu, fmt, ...) \ - kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) + kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \ + (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__) #define vcpu_debug(vcpu, fmt, ...) \ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) +#define vcpu_err(vcpu, fmt, ...) 
\ + kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) { @@ -465,6 +449,11 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) struct kvm_vcpu *vcpu; int i; + if (id < 0 || id >= KVM_MAX_VCPUS) + return NULL; + vcpu = kvm_get_vcpu(kvm, id); + if (vcpu && vcpu->vcpu_id == id) + return vcpu; kvm_for_each_vcpu(i, vcpu, kvm) if (vcpu->vcpu_id == id) return vcpu; @@ -484,12 +473,12 @@ void vcpu_put(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_IOAPIC void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); -void kvm_arch_irq_routing_update(struct kvm *kvm); +void kvm_arch_post_irq_routing_update(struct kvm *kvm); #else static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) { } -static inline void kvm_arch_irq_routing_update(struct kvm *kvm) +static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) { } #endif @@ -634,7 +623,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); -int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); +bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); @@ -668,8 +657,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); -void kvm_make_mclock_inprogress_request(struct kvm *kvm); -void kvm_make_scan_ioapic_request(struct kvm *kvm); bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); long kvm_arch_dev_ioctl(struct file *filp, @@ -990,11 +977,6 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) return kvm_is_error_hva(hva); } -static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) -{ - set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); -} - enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU, @@ -1004,7 +986,6 @@ struct kvm_stats_debugfs_item { const char *name; int offset; enum kvm_stat_kind kind; - struct dentry *dentry; }; extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; @@ -1091,6 +1072,7 @@ static inline void kvm_irq_routing_update(struct kvm *kvm) { } #endif +void kvm_arch_irq_routing_update(struct kvm *kvm); static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) { diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h index 00a97bb905db..35e568f04b1e 100644 --- a/include/linux/kvm_para.h +++ b/include/linux/kvm_para.h @@ -4,10 +4,8 @@ #include <uapi/linux/kvm_para.h> -static inline int kvm_para_has_feature(unsigned int feature) +static inline bool kvm_para_has_feature(unsigned int feature) { - if (kvm_arch_para_features() & (1UL << feature)) - return 1; - return 0; + return !!(kvm_arch_para_features() & (1UL << feature)); } #endif /* __LINUX_KVM_PARA_H */ diff --git a/include/linux/leds.h b/include/linux/leds.h index fa359c79c825..bc1476fda96e 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -44,9 +44,9 @@ struct led_classdev { #define LED_BLINK_ONESHOT (1 << 17) #define LED_BLINK_ONESHOT_STOP (1 << 18) #define LED_BLINK_INVERT (1 << 19) -#define LED_SYSFS_DISABLE (1 << 20) -#define SET_BRIGHTNESS_ASYNC (1 << 21) -#define SET_BRIGHTNESS_SYNC (1 << 22) +#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20) 
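kvm_para_has_feature() above now returns bool; a hedged guest-side example (KVM_FEATURE_PV_EOI is an existing x86 paravirt feature bit, used here purely for illustration):

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                pr_info("kvm: paravirtualized EOI available\n");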
+#define LED_BLINK_DISABLE (1 << 21) +#define LED_SYSFS_DISABLE (1 << 22) #define LED_DEV_CAP_FLASH (1 << 23) /* Set LED brightness level */ @@ -57,8 +57,8 @@ struct led_classdev { * Set LED brightness level immediately - it can block the caller for * the time required for accessing a LED device register. */ - int (*brightness_set_sync)(struct led_classdev *led_cdev, - enum led_brightness brightness); + int (*brightness_set_blocking)(struct led_classdev *led_cdev, + enum led_brightness brightness); /* Get LED brightness level */ enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); @@ -156,10 +156,25 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev, * * Set an LED's brightness, and, if necessary, cancel the * software blink timer that implements blinking when the - * hardware doesn't. + * hardware doesn't. This function is guaranteed not to sleep. */ extern void led_set_brightness(struct led_classdev *led_cdev, enum led_brightness brightness); + +/** + * led_set_brightness_sync - set LED brightness synchronously + * @led_cdev: the LED to set + * @brightness: the brightness to set it to + * + * Set an LED's brightness immediately. This function will block + * the caller for the time required for accessing device registers, + * and it can sleep. + * + * Returns: 0 on success or negative error value on failure + */ +extern int led_set_brightness_sync(struct led_classdev *led_cdev, + enum led_brightness value); + /** * led_update_brightness - update LED brightness * @led_cdev: the LED to query @@ -231,6 +246,8 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, /* Registration functions for complex triggers */ extern int led_trigger_register(struct led_trigger *trigger); extern void led_trigger_unregister(struct led_trigger *trigger); +extern int devm_led_trigger_register(struct device *dev, + struct led_trigger *trigger); extern void led_trigger_register_simple(const char *name, struct led_trigger **trigger); diff --git a/include/linux/libata.h b/include/linux/libata.h index 600c1e0626a5..851821bfd553 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -205,6 +205,7 @@ enum { ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */ + ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */ /* struct ata_port flags */ ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 3f021dc5da8c..bed40dff0e86 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -116,6 +116,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( } +int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length); struct nvdimm_bus *__nvdimm_bus_register(struct device *parent, struct nvdimm_bus_descriptor *nfit_desc, struct module *module); #define nvdimm_bus_register(parent, desc) \ diff --git a/include/linux/list.h b/include/linux/list.h index 993395a2e55c..5356f4d661a7 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -24,7 +24,7 @@ static inline void INIT_LIST_HEAD(struct list_head *list) { - list->next = list; + WRITE_ONCE(list->next, list); list->prev = list; } @@ -42,7 +42,7 @@ static inline void __list_add(struct list_head *new, next->prev = new; new->next = next; new->prev = prev; - prev->next = new; + WRITE_ONCE(prev->next, new); } #else extern void 
__list_add(struct list_head *new, @@ -186,7 +186,7 @@ static inline int list_is_last(const struct list_head *list, */ static inline int list_empty(const struct list_head *head) { - return head->next == head; + return READ_ONCE(head->next) == head; } /** @@ -608,7 +608,7 @@ static inline int hlist_unhashed(const struct hlist_node *h) static inline int hlist_empty(const struct hlist_head *h) { - return !h->first; + return !READ_ONCE(h->first); } static inline void __hlist_del(struct hlist_node *n) @@ -642,7 +642,7 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) n->next = first; if (first) first->pprev = &n->next; - h->first = n; + WRITE_ONCE(h->first, n); n->pprev = &h->first; } @@ -653,14 +653,14 @@ static inline void hlist_add_before(struct hlist_node *n, n->pprev = next->pprev; n->next = next; next->pprev = &n->next; - *(n->pprev) = n; + WRITE_ONCE(*(n->pprev), n); } static inline void hlist_add_behind(struct hlist_node *n, struct hlist_node *prev) { n->next = prev->next; - prev->next = n; + WRITE_ONCE(prev->next, n); n->pprev = &prev->next; if (n->next) diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index 8132214e8efd..ee7229a6c06a 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -70,7 +70,7 @@ static inline void hlist_bl_set_first(struct hlist_bl_head *h, static inline int hlist_bl_empty(const struct hlist_bl_head *h) { - return !((unsigned long)h->first & ~LIST_BL_LOCKMASK); + return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK); } static inline void hlist_bl_add_head(struct hlist_bl_node *n, diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index 444d2b1313bd..b01fe1009084 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -57,7 +57,7 @@ static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h) static inline int hlist_nulls_empty(const struct hlist_nulls_head *h) { - return is_a_nulls(h->first); + return is_a_nulls(READ_ONCE(h->first)); } static inline void hlist_nulls_add_head(struct hlist_nulls_node *n, diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 31db7a05dd36..a8828652f794 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -37,8 +37,9 @@ enum klp_state { * struct klp_func - function structure for live patching * @old_name: name of the function to be patched * @new_func: pointer to the patched function code - * @old_addr: a hint conveying at what address the old function - * can be found (optional, vmlinux patches only) + * @old_sympos: a hint indicating which symbol position the old function + * can be found (optional) + * @old_addr: the address of the function being patched * @kobj: kobject for sysfs resources * @state: tracks function-level patch application state * @stack_node: list node for klp_ops func_stack list @@ -48,16 +49,16 @@ struct klp_func { const char *old_name; void *new_func; /* - * The old_addr field is optional and can be used to resolve - * duplicate symbol names in the vmlinux object. If this - * information is not present, the symbol is located by name - * with kallsyms. If the name is not unique and old_addr is - * not provided, the patch application fails as there is no - * way to resolve the ambiguity. + * The old_sympos field is optional and can be used to resolve + * duplicate symbol names in livepatch objects. If this field is zero, + * it is expected the symbol is unique, otherwise patching fails. 
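
/*
 * Illustrative sketch (not taken from this patch) of the pattern the
 * READ_ONCE()/WRITE_ONCE() annotations in the list.h hunks above are
 * meant to support: a reader may poll list_empty() without taking the
 * writer's lock, provided it treats the answer only as a hint.  The
 * queue, lock and function names below are hypothetical.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(pending_items);
static DEFINE_SPINLOCK(pending_lock);

/* Lockless hint; list_empty() now loads head->next with READ_ONCE(). */
static bool anything_pending(void)
{
        return !list_empty(&pending_items);
}

static void add_pending(struct list_head *item)
{
        spin_lock(&pending_lock);
        list_add_tail(item, &pending_items);    /* publishes via WRITE_ONCE() */
        spin_unlock(&pending_lock);
}
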
If + * this value is greater than zero then that occurrence of the symbol + * in kallsyms for the given object is used. */ - unsigned long old_addr; + unsigned long old_sympos; /* internal */ + unsigned long old_addr; struct kobject kobj; enum klp_state state; struct list_head stack_node; @@ -66,8 +67,7 @@ struct klp_func { /** * struct klp_reloc - relocation structure for live patching * @loc: address where the relocation will be written - * @val: address of the referenced symbol (optional, - * vmlinux patches only) + * @sympos: position in kallsyms to disambiguate symbols (optional) * @type: ELF relocation type * @name: name of the referenced symbol (for lookup/verification) * @addend: offset from the referenced symbol @@ -75,7 +75,7 @@ struct klp_func { */ struct klp_reloc { unsigned long loc; - unsigned long val; + unsigned long sympos; unsigned long type; const char *name; int addend; diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index 4d24d64578c4..140edab64446 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -29,7 +29,7 @@ struct nlmsvc_binding { void (*fclose)(struct file *); }; -extern struct nlmsvc_binding * nlmsvc_ops; +extern const struct nlmsvc_binding *nlmsvc_ops; /* * Similar to nfs_client_initdata, but without the NFS-specific diff --git a/include/linux/mdio.h b/include/linux/mdio.h index b42963bc81dd..5bfd99d1a40a 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -11,6 +11,55 @@ #include <uapi/linux/mdio.h> +struct mii_bus; + +struct mdio_device { + struct device dev; + + const struct dev_pm_ops *pm_ops; + struct mii_bus *bus; + + int (*bus_match)(struct device *dev, struct device_driver *drv); + void (*device_free)(struct mdio_device *mdiodev); + void (*device_remove)(struct mdio_device *mdiodev); + + /* Bus address of the MDIO device (0-31) */ + int addr; + int flags; +}; +#define to_mdio_device(d) container_of(d, struct mdio_device, dev) + +/* struct mdio_driver_common: Common to all MDIO drivers */ +struct mdio_driver_common { + struct device_driver driver; + int flags; +}; +#define MDIO_DEVICE_FLAG_PHY 1 +#define to_mdio_common_driver(d) \ + container_of(d, struct mdio_driver_common, driver) + +/* struct mdio_driver: Generic MDIO driver */ +struct mdio_driver { + struct mdio_driver_common mdiodrv; + + /* + * Called during discovery. 
Used to set + * up device-specific structures, if any + */ + int (*probe)(struct mdio_device *mdiodev); + + /* Clears up any memory if needed */ + void (*remove)(struct mdio_device *mdiodev); +}; +#define to_mdio_driver(d) \ + container_of(to_mdio_common_driver(d), struct mdio_driver, mdiodrv) + +void mdio_device_free(struct mdio_device *mdiodev); +struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); +int mdio_device_register(struct mdio_device *mdiodev); +void mdio_device_remove(struct mdio_device *mdiodev); +int mdio_driver_register(struct mdio_driver *drv); +void mdio_driver_unregister(struct mdio_driver *drv); static inline bool mdio_phy_id_is_c45(int phy_id) { @@ -173,4 +222,33 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv) return reg; } +int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); +int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); +int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); + +int mdiobus_register_device(struct mdio_device *mdiodev); +int mdiobus_unregister_device(struct mdio_device *mdiodev); +bool mdiobus_is_registered_device(struct mii_bus *bus, int addr); +struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr); + +/** + * module_mdio_driver() - Helper macro for registering mdio drivers + * + * Helper macro for MDIO drivers which do not do anything special in module + * init/exit. Each module may only use this macro once, and calling it + * replaces module_init() and module_exit(). + */ +#define mdio_module_driver(_mdio_driver) \ +static int __init mdio_module_init(void) \ +{ \ + return mdio_driver_register(&_mdio_driver); \ +} \ +module_init(mdio_module_init); \ +static void __exit mdio_module_exit(void) \ +{ \ + mdio_driver_unregister(&_mdio_driver); \ +} \ +module_exit(mdio_module_exit) + #endif /* __LINUX_MDIO_H__ */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 24daf8fc4d7c..173fb44e22f1 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -25,6 +25,7 @@ enum { MEMBLOCK_NONE = 0x0, /* No special request */ MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */ MEMBLOCK_MIRROR = 0x2, /* mirrored region */ + MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ }; struct memblock_region { @@ -82,6 +83,7 @@ bool memblock_overlaps_region(struct memblock_type *type, int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size); int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); +int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); ulong choose_memblock_flags(void); /* Low level functions */ @@ -184,6 +186,11 @@ static inline bool memblock_is_mirror(struct memblock_region *m) return m->flags & MEMBLOCK_MIRROR; } +static inline bool memblock_is_nomap(struct memblock_region *m) +{ + return m->flags & MEMBLOCK_NOMAP; +} + #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn); @@ -209,10 +216,10 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, * for_each_free_mem_range - iterate through free memblock areas * @i: u64 used as loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes + * @flags: pick from blocks based on memory attributes * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the 
range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL - * @flags: pick from blocks based on memory attributes * * Walks over free (memory && !reserved) areas of memblock. Available as * soon as memblock is initialized. @@ -225,10 +232,10 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, * for_each_free_mem_range_reverse - rev-iterate through free memblock areas * @i: u64 used as loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes + * @flags: pick from blocks based on memory attributes * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_nid: ptr to int for nid of the range, can be %NULL - * @flags: pick from blocks based on memory attributes * * Walks over free (memory && !reserved) areas of memblock in reverse * order. Available as soon as memblock is initialized. @@ -318,9 +325,10 @@ phys_addr_t memblock_mem_size(unsigned long limit_pfn); phys_addr_t memblock_start_of_DRAM(void); phys_addr_t memblock_end_of_DRAM(void); void memblock_enforce_memory_limit(phys_addr_t memory_limit); -int memblock_is_memory(phys_addr_t addr); +bool memblock_is_memory(phys_addr_t addr); +int memblock_is_map_memory(phys_addr_t addr); int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); -int memblock_is_reserved(phys_addr_t addr); +bool memblock_is_reserved(phys_addr_t addr); bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); extern void __memblock_dump_all(void); @@ -391,6 +399,11 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ region++) +#define for_each_memblock_type(memblock_type, rgn) \ + idx = 0; \ + rgn = &memblock_type->regions[idx]; \ + for (idx = 0; idx < memblock_type->cnt; \ + idx++,rgn = &memblock_type->regions[idx]) #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK #define __init_memblock __meminit diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index cd0e2413c358..2292468f2a30 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -85,32 +85,10 @@ enum mem_cgroup_events_target { MEM_CGROUP_NTARGETS, }; -/* - * Bits in struct cg_proto.flags - */ -enum cg_proto_flags { - /* Currently active and new sockets should be assigned to cgroups */ - MEMCG_SOCK_ACTIVE, - /* It was ever activated; we must disarm static keys on destruction */ - MEMCG_SOCK_ACTIVATED, -}; - struct cg_proto { struct page_counter memory_allocated; /* Current allocated memory. */ - struct percpu_counter sockets_allocated; /* Current number of sockets. */ int memory_pressure; - long sysctl_mem[3]; - unsigned long flags; - /* - * memcg field is used to find which memcg we belong directly - * Each memcg struct can hold more than one cg_proto, so container_of - * won't really cut. - * - * The elegant solution would be having an inverse function to - * proto_cgroup in struct proto, but that means polluting the structure - * for everybody, instead of just for memcg users. 
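
/*
 * Illustrative sketch (not taken from this patch) of the new
 * MEMBLOCK_NOMAP flag above: record a firmware-owned range as memory
 * while keeping it out of the kernel's direct mapping.  The base and
 * size values are invented for the example.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/memblock.h>

static int __init reserve_secure_fw(void)
{
        phys_addr_t base = 0xfe000000;
        phys_addr_t size = 0x00200000;  /* 2 MiB, arbitrary */

        if (!memblock_is_region_memory(base, size))
                return -EINVAL;

        /* memblock_is_nomap()/memblock_is_map_memory() reflect this later. */
        return memblock_mark_nomap(base, size);
}
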
- */ - struct mem_cgroup *memcg; + bool active; }; #ifdef CONFIG_MEMCG @@ -192,6 +170,9 @@ struct mem_cgroup { unsigned long low; unsigned long high; + /* Range enforcement for interrupt charges */ + struct work_struct high_work; + unsigned long soft_limit; /* vmpressure notifications */ @@ -268,6 +249,10 @@ struct mem_cgroup { struct wb_domain cgwb_domain; #endif +#ifdef CONFIG_INET + unsigned long socket_pressure; +#endif + /* List of events which userspace want to receive */ struct list_head event_list; spinlock_t event_list_lock; @@ -275,7 +260,8 @@ struct mem_cgroup { struct mem_cgroup_per_node *nodeinfo[0]; /* WARNING: nodeinfo must be the last member here */ }; -extern struct cgroup_subsys_state *mem_cgroup_root_css; + +extern struct mem_cgroup *root_mem_cgroup; /** * mem_cgroup_events - count memory events against a cgroup @@ -308,18 +294,34 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); -struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ return css ? container_of(css, struct mem_cgroup, css) : NULL; } +#define mem_cgroup_from_counter(counter, member) \ + container_of(counter, struct mem_cgroup, member) + struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, struct mem_cgroup *, struct mem_cgroup_reclaim_cookie *); void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); +/** + * parent_mem_cgroup - find the accounting parent of a memcg + * @memcg: memcg whose parent to find + * + * Returns the parent memcg, or NULL if this is the root or the memory + * controller is in legacy no-hierarchy mode. 
+ */ +static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) +{ + if (!memcg->memory.parent) + return NULL; + return mem_cgroup_from_counter(memcg->memory.parent, memory); +} + static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root) { @@ -671,12 +673,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) } #endif /* CONFIG_MEMCG */ -enum { - UNDER_LIMIT, - SOFT_LIMIT, - OVER_LIMIT, -}; - #ifdef CONFIG_CGROUP_WRITEBACK struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); @@ -703,20 +699,35 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, #endif /* CONFIG_CGROUP_WRITEBACK */ struct sock; -#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM) void sock_update_memcg(struct sock *sk); void sock_release_memcg(struct sock *sk); -#else -static inline void sock_update_memcg(struct sock *sk) +bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +#if defined(CONFIG_MEMCG) && defined(CONFIG_INET) +extern struct static_key_false memcg_sockets_enabled_key; +#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) +static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { +#ifdef CONFIG_MEMCG_KMEM + if (memcg->tcp_mem.memory_pressure) + return true; +#endif + do { + if (time_before(jiffies, memcg->socket_pressure)) + return true; + } while ((memcg = parent_mem_cgroup(memcg))); + return false; } -static inline void sock_release_memcg(struct sock *sk) +#else +#define mem_cgroup_sockets_enabled 0 +static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { + return false; } -#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */ +#endif #ifdef CONFIG_MEMCG_KMEM -extern struct static_key memcg_kmem_enabled_key; +extern struct static_key_false memcg_kmem_enabled_key; extern int memcg_nr_cache_ids; void memcg_get_cache_ids(void); @@ -732,7 +743,7 @@ void memcg_put_cache_ids(void); static inline bool memcg_kmem_enabled(void) { - return static_key_false(&memcg_kmem_enabled_key); + return static_branch_unlikely(&memcg_kmem_enabled_key); } static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) @@ -766,15 +777,13 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? memcg->kmemcg_id : -1; } -struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); +struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); void __memcg_kmem_put_cache(struct kmem_cache *cachep); -static inline bool __memcg_kmem_bypass(gfp_t gfp) +static inline bool __memcg_kmem_bypass(void) { if (!memcg_kmem_enabled()) return true; - if (gfp & __GFP_NOACCOUNT) - return true; if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) return true; return false; @@ -791,7 +800,9 @@ static inline bool __memcg_kmem_bypass(gfp_t gfp) static __always_inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) { - if (__memcg_kmem_bypass(gfp)) + if (__memcg_kmem_bypass()) + return 0; + if (!(gfp & __GFP_ACCOUNT)) return 0; return __memcg_kmem_charge(page, gfp, order); } @@ -810,16 +821,15 @@ static __always_inline void memcg_kmem_uncharge(struct page *page, int order) /** * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation * @cachep: the original global kmem cache - * @gfp: allocation flags. 
* * All memory allocated from a per-memcg cache is charged to the owner memcg. */ static __always_inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { - if (__memcg_kmem_bypass(gfp)) + if (__memcg_kmem_bypass()) return cachep; - return __memcg_kmem_get_cache(cachep); + return __memcg_kmem_get_cache(cachep, gfp); } static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 3d385c81c153..2696c1f05ed1 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -122,7 +122,7 @@ struct sp_node { struct shared_policy { struct rb_root root; - spinlock_t lock; + rwlock_t lock; }; int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst); diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index 79e607e2f081..d55a42297d49 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h @@ -27,6 +27,8 @@ enum arizona_type { WM8280 = 4, WM8998 = 5, WM1814 = 6, + WM1831 = 7, + CS47L24 = 8, }; #define ARIZONA_IRQ_GP1 0 @@ -166,6 +168,7 @@ static inline int wm5102_patch(struct arizona *arizona) #endif int wm5110_patch(struct arizona *arizona); +int cs47l24_patch(struct arizona *arizona); int wm8997_patch(struct arizona *arizona); int wm8998_patch(struct arizona *arizona); diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h index 57b45caaea80..64faeeff698c 100644 --- a/include/linux/mfd/arizona/pdata.h +++ b/include/linux/mfd/arizona/pdata.h @@ -171,7 +171,7 @@ struct arizona_pdata { int inmode[ARIZONA_MAX_INPUT]; /** Mode for outputs */ - bool out_mono[ARIZONA_MAX_OUTPUT]; + int out_mono[ARIZONA_MAX_OUTPUT]; /** PDM speaker mute setting */ unsigned int spk_mute[ARIZONA_MAX_PDM_SPK]; diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 27dac3ff18b9..bc6f7e00fb3d 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -17,6 +17,7 @@ #include <linux/platform_device.h> struct irq_domain; +struct property_set; /* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */ struct mfd_cell_acpi_match { @@ -44,6 +45,10 @@ struct mfd_cell { /* platform data passed to the sub devices drivers */ void *platform_data; size_t pdata_size; + + /* device properties passed to the sub devices drivers */ + const struct property_set *pset; + /* * Device Tree compatible string * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 13e1d96935ed..c800dbc42079 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -134,21 +134,32 @@ struct palmas_pmic_driver_data { struct regulator_config config); }; +struct palmas_adc_wakeup_property { + int adc_channel_number; + int adc_high_threshold; + int adc_low_threshold; +}; + struct palmas_gpadc_platform_data { /* Channel 3 current source is only enabled during conversion */ - int ch3_current; + int ch3_current; /* 0: off; 1: 10uA; 2: 400uA; 3: 800 uA */ /* Channel 0 current source can be used for battery detection. * If used for battery detection this will cause a permanent current * consumption depending on current level set here. 
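
/*
 * Illustrative sketch (not taken from this patch) of the opt-in kmem
 * accounting model implied by the memcg_kmem_charge() and
 * __memcg_kmem_bypass() changes above: only allocations that pass
 * __GFP_ACCOUNT are charged to the current task's memory cgroup.  The
 * buffer length is arbitrary.
 */
#include <linux/gfp.h>
#include <linux/slab.h>

static void *alloc_accounted(size_t len)
{
        /* Charged to the caller's memcg solely because of __GFP_ACCOUNT. */
        return kmalloc(len, GFP_KERNEL | __GFP_ACCOUNT);
}

static void *alloc_unaccounted(size_t len)
{
        /* No flag, no charge: the old __GFP_NOACCOUNT opt-out is gone. */
        return kmalloc(len, GFP_KERNEL);
}
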
*/ - int ch0_current; + int ch0_current; /* 0: off; 1: 5uA; 2: 15uA; 3: 20 uA */ + bool extended_delay; /* use extended delay for conversion */ /* default BAT_REMOVAL_DAT setting on device probe */ int bat_removal; /* Sets the START_POLARITY bit in the RT_CTRL register */ int start_polarity; + + int auto_conversion_period_ms; + struct palmas_adc_wakeup_property *adc_wakeup1_data; + struct palmas_adc_wakeup_property *adc_wakeup2_data; }; struct palmas_reg_init { @@ -405,28 +416,7 @@ struct palmas_gpadc_calibration { s32 offset_error; }; -struct palmas_gpadc { - struct device *dev; - struct palmas *palmas; - - int ch3_current; - int ch0_current; - - int gpadc_force; - - int bat_removal; - - struct mutex reading_lock; - struct completion irq_complete; - - int eoc_sw_irq; - - struct palmas_gpadc_calibration *palmas_cal_tbl; - - int conv0_channel; - int conv1_channel; - int rt_channel; -}; +#define PALMAS_DATASHEET_NAME(_name) "palmas-gpadc-chan-"#_name struct palmas_gpadc_result { s32 raw_code; @@ -520,6 +510,43 @@ enum palmas_irqs { PALMAS_NUM_IRQ, }; +/* Palmas GPADC Channels */ +enum { + PALMAS_ADC_CH_IN0, + PALMAS_ADC_CH_IN1, + PALMAS_ADC_CH_IN2, + PALMAS_ADC_CH_IN3, + PALMAS_ADC_CH_IN4, + PALMAS_ADC_CH_IN5, + PALMAS_ADC_CH_IN6, + PALMAS_ADC_CH_IN7, + PALMAS_ADC_CH_IN8, + PALMAS_ADC_CH_IN9, + PALMAS_ADC_CH_IN10, + PALMAS_ADC_CH_IN11, + PALMAS_ADC_CH_IN12, + PALMAS_ADC_CH_IN13, + PALMAS_ADC_CH_IN14, + PALMAS_ADC_CH_IN15, + PALMAS_ADC_CH_MAX, +}; + +/* Palmas GPADC Channel0 Current Source */ +enum { + PALMAS_ADC_CH0_CURRENT_SRC_0, + PALMAS_ADC_CH0_CURRENT_SRC_5, + PALMAS_ADC_CH0_CURRENT_SRC_15, + PALMAS_ADC_CH0_CURRENT_SRC_20, +}; + +/* Palmas GPADC Channel3 Current Source */ +enum { + PALMAS_ADC_CH3_CURRENT_SRC_0, + PALMAS_ADC_CH3_CURRENT_SRC_10, + PALMAS_ADC_CH3_CURRENT_SRC_400, + PALMAS_ADC_CH3_CURRENT_SRC_800, +}; + struct palmas_pmic { struct palmas *palmas; struct device *dev; diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index a06098639399..6bc4bcd488ac 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -44,6 +44,7 @@ enum sec_device_type { S2MPS11X, S2MPS13X, S2MPS14X, + S2MPS15X, S2MPU02, }; diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h index 29c30ac36020..a65e4655d470 100644 --- a/include/linux/mfd/samsung/rtc.h +++ b/include/linux/mfd/samsung/rtc.h @@ -107,6 +107,8 @@ enum s2mps_rtc_reg { #define S2MPS_RTC_WUDR_MASK (1 << S2MPS_RTC_WUDR_SHIFT) #define S2MPS13_RTC_AUDR_SHIFT 1 #define S2MPS13_RTC_AUDR_MASK (1 << S2MPS13_RTC_AUDR_SHIFT) +#define S2MPS15_RTC_WUDR_SHIFT 1 +#define S2MPS15_RTC_WUDR_MASK (1 << S2MPS15_RTC_WUDR_SHIFT) #define S2MPS_RTC_RUDR_SHIFT 0 #define S2MPS_RTC_RUDR_MASK (1 << S2MPS_RTC_RUDR_SHIFT) #define RTC_TCON_SHIFT 1 diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h new file mode 100644 index 000000000000..36d35287c3c0 --- /dev/null +++ b/include/linux/mfd/samsung/s2mps15.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2015 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_MFD_S2MPS15_H +#define __LINUX_MFD_S2MPS15_H + +/* S2MPS15 registers */ +enum s2mps15_reg { + S2MPS15_REG_ID, + S2MPS15_REG_INT1, + S2MPS15_REG_INT2, + S2MPS15_REG_INT3, + S2MPS15_REG_INT1M, + S2MPS15_REG_INT2M, + S2MPS15_REG_INT3M, + S2MPS15_REG_ST1, + S2MPS15_REG_ST2, + S2MPS15_REG_PWRONSRC, + S2MPS15_REG_OFFSRC, + S2MPS15_REG_BU_CHG, + S2MPS15_REG_RTC_BUF, + S2MPS15_REG_CTRL1, + S2MPS15_REG_CTRL2, + S2MPS15_REG_RSVD1, + S2MPS15_REG_RSVD2, + S2MPS15_REG_RSVD3, + S2MPS15_REG_RSVD4, + S2MPS15_REG_RSVD5, + S2MPS15_REG_RSVD6, + S2MPS15_REG_CTRL3, + S2MPS15_REG_RSVD7, + S2MPS15_REG_RSVD8, + S2MPS15_REG_RSVD9, + S2MPS15_REG_B1CTRL1, + S2MPS15_REG_B1CTRL2, + S2MPS15_REG_B2CTRL1, + S2MPS15_REG_B2CTRL2, + S2MPS15_REG_B3CTRL1, + S2MPS15_REG_B3CTRL2, + S2MPS15_REG_B4CTRL1, + S2MPS15_REG_B4CTRL2, + S2MPS15_REG_B5CTRL1, + S2MPS15_REG_B5CTRL2, + S2MPS15_REG_B6CTRL1, + S2MPS15_REG_B6CTRL2, + S2MPS15_REG_B7CTRL1, + S2MPS15_REG_B7CTRL2, + S2MPS15_REG_B8CTRL1, + S2MPS15_REG_B8CTRL2, + S2MPS15_REG_B9CTRL1, + S2MPS15_REG_B9CTRL2, + S2MPS15_REG_B10CTRL1, + S2MPS15_REG_B10CTRL2, + S2MPS15_REG_BBCTRL1, + S2MPS15_REG_BBCTRL2, + S2MPS15_REG_BRAMP, + S2MPS15_REG_LDODVS1, + S2MPS15_REG_LDODVS2, + S2MPS15_REG_LDODVS3, + S2MPS15_REG_LDODVS4, + S2MPS15_REG_L1CTRL, + S2MPS15_REG_L2CTRL, + S2MPS15_REG_L3CTRL, + S2MPS15_REG_L4CTRL, + S2MPS15_REG_L5CTRL, + S2MPS15_REG_L6CTRL, + S2MPS15_REG_L7CTRL, + S2MPS15_REG_L8CTRL, + S2MPS15_REG_L9CTRL, + S2MPS15_REG_L10CTRL, + S2MPS15_REG_L11CTRL, + S2MPS15_REG_L12CTRL, + S2MPS15_REG_L13CTRL, + S2MPS15_REG_L14CTRL, + S2MPS15_REG_L15CTRL, + S2MPS15_REG_L16CTRL, + S2MPS15_REG_L17CTRL, + S2MPS15_REG_L18CTRL, + S2MPS15_REG_L19CTRL, + S2MPS15_REG_L20CTRL, + S2MPS15_REG_L21CTRL, + S2MPS15_REG_L22CTRL, + S2MPS15_REG_L23CTRL, + S2MPS15_REG_L24CTRL, + S2MPS15_REG_L25CTRL, + S2MPS15_REG_L26CTRL, + S2MPS15_REG_L27CTRL, + S2MPS15_REG_LDODSCH1, + S2MPS15_REG_LDODSCH2, + S2MPS15_REG_LDODSCH3, + S2MPS15_REG_LDODSCH4, +}; + +/* S2MPS15 regulator ids */ +enum s2mps15_regulators { + S2MPS15_LDO1, + S2MPS15_LDO2, + S2MPS15_LDO3, + S2MPS15_LDO4, + S2MPS15_LDO5, + S2MPS15_LDO6, + S2MPS15_LDO7, + S2MPS15_LDO8, + S2MPS15_LDO9, + S2MPS15_LDO10, + S2MPS15_LDO11, + S2MPS15_LDO12, + S2MPS15_LDO13, + S2MPS15_LDO14, + S2MPS15_LDO15, + S2MPS15_LDO16, + S2MPS15_LDO17, + S2MPS15_LDO18, + S2MPS15_LDO19, + S2MPS15_LDO20, + S2MPS15_LDO21, + S2MPS15_LDO22, + S2MPS15_LDO23, + S2MPS15_LDO24, + S2MPS15_LDO25, + S2MPS15_LDO26, + S2MPS15_LDO27, + S2MPS15_BUCK1, + S2MPS15_BUCK2, + S2MPS15_BUCK3, + S2MPS15_BUCK4, + S2MPS15_BUCK5, + S2MPS15_BUCK6, + S2MPS15_BUCK7, + S2MPS15_BUCK8, + S2MPS15_BUCK9, + S2MPS15_BUCK10, + S2MPS15_BUCK11, + S2MPS15_REGULATOR_MAX, +}; + +#define S2MPS15_LDO_VSEL_MASK (0x3F) +#define S2MPS15_BUCK_VSEL_MASK (0xFF) + +#define S2MPS15_ENABLE_SHIFT (0x06) +#define S2MPS15_ENABLE_MASK (0x03 << S2MPS15_ENABLE_SHIFT) + +#define S2MPS15_LDO_N_VOLTAGES (S2MPS15_LDO_VSEL_MASK + 1) +#define S2MPS15_BUCK_N_VOLTAGES (S2MPS15_BUCK_VSEL_MASK + 1) + +#endif /* __LINUX_MFD_S2MPS15_H */ diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index 2f9b593246ee..d58f3b5f585a 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -200,6 +200,8 @@ enum tps65218_regulator_id { TPS65218_DCDC_4, TPS65218_DCDC_5, TPS65218_DCDC_6, + /* LS's */ + TPS65218_LS_3, /* LDOs */ TPS65218_LDO_1, }; @@ -210,8 +212,11 @@ enum tps65218_regulator_id { #define TPS65218_NUM_DCDC 6 /* Number of LDO voltage regulators 
available */ #define TPS65218_NUM_LDO 1 +/* Number of total LS current regulators available */ +#define TPS65218_NUM_LS 1 /* Number of total regulators available */ -#define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO) +#define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO \ + + TPS65218_NUM_LS) /* Define the TPS65218 IRQ numbers */ enum tps65218_irqs { diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h index 579b50ca2e02..7a09e7f1f984 100644 --- a/include/linux/mfd/wm8350/pmic.h +++ b/include/linux/mfd/wm8350/pmic.h @@ -715,7 +715,6 @@ struct wm8350_led_platform_data { struct wm8350_led { struct platform_device *pdev; - struct mutex mutex; struct work_struct work; spinlock_t value_lock; enum led_brightness value; diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 5a06d969338e..2e8af001c5da 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -75,6 +75,11 @@ static inline int mlx4_is_bonded(struct mlx4_dev *dev) return !!(dev->flags & MLX4_FLAG_BONDED); } +static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev) +{ + return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev)); +} + struct mlx4_port_map { u8 port1; u8 port2; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 0b473cbfa7ef..7be845e30689 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -251,6 +251,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, + MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, }; enum { @@ -442,9 +443,12 @@ struct mlx5_init_seg { __be32 rsvd1[120]; __be32 initializing; struct health_buffer health; - __be32 rsvd2[884]; + __be32 rsvd2[880]; + __be32 internal_timer_h; + __be32 internal_timer_l; + __be32 rsrv3[2]; __be32 health_counter; - __be32 rsvd3[1019]; + __be32 rsvd4[1019]; __be64 ieee1588_clk; __be32 ieee1588_clk_type; __be32 clr_intx; @@ -520,6 +524,12 @@ struct mlx5_eqe_page_fault { __be32 flags_qpn; } __packed; +struct mlx5_eqe_vport_change { + u8 rsvd0[2]; + __be16 vport_num; + __be32 rsvd1[6]; +} __packed; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -532,6 +542,7 @@ union ev_data { struct mlx5_eqe_stall_vl stall_vl; struct mlx5_eqe_page_req req_pages; struct mlx5_eqe_page_fault page_fault; + struct mlx5_eqe_vport_change vport_change; } __packed; struct mlx5_eqe { @@ -593,7 +604,8 @@ struct mlx5_cqe64 { __be32 imm_inval_pkey; u8 rsvd40[4]; __be32 byte_cnt; - __be64 timestamp; + __be32 timestamp_h; + __be32 timestamp_l; __be32 sop_drop_qpn; __be16 wqe_counter; u8 signature; @@ -615,6 +627,16 @@ static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) return !!(cqe->l4_hdr_type_etc & 0x1); } +static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) +{ + u32 hi, lo; + + hi = be32_to_cpu(cqe->timestamp_h); + lo = be32_to_cpu(cqe->timestamp_l); + + return (u64)lo | ((u64)hi << 32); +} + enum { CQE_L4_HDR_TYPE_NONE = 0x0, CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, @@ -1067,6 +1089,12 @@ enum { }; enum { + MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0, + MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1, + MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2, +}; + +enum { MLX5_L3_PROT_TYPE_IPV4 = 0, MLX5_L3_PROT_TYPE_IPV6 = 1, }; @@ -1102,6 +1130,12 @@ enum { MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2, }; +enum mlx5_list_type { + MLX5_NVPRT_LIST_TYPE_UC = 0x0, + MLX5_NVPRT_LIST_TYPE_MC = 0x1, + MLX5_NVPRT_LIST_TYPE_VLAN = 0x2, +}; + enum { MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, @@ -1124,6 +1158,8 @@ enum mlx5_cap_type { 
MLX5_CAP_IPOIB_OFFLOADS, MLX5_CAP_EOIB_OFFLOADS, MLX5_CAP_FLOW_TABLE, + MLX5_CAP_ESWITCH_FLOW_TABLE, + MLX5_CAP_ESWITCH, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1161,6 +1197,28 @@ enum mlx5_cap_type { #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) +#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ + MLX5_GET(flow_table_eswitch_cap, \ + mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + +#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ + MLX5_GET(flow_table_eswitch_cap, \ + mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + +#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) + +#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \ + MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap) + +#define MLX5_CAP_ESW(mdev, cap) \ + MLX5_GET(e_switch_cap, \ + mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) + +#define MLX5_CAP_ESW_MAX(mdev, cap) \ + MLX5_GET(e_switch_cap, \ + mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap) + #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) @@ -1200,4 +1258,6 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } +#define MLX5_BY_PASS_NUM_PRIOS 9 + #endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 5c857f2a20d7..2fd7019f69db 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -426,11 +426,23 @@ struct mlx5_mr_table { struct radix_tree_root tree; }; +struct mlx5_vf_context { + int enabled; +}; + +struct mlx5_core_sriov { + struct mlx5_vf_context *vfs_ctx; + int num_vfs; + int enabled_vfs; +}; + struct mlx5_irq_info { cpumask_var_t mask; char name[MLX5_MAX_IRQ_NAME]; }; +struct mlx5_eswitch; + struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; @@ -447,6 +459,7 @@ struct mlx5_priv { int fw_pages; atomic_t reg_pages; struct list_head free_list; + int vfs_pages; struct mlx5_core_health health; @@ -485,6 +498,12 @@ struct mlx5_priv { struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; + + struct mlx5_eswitch *eswitch; + struct mlx5_core_sriov sriov; + unsigned long pci_dev_data; + struct mlx5_flow_root_namespace *root_ns; + struct mlx5_flow_root_namespace *fdb_root_ns; }; enum mlx5_device_state { @@ -739,6 +758,8 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); +int mlx5_sriov_init(struct mlx5_core_dev *dev); +int mlx5_sriov_cleanup(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); @@ -884,6 +905,15 @@ struct mlx5_profile { } mr_cache[MAX_MR_CACHE_ENTRIES]; }; +enum { + MLX5_PCI_DEV_IS_VF = 1 << 0, +}; + +static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) +{ + return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); +} + static inline int mlx5_get_gid_table_len(u16 param) { if (param > 4) { diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h deleted file mode 100644 index 5f922c6d4fc2..000000000000 --- a/include/linux/mlx5/flow_table.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 
- * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef MLX5_FLOW_TABLE_H -#define MLX5_FLOW_TABLE_H - -#include <linux/mlx5/driver.h> - -struct mlx5_flow_table_group { - u8 log_sz; - u8 match_criteria_enable; - u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; -}; - -void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, - u16 num_groups, - struct mlx5_flow_table_group *group); -void mlx5_destroy_flow_table(void *flow_table); -int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable, - void *match_criteria, void *flow_context, - u32 *flow_index); -void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index); -u32 mlx5_get_flow_table_id(void *flow_table); - -#endif /* MLX5_FLOW_TABLE_H */ diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h new file mode 100644 index 000000000000..8230caa3fb6e --- /dev/null +++ b/include/linux/mlx5/fs.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _MLX5_FS_ +#define _MLX5_FS_ + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/mlx5_ifc.h> + +#define MLX5_FS_DEFAULT_FLOW_TAG 0x0 + +#define LEFTOVERS_RULE_NUM 2 +static inline void build_leftovers_ft_param(int *priority, + int *n_ent, + int *n_grp) +{ + *priority = 0; /* Priority of leftovers_prio-0 */ + *n_ent = LEFTOVERS_RULE_NUM; + *n_grp = LEFTOVERS_RULE_NUM; +} + +enum mlx5_flow_namespace_type { + MLX5_FLOW_NAMESPACE_BYPASS, + MLX5_FLOW_NAMESPACE_KERNEL, + MLX5_FLOW_NAMESPACE_LEFTOVERS, + MLX5_FLOW_NAMESPACE_FDB, +}; + +struct mlx5_flow_table; +struct mlx5_flow_group; +struct mlx5_flow_rule; +struct mlx5_flow_namespace; + +struct mlx5_flow_destination { + enum mlx5_flow_destination_type type; + union { + u32 tir_num; + struct mlx5_flow_table *ft; + u32 vport_num; + }; +}; + +struct mlx5_flow_namespace * +mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type); + +struct mlx5_flow_table * +mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries, + int max_num_groups); + +struct mlx5_flow_table * +mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + int prio, + int num_flow_table_entries); +int mlx5_destroy_flow_table(struct mlx5_flow_table *ft); + +/* inbox should be set with the following values: + * start_flow_index + * end_flow_index + * match_criteria_enable + * match_criteria + */ +struct mlx5_flow_group * +mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); +void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); + +/* Single destination per rule. + * Group ID is implied by the match criteria. 
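
/*
 * Illustrative sketch (not taken from this patch) of the flow steering
 * API declared in the new mlx5/fs.h above: look up a namespace, then
 * let the core auto-group a table.  The priority, table size and group
 * count are invented numbers.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>

static struct mlx5_flow_table *example_create_ft(struct mlx5_core_dev *dev)
{
        struct mlx5_flow_namespace *ns;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
        if (!ns)
                return NULL;

        /* priority 0, room for 1024 rules, at most 16 auto-created groups */
        return mlx5_create_auto_grouped_flow_table(ns, 0, 1024, 16);
}
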
+ */ +struct mlx5_flow_rule * +mlx5_add_flow_rule(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + u32 *match_criteria, + u32 *match_value, + u32 action, + u32 flow_tag, + struct mlx5_flow_destination *dest); +void mlx5_del_flow_rule(struct mlx5_flow_rule *fr); + +#endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 1565324eb620..68d73f82e009 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -185,6 +185,7 @@ enum { MLX5_CMD_OP_MODIFY_RQT = 0x917, MLX5_CMD_OP_DESTROY_RQT = 0x918, MLX5_CMD_OP_QUERY_RQT = 0x919, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT = 0x92f, MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930, MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931, MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932, @@ -193,7 +194,8 @@ enum { MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, - MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938 + MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938, + MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c }; struct mlx5_ifc_flow_table_fields_supported_bits { @@ -256,25 +258,30 @@ struct mlx5_ifc_flow_table_fields_supported_bits { struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; - u8 reserved_0[0x1f]; + u8 reserved_0[0x2]; + u8 flow_modify_en[0x1]; + u8 modify_root[0x1]; + u8 identified_miss_table_mode[0x1]; + u8 flow_table_modify[0x1]; + u8 reserved_1[0x19]; - u8 reserved_1[0x2]; + u8 reserved_2[0x2]; u8 log_max_ft_size[0x6]; - u8 reserved_2[0x10]; + u8 reserved_3[0x10]; u8 max_ft_level[0x8]; - u8 reserved_3[0x20]; + u8 reserved_4[0x20]; - u8 reserved_4[0x18]; + u8 reserved_5[0x18]; u8 log_max_ft_num[0x8]; - u8 reserved_5[0x18]; + u8 reserved_6[0x18]; u8 log_max_destination[0x8]; - u8 reserved_6[0x18]; + u8 reserved_7[0x18]; u8 log_max_flow[0x8]; - u8 reserved_7[0x40]; + u8 reserved_8[0x40]; struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; @@ -291,6 +298,22 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits { u8 reserved_1[0x1a]; }; +struct mlx5_ifc_ipv4_layout_bits { + u8 reserved_0[0x60]; + + u8 ipv4[0x20]; +}; + +struct mlx5_ifc_ipv6_layout_bits { + u8 ipv6[16][0x8]; +}; + +union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { + struct mlx5_ifc_ipv6_layout_bits ipv6_layout; + struct mlx5_ifc_ipv4_layout_bits ipv4_layout; + u8 reserved_0[0x80]; +}; + struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 smac_47_16[0x20]; @@ -321,9 +344,9 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 udp_sport[0x10]; u8 udp_dport[0x10]; - u8 src_ip[4][0x20]; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; - u8 dst_ip[4][0x20]; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; }; struct mlx5_ifc_fte_match_set_misc_bits { @@ -447,6 +470,29 @@ struct mlx5_ifc_flow_table_nic_cap_bits { u8 reserved_3[0x7200]; }; +struct mlx5_ifc_flow_table_eswitch_cap_bits { + u8 reserved_0[0x200]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; + + u8 reserved_1[0x7800]; +}; + +struct mlx5_ifc_e_switch_cap_bits { + u8 vport_svlan_strip[0x1]; + u8 vport_cvlan_strip[0x1]; + u8 vport_svlan_insert[0x1]; + u8 vport_cvlan_insert_if_not_exist[0x1]; + u8 vport_cvlan_insert_overwrite[0x1]; + u8 reserved_0[0x1b]; + + u8 reserved_1[0x7e0]; +}; + struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 csum_cap[0x1]; u8 vlan_cap[0x1]; @@ 
-665,7 +711,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_17[0x1]; u8 ets[0x1]; u8 nic_flow_table[0x1]; - u8 reserved_18[0x4]; + u8 eswitch_flow_table[0x1]; + u8 early_vf_enable; + u8 reserved_18[0x2]; u8 local_ca_ack_delay[0x5]; u8 reserved_19[0x6]; u8 port_type[0x2]; @@ -787,27 +835,36 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_60[0x1b]; u8 log_max_wq_sz[0x5]; - u8 reserved_61[0xa0]; - + u8 nic_vport_change_event[0x1]; + u8 reserved_61[0xa]; + u8 log_max_vlan_list[0x5]; u8 reserved_62[0x3]; + u8 log_max_current_mc_list[0x5]; + u8 reserved_63[0x3]; + u8 log_max_current_uc_list[0x5]; + + u8 reserved_64[0x80]; + + u8 reserved_65[0x3]; u8 log_max_l2_table[0x5]; - u8 reserved_63[0x8]; + u8 reserved_66[0x8]; u8 log_uar_page_sz[0x10]; - u8 reserved_64[0x100]; - - u8 reserved_65[0x1f]; + u8 reserved_67[0x40]; + u8 device_frequency_khz[0x20]; + u8 reserved_68[0x5f]; u8 cqe_zip[0x1]; u8 cqe_zip_timeout[0x10]; u8 cqe_zip_max_num[0x10]; - u8 reserved_66[0x220]; + u8 reserved_69[0x220]; }; -enum { - MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1, - MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2, +enum mlx5_flow_destination_type { + MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0, + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, + MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, }; struct mlx5_ifc_dest_format_struct_bits { @@ -900,6 +957,13 @@ struct mlx5_ifc_mac_address_layout_bits { u8 mac_addr_31_0[0x20]; }; +struct mlx5_ifc_vlan_layout_bits { + u8 reserved_0[0x14]; + u8 vlan[0x0c]; + + u8 reserved_1[0x20]; +}; + struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { u8 reserved_0[0xa0]; @@ -1829,6 +1893,8 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_roce_cap_bits roce_cap; struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; + struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; + struct mlx5_ifc_e_switch_cap_bits e_switch_cap; u8 reserved_0[0x8000]; }; @@ -2133,24 +2199,35 @@ struct mlx5_ifc_rmpc_bits { struct mlx5_ifc_wq_bits wq; }; -enum { - MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0, -}; - struct mlx5_ifc_nic_vport_context_bits { u8 reserved_0[0x1f]; u8 roce_en[0x1]; - u8 reserved_1[0x760]; + u8 arm_change_event[0x1]; + u8 reserved_1[0x1a]; + u8 event_on_mtu[0x1]; + u8 event_on_promisc_change[0x1]; + u8 event_on_vlan_change[0x1]; + u8 event_on_mc_address_change[0x1]; + u8 event_on_uc_address_change[0x1]; - u8 reserved_2[0x5]; + u8 reserved_2[0xf0]; + + u8 mtu[0x10]; + + u8 reserved_3[0x640]; + + u8 promisc_uc[0x1]; + u8 promisc_mc[0x1]; + u8 promisc_all[0x1]; + u8 reserved_4[0x2]; u8 allowed_list_type[0x3]; - u8 reserved_3[0xc]; + u8 reserved_5[0xc]; u8 allowed_list_size[0xc]; struct mlx5_ifc_mac_address_layout_bits permanent_address; - u8 reserved_4[0x20]; + u8 reserved_6[0x20]; u8 current_uc_mac_address[0][0x40]; }; @@ -2263,6 +2340,26 @@ struct mlx5_ifc_hca_vport_context_bits { u8 reserved_6[0xca0]; }; +struct mlx5_ifc_esw_vport_context_bits { + u8 reserved_0[0x3]; + u8 vport_svlan_strip[0x1]; + u8 vport_cvlan_strip[0x1]; + u8 vport_svlan_insert[0x1]; + u8 vport_cvlan_insert[0x2]; + u8 reserved_1[0x18]; + + u8 reserved_2[0x20]; + + u8 svlan_cfi[0x1]; + u8 svlan_pcp[0x3]; + u8 svlan_id[0xc]; + u8 cvlan_cfi[0x1]; + u8 cvlan_pcp[0x3]; + u8 cvlan_id[0xc]; + + u8 reserved_3[0x7a0]; +}; + enum { MLX5_EQC_STATUS_OK = 0x0, MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa, @@ -2769,6 +2866,13 @@ struct mlx5_ifc_set_hca_cap_in_bits { union 
mlx5_ifc_hca_cap_union_bits capability; }; +enum { + MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3 +}; + struct mlx5_ifc_set_fte_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; @@ -2793,11 +2897,14 @@ struct mlx5_ifc_set_fte_in_bits { u8 reserved_4[0x8]; u8 table_id[0x18]; - u8 reserved_5[0x40]; + u8 reserved_5[0x18]; + u8 modify_enable_mask[0x8]; + + u8 reserved_6[0x20]; u8 flow_index[0x20]; - u8 reserved_6[0xe0]; + u8 reserved_7[0xe0]; struct mlx5_ifc_flow_context_bits flow_context; }; @@ -2940,6 +3047,7 @@ struct mlx5_ifc_query_vport_state_out_bits { enum { MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1, }; struct mlx5_ifc_query_vport_state_in_bits { @@ -3700,6 +3808,64 @@ struct mlx5_ifc_query_flow_group_in_bits { u8 reserved_5[0x120]; }; +struct mlx5_ifc_query_esw_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_esw_vport_context_bits esw_vport_context; +}; + +struct mlx5_ifc_query_esw_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_modify_esw_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_esw_vport_context_fields_select_bits { + u8 reserved[0x1c]; + u8 vport_cvlan_insert[0x1]; + u8 vport_svlan_insert[0x1]; + u8 vport_cvlan_strip[0x1]; + u8 vport_svlan_strip[0x1]; +}; + +struct mlx5_ifc_modify_esw_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + struct mlx5_ifc_esw_vport_context_fields_select_bits field_select; + + struct mlx5_ifc_esw_vport_context_bits esw_vport_context; +}; + struct mlx5_ifc_query_eq_out_bits { u8 status[0x8]; u8 reserved_0[0x18]; @@ -4228,7 +4394,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { }; struct mlx5_ifc_modify_nic_vport_field_select_bits { - u8 reserved_0[0x1c]; + u8 reserved_0[0x19]; + u8 mtu[0x1]; + u8 change_event[0x1]; + u8 promisc[0x1]; u8 permanent_address[0x1]; u8 addresses_list[0x1]; u8 roce_en[0x1]; @@ -5519,12 +5688,16 @@ struct mlx5_ifc_create_flow_table_in_bits { u8 reserved_4[0x20]; - u8 reserved_5[0x8]; + u8 reserved_5[0x4]; + u8 table_miss_mode[0x4]; u8 level[0x8]; u8 reserved_6[0x8]; u8 log_size[0x8]; - u8 reserved_7[0x120]; + u8 reserved_7[0x8]; + u8 table_miss_id[0x18]; + + u8 reserved_8[0x100]; }; struct mlx5_ifc_create_flow_group_out_bits { @@ -6798,4 +6971,72 @@ union mlx5_ifc_uplink_pci_interface_document_bits { u8 reserved_0[0x20060]; }; +struct mlx5_ifc_set_flow_table_root_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_flow_table_root_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x140]; +}; + +enum { + MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = 0x1, +}; + +struct mlx5_ifc_modify_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + 
u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x10]; + u8 modify_field_select[0x10]; + + u8 table_type[0x8]; + u8 reserved_4[0x18]; + + u8 reserved_5[0x8]; + u8 table_id[0x18]; + + u8 reserved_6[0x4]; + u8 table_miss_mode[0x4]; + u8 reserved_7[0x18]; + + u8 reserved_8[0x8]; + u8 table_miss_id[0x18]; + + u8 reserved_9[0x100]; +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 967e0fd06e89..638f2ca7a527 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -34,9 +34,17 @@ #define __MLX5_VPORT_H__ #include <linux/mlx5/driver.h> +#include <linux/mlx5/device.h> -u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod); -void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr); +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); +u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, + u16 vport); +int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, + u16 vport, u8 state); +int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, + u16 vport, u8 *addr); +int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, + u16 vport, u8 *addr); int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, u16 vf_num, u16 gid_index, union ib_gid *gid); @@ -51,5 +59,30 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, u64 *sys_image_guid); int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid); +int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, + u32 vport, + enum mlx5_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size); +int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, + enum mlx5_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size); +int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, + u32 vport, + int *promisc_uc, + int *promisc_mc, + int *promisc_all); +int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, + int promisc_uc, + int promisc_mc, + int promisc_all); +int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, + u32 vport, + u16 vlans[], + int *size); +int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, + u16 vlans[], + int list_size); #endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 00bad7793788..839d9e9a1c38 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -51,6 +51,17 @@ extern int sysctl_legacy_va_layout; #define sysctl_legacy_va_layout 0 #endif +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS +extern const int mmap_rnd_bits_min; +extern const int mmap_rnd_bits_max; +extern int mmap_rnd_bits __read_mostly; +#endif +#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS +extern const int mmap_rnd_compat_bits_min; +extern const int mmap_rnd_compat_bits_max; +extern int mmap_rnd_compat_bits __read_mostly; +#endif + #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> @@ -225,10 +236,14 @@ extern pgprot_t protection_map[16]; * ->fault function. The vma's ->fault is responsible for returning a bitmask * of VM_FAULT_xxx flags that give details about how the fault was handled. * + * MM layer fills up gfp_mask for page allocations but fault handler might + * alter it if its implementation requires a different allocation context. 
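
/*
 * Illustrative sketch (not taken from this patch) of the per-vport
 * helpers added to mlx5/vport.h above.  The "VF n sits on vport n + 1"
 * mapping is an assumption made only for this example.
 */
#include <linux/errno.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

/* Read this function's own MAC (vport 0 addresses the caller itself). */
static int example_get_own_mac(struct mlx5_core_dev *mdev, u8 *mac)
{
        return mlx5_query_nic_vport_mac_address(mdev, 0, mac);
}

/* Program a MAC on behalf of a VF; only meaningful on the PF. */
static int example_set_vf_mac(struct mlx5_core_dev *mdev, int vf, u8 *mac)
{
        if (!mlx5_core_is_pf(mdev))
                return -EPERM;

        return mlx5_modify_nic_vport_mac_address(mdev, vf + 1, mac);
}
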
+ * * pgoff should be used in favour of virtual_address, if possible. */ struct vm_fault { unsigned int flags; /* FAULT_FLAG_xxx flags */ + gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ void __user *virtual_address; /* Faulting virtual address */ @@ -1361,10 +1376,26 @@ static inline void dec_mm_counter(struct mm_struct *mm, int member) atomic_long_dec(&mm->rss_stat.count[member]); } +/* Optimized variant when page is already known not to be PageAnon */ +static inline int mm_counter_file(struct page *page) +{ + if (PageSwapBacked(page)) + return MM_SHMEMPAGES; + return MM_FILEPAGES; +} + +static inline int mm_counter(struct page *page) +{ + if (PageAnon(page)) + return MM_ANONPAGES; + return mm_counter_file(page); +} + static inline unsigned long get_mm_rss(struct mm_struct *mm) { return get_mm_counter(mm, MM_FILEPAGES) + - get_mm_counter(mm, MM_ANONPAGES); + get_mm_counter(mm, MM_ANONPAGES) + + get_mm_counter(mm, MM_SHMEMPAGES); } static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) @@ -1898,7 +1929,9 @@ extern void mm_drop_all_locks(struct mm_struct *mm); extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern struct file *get_mm_exe_file(struct mm_struct *mm); -extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); +extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); +extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); + extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, @@ -2116,15 +2149,6 @@ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data); -#ifdef CONFIG_PROC_FS -void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); -#else -static inline void vm_stat_account(struct mm_struct *mm, - unsigned long flags, struct file *file, long pages) -{ - mm->total_vm += pages; -} -#endif /* CONFIG_PROC_FS */ #ifdef CONFIG_DEBUG_PAGEALLOC extern bool _debug_pagealloc_enabled; diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index cf55945c83fb..712e8c37a200 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -100,4 +100,6 @@ static __always_inline enum lru_list page_lru(struct page *page) return lru; } +#define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) + #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index f8d1492a114f..6bc9a0ce2253 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -369,9 +369,10 @@ struct core_state { }; enum { - MM_FILEPAGES, - MM_ANONPAGES, - MM_SWAPENTS, + MM_FILEPAGES, /* Resident file mapping pages */ + MM_ANONPAGES, /* Resident anonymous pages */ + MM_SWAPENTS, /* Anonymous swap entries */ + MM_SHMEMPAGES, /* Resident shared memory pages */ NR_MM_COUNTERS }; @@ -426,7 +427,7 @@ struct mm_struct { unsigned long total_vm; /* Total pages mapped */ unsigned long locked_vm; /* Pages that have PG_mlocked set */ unsigned long pinned_vm; /* Refcount permanently increased */ - unsigned long shared_vm; /* Shared pages (files) */ + unsigned long data_vm; /* VM_WRITE & ~VM_SHARED/GROWSDOWN */ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */ unsigned long stack_vm; /* VM_GROWSUP/DOWN */ unsigned long def_flags; diff --git a/include/linux/mmc/dw_mmc.h 
b/include/linux/mmc/dw_mmc.h index f67b2ec18e6d..89df7abedd67 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -172,7 +172,7 @@ struct dw_mci { /* For edmac */ struct dw_mci_dma_slave *dms; /* Registers's physical base address */ - void *phy_regs; + resource_size_t phy_regs; u32 cmd_status; u32 data_status; @@ -235,16 +235,10 @@ struct dw_mci_dma_ops { }; /* IP Quirks/flags. */ -/* DTO fix for command transmission with IDMAC configured */ -#define DW_MCI_QUIRK_IDMAC_DTO BIT(0) -/* delay needed between retries on some 2.11a implementations */ -#define DW_MCI_QUIRK_RETRY_DELAY BIT(1) -/* High Speed Capable - Supports HS cards (up to 50MHz) */ -#define DW_MCI_QUIRK_HIGHSPEED BIT(2) /* Unreliable card detection */ -#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3) +#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(0) /* Timer for broken data transfer over scheme */ -#define DW_MCI_QUIRK_BROKEN_DTO BIT(4) +#define DW_MCI_QUIRK_BROKEN_DTO BIT(1) struct dma_pdata; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 8673ffe3d86e..8dd4d290ab0d 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -212,7 +212,9 @@ struct mmc_host { u32 ocr_avail_sdio; /* SDIO-specific OCR */ u32 ocr_avail_sd; /* SD-specific OCR */ u32 ocr_avail_mmc; /* MMC-specific OCR */ +#ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; +#endif u32 max_current_330; u32 max_current_300; u32 max_current_180; @@ -259,7 +261,6 @@ struct mmc_host { #define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */ #define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */ #define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */ -#define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. */ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ @@ -289,6 +290,7 @@ struct mmc_host { #define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V) #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) #define MMC_CAP2_NO_WRITE_PROTECT (1 << 18) /* No physical write protect pin, assume that card is always read-write */ +#define MMC_CAP2_NO_SDIO (1 << 19) /* Do not send SDIO commands during initialization */ mmc_pm_flag_t pm_caps; /* supported pm features */ @@ -434,8 +436,6 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, int mmc_regulator_get_supply(struct mmc_host *mmc); -int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *); - static inline int mmc_card_is_removable(struct mmc_host *host) { return !(host->caps & MMC_CAP_NONREMOVABLE); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index e23a9e704536..33bb1b19273e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -195,11 +195,6 @@ static inline int is_active_lru(enum lru_list lru) return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); } -static inline int is_unevictable_lru(enum lru_list lru) -{ - return (lru == LRU_UNEVICTABLE); -} - struct zone_reclaim_stat { /* * The pageout code in vmscan.c keeps track of how many of the @@ -361,10 +356,10 @@ struct zone { struct per_cpu_pageset __percpu *pageset; /* - * This is a per-zone reserve of pages that should not be - * considered dirtyable memory. + * This is a per-zone reserve of pages that are not available + * to userspace allocations. 
*/ - unsigned long dirty_balance_reserve; + unsigned long totalreserve_pages; #ifndef CONFIG_SPARSEMEM /* @@ -576,19 +571,17 @@ static inline bool zone_is_empty(struct zone *zone) /* Maximum number of zones on a zonelist */ #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) +enum { + ZONELIST_FALLBACK, /* zonelist with fallback */ #ifdef CONFIG_NUMA - -/* - * The NUMA zonelists are doubled because we need zonelists that restrict the - * allocations to a single node for __GFP_THISNODE. - * - * [0] : Zonelist with fallback - * [1] : No fallback (__GFP_THISNODE) - */ -#define MAX_ZONELISTS 2 -#else -#define MAX_ZONELISTS 1 + /* + * The NUMA zonelists are doubled because we need zonelists that + * restrict the allocations to a single node for __GFP_THISNODE. + */ + ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */ #endif + MAX_ZONELISTS +}; /* * This struct contains information about a zone in a zonelist. It is stored @@ -1207,13 +1200,13 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); * the zone and PFN linkages are still valid. This is expensive, but walkers * of the full memmap are extremely rare. */ -int memmap_valid_within(unsigned long pfn, +bool memmap_valid_within(unsigned long pfn, struct page *page, struct zone *zone); #else -static inline int memmap_valid_within(unsigned long pfn, +static inline bool memmap_valid_within(unsigned long pfn, struct page *page, struct zone *zone) { - return 1; + return true; } #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 64f36e09a790..6e4c645e1c0d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -404,7 +404,7 @@ struct virtio_device_id { * For Hyper-V devices we use the device guid as the id. */ struct hv_vmbus_device_id { - __u8 guid[16]; + uuid_le guid; kernel_ulong_t driver_data; /* Data private to the driver */ }; diff --git a/include/linux/module.h b/include/linux/module.h index 3a19c79918e0..4560d8f1545d 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -302,6 +302,28 @@ struct mod_tree_node { struct latch_tree_node node; }; +struct module_layout { + /* The actual code + data. */ + void *base; + /* Total size. */ + unsigned int size; + /* The size of the executable code. */ + unsigned int text_size; + /* Size of RO section of the module (text+rodata) */ + unsigned int ro_size; + +#ifdef CONFIG_MODULES_TREE_LOOKUP + struct mod_tree_node mtn; +#endif +}; + +#ifdef CONFIG_MODULES_TREE_LOOKUP +/* Only touch one cacheline for common rbtree-for-core-layout case. */ +#define __module_layout_align ____cacheline_aligned +#else +#define __module_layout_align +#endif + struct module { enum module_state state; @@ -366,37 +388,9 @@ struct module { /* Startup function. */ int (*init)(void); - /* - * If this is non-NULL, vfree() after init() returns. - * - * Cacheline align here, such that: - * module_init, module_core, init_size, core_size, - * init_text_size, core_text_size and mtn_core::{mod,node[0]} - * are on the same cacheline. - */ - void *module_init ____cacheline_aligned; - - /* Here is the actual code + data, vfree'd on unload. */ - void *module_core; - - /* Here are the sizes of the init and core sections */ - unsigned int init_size, core_size; - - /* The size of the executable code in each section. 
*/ - unsigned int init_text_size, core_text_size; - -#ifdef CONFIG_MODULES_TREE_LOOKUP - /* - * We want mtn_core::{mod,node[0]} to be in the same cacheline as the - * above entries such that a regular lookup will only touch one - * cacheline. - */ - struct mod_tree_node mtn_core; - struct mod_tree_node mtn_init; -#endif - - /* Size of RO sections of the module (text+rodata) */ - unsigned int init_ro_size, core_ro_size; + /* Core layout: rbtree is accessed frequently, so keep together. */ + struct module_layout core_layout __module_layout_align; + struct module_layout init_layout; /* Arch-specific module values */ struct mod_arch_specific arch; @@ -505,15 +499,15 @@ bool is_module_text_address(unsigned long addr); static inline bool within_module_core(unsigned long addr, const struct module *mod) { - return (unsigned long)mod->module_core <= addr && - addr < (unsigned long)mod->module_core + mod->core_size; + return (unsigned long)mod->core_layout.base <= addr && + addr < (unsigned long)mod->core_layout.base + mod->core_layout.size; } static inline bool within_module_init(unsigned long addr, const struct module *mod) { - return (unsigned long)mod->module_init <= addr && - addr < (unsigned long)mod->module_init + mod->init_size; + return (unsigned long)mod->init_layout.base <= addr && + addr < (unsigned long)mod->init_layout.base + mod->init_layout.size; } static inline bool within_module(unsigned long addr, const struct module *mod) @@ -768,9 +762,13 @@ extern int module_sysfs_initialized; #ifdef CONFIG_DEBUG_SET_MODULE_RONX extern void set_all_modules_text_rw(void); extern void set_all_modules_text_ro(void); +extern void module_enable_ro(const struct module *mod); +extern void module_disable_ro(const struct module *mod); #else static inline void set_all_modules_text_rw(void) { } static inline void set_all_modules_text_ro(void) { } +static inline void module_enable_ro(const struct module *mod) { } +static inline void module_disable_ro(const struct module *mod) { } #endif #ifdef CONFIG_GENERIC_BUG diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 79aaa9fc1a15..bf9b322cb0b0 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -9,38 +9,28 @@ #ifdef CONFIG_IP_MROUTE static inline int ip_mroute_opt(int opt) { - return (opt >= MRT_BASE) && (opt <= MRT_MAX); + return opt >= MRT_BASE && opt <= MRT_MAX; } -#else -static inline int ip_mroute_opt(int opt) -{ - return 0; -} -#endif -#ifdef CONFIG_IP_MROUTE -extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); -extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); -extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); -extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); -extern int ip_mr_init(void); +int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); +int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); +int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); +int ip_mr_init(void); #else -static inline -int ip_mroute_setsockopt(struct sock *sock, - int optname, char __user *optval, unsigned int optlen) +static inline int ip_mroute_setsockopt(struct sock *sock, int optname, + char __user *optval, unsigned int optlen) { return -ENOPROTOOPT; } -static inline -int ip_mroute_getsockopt(struct sock *sock, - int optname, char __user *optval, int __user *optlen) +static inline int 
ip_mroute_getsockopt(struct sock *sock, int optname, + char __user *optval, int __user *optlen) { return -ENOPROTOOPT; } -static inline -int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) +static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) { return -ENOIOCTLCMD; } @@ -49,6 +39,11 @@ static inline int ip_mr_init(void) { return 0; } + +static inline int ip_mroute_opt(int opt) +{ + return 0; +} #endif struct vif_device { @@ -64,6 +59,32 @@ struct vif_device { #define VIFF_STATIC 0x8000 +#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) +#define MFC_LINES 64 + +struct mr_table { + struct list_head list; + possible_net_t net; + u32 id; + struct sock __rcu *mroute_sk; + struct timer_list ipmr_expire_timer; + struct list_head mfc_unres_queue; + struct list_head mfc_cache_array[MFC_LINES]; + struct vif_device vif_table[MAXVIFS]; + int maxvif; + atomic_t cache_resolve_queue_len; + bool mroute_do_assert; + bool mroute_do_pim; + int mroute_reg_vif_num; +}; + +/* mfc_flags: + * MFC_STATIC - the entry was added statically (not by a routing daemon) + */ +enum { + MFC_STATIC = BIT(0), +}; + struct mfc_cache { struct list_head list; __be32 mfc_mcastgrp; /* Group the entry belongs to */ @@ -89,19 +110,14 @@ struct mfc_cache { struct rcu_head rcu; }; -#define MFC_STATIC 1 -#define MFC_NOTIFY 2 - -#define MFC_LINES 64 - #ifdef __BIG_ENDIAN #define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1)) #else #define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1)) -#endif +#endif struct rtmsg; -extern int ipmr_get_route(struct net *net, struct sk_buff *skb, - __be32 saddr, __be32 daddr, - struct rtmsg *rtm, int nowait); +int ipmr_get_route(struct net *net, struct sk_buff *skb, + __be32 saddr, __be32 daddr, + struct rtmsg *rtm, int nowait); #endif diff --git a/include/linux/msi.h b/include/linux/msi.h index f71a25e5fd25..1c6342ab8c0e 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -174,6 +174,7 @@ struct msi_controller { #include <asm/msi.h> struct irq_domain; +struct irq_domain_ops; struct irq_chip; struct device_node; struct fwnode_handle; @@ -279,6 +280,23 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, irq_write_msi_msg_t write_msi_msg); void platform_msi_domain_free_irqs(struct device *dev); + +/* When an MSI domain is used as an intermediate domain */ +int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *args); +int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, + int virq, int nvec, msi_alloc_info_t *args); +struct irq_domain * +platform_msi_create_device_domain(struct device *dev, + unsigned int nvec, + irq_write_msi_msg_t write_msi_msg, + const struct irq_domain_ops *ops, + void *host_data); +int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs); +void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nvec); +void *platform_msi_get_host_data(struct irq_domain *domain); #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 366cf77953b5..58f3ba709ade 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -142,7 +142,9 @@ #endif #ifndef map_bankwidth +#ifdef CONFIG_MTD #warning "No 
CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work" +#endif static inline int map_bankwidth(void *map) { BUG(); diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index f17fa75809aa..cc84923011c0 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -254,6 +254,17 @@ struct mtd_info { int usecount; }; +static inline void mtd_set_of_node(struct mtd_info *mtd, + struct device_node *np) +{ + mtd->dev.of_node = np; +} + +static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) +{ + return mtd->dev.of_node; +} + int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys); diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 5a9d1d4c2487..bdd68e22b5a5 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -129,6 +129,14 @@ typedef enum { /* Enable Hardware ECC before syndrome is read back from flash */ #define NAND_ECC_READSYN 2 +/* + * Enable generic NAND 'page erased' check. This check is only done when + * ecc.correct() returns -EBADMSG. + * Set this flag if your implementation does not fix bitflips in erased + * pages and you want to rely on the default implementation. + */ +#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) + /* Bit mask for flags passed to do_nand_read_ecc */ #define NAND_GET_DEVICE 0x80 @@ -276,15 +284,15 @@ struct nand_onfi_params { __le16 t_r; __le16 t_ccs; __le16 src_sync_timing_mode; - __le16 src_ssync_features; + u8 src_ssync_features; __le16 clk_pin_capacitance_typ; __le16 io_pin_capacitance_typ; __le16 input_pin_capacitance_typ; u8 input_pin_capacitance_max; u8 driver_strength_support; __le16 t_int_r; - __le16 t_ald; - u8 reserved4[7]; + __le16 t_adl; + u8 reserved4[8]; /* vendor */ __le16 vendor_revision; @@ -407,7 +415,7 @@ struct nand_jedec_params { __le16 input_pin_capacitance_typ; __le16 clk_pin_capacitance_typ; u8 driver_strength_support; - __le16 t_ald; + __le16 t_adl; u8 reserved4[36]; /* ECC and endurance block */ @@ -451,12 +459,19 @@ struct nand_hw_control { * @total: total number of ECC bytes per page * @prepad: padding information for syndrome based ECC generators * @postpad: padding information for syndrome based ECC generators + * @options: ECC specific options (see NAND_ECC_XXX flags defined above) * @layout: ECC layout control struct pointer * @priv: pointer to private ECC control data * @hwctl: function to control hardware ECC generator. Must only * be provided if an hardware ECC is available * @calculate: function for ECC calculation or readback from ECC hardware - * @correct: function for ECC correction, matching to ECC generator (sw/hw) + * @correct: function for ECC correction, matching to ECC generator (sw/hw). + * Should return a positive number representing the number of + * corrected bitflips, -EBADMSG if the number of bitflips exceed + * ECC strength, or any other error code if the error is not + * directly related to correction. + * If -EBADMSG is returned the input buffers should be left + * untouched. * @read_page_raw: function to read a raw page without ECC. 
This function * should hide the specific layout used by the ECC * controller and always return contiguous in-band and @@ -494,6 +509,7 @@ struct nand_ecc_ctrl { int strength; int prepad; int postpad; + unsigned int options; struct nand_ecclayout *layout; void *priv; void (*hwctl)(struct mtd_info *mtd, int mode); @@ -540,11 +556,11 @@ struct nand_buffers { /** * struct nand_chip - NAND Private Flash Chip Data + * @mtd: MTD device registered to the MTD framework * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the * flash device * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the * flash device. - * @flash_node: [BOARDSPECIFIC] device node describing this instance * @read_byte: [REPLACEABLE] read one byte from the chip * @read_word: [REPLACEABLE] read one word from the chip * @write_byte: [REPLACEABLE] write a single byte to the chip on the @@ -640,11 +656,10 @@ struct nand_buffers { */ struct nand_chip { + struct mtd_info mtd; void __iomem *IO_ADDR_R; void __iomem *IO_ADDR_W; - struct device_node *flash_node; - uint8_t (*read_byte)(struct mtd_info *mtd); u16 (*read_word)(struct mtd_info *mtd); void (*write_byte)(struct mtd_info *mtd, uint8_t byte); @@ -719,6 +734,37 @@ struct nand_chip { void *priv; }; +static inline void nand_set_flash_node(struct nand_chip *chip, + struct device_node *np) +{ + mtd_set_of_node(&chip->mtd, np); +} + +static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) +{ + return mtd_get_of_node(&chip->mtd); +} + +static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd) +{ + return container_of(mtd, struct nand_chip, mtd); +} + +static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip) +{ + return &chip->mtd; +} + +static inline void *nand_get_controller_data(struct nand_chip *chip) +{ + return chip->priv; +} + +static inline void nand_set_controller_data(struct nand_chip *chip, void *priv) +{ + chip->priv = priv; +} + /* * NAND Flash Manufacturer ID Codes */ @@ -907,15 +953,6 @@ struct platform_nand_data { struct platform_nand_ctrl ctrl; }; -/* Some helpers to access the data structures */ -static inline -struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd) -{ - struct nand_chip *chip = mtd->priv; - - return chip->priv; -} - /* return the supported features. */ static inline int onfi_feature(struct nand_chip *chip) { diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h index 74acf5367556..fb0bc3420a10 100644 --- a/include/linux/mtd/nand_bch.h +++ b/include/linux/mtd/nand_bch.h @@ -55,7 +55,7 @@ static inline int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, unsigned char *read_ecc, unsigned char *calc_ecc) { - return -1; + return -ENOTSUPP; } static inline struct nand_bch_control * diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index 6a35e6de5da1..70736e1e6c8f 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -41,7 +41,6 @@ struct mtd_partition { uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ - struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */ }; #define MTDPART_OFS_RETAIN (-3) @@ -56,11 +55,9 @@ struct device_node; /** * struct mtd_part_parser_data - used to pass data to MTD partition parsers. 
* @origin: for RedBoot, start address of MTD device - * @of_node: for OF parsers, device node containing partitioning information */ struct mtd_part_parser_data { unsigned long origin; - struct device_node *of_node; }; @@ -72,13 +69,33 @@ struct mtd_part_parser { struct list_head list; struct module *owner; const char *name; - int (*parse_fn)(struct mtd_info *, struct mtd_partition **, + int (*parse_fn)(struct mtd_info *, const struct mtd_partition **, struct mtd_part_parser_data *); + void (*cleanup)(const struct mtd_partition *pparts, int nr_parts); }; -extern void register_mtd_parser(struct mtd_part_parser *parser); +/* Container for passing around a set of parsed partitions */ +struct mtd_partitions { + const struct mtd_partition *parts; + int nr_parts; + const struct mtd_part_parser *parser; +}; + +extern int __register_mtd_parser(struct mtd_part_parser *parser, + struct module *owner); +#define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE) + extern void deregister_mtd_parser(struct mtd_part_parser *parser); +/* + * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't + * do anything special in module init/exit. Each driver may only use this macro + * once, and calling it replaces module_init() and module_exit(). + */ +#define module_mtd_part_parser(__mtd_part_parser) \ + module_driver(__mtd_part_parser, register_mtd_parser, \ + deregister_mtd_parser) + int mtd_is_partition(const struct mtd_info *mtd); int mtd_add_partition(struct mtd_info *master, const char *name, long long offset, long long length); diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h index 1c28f8879b1c..2251add65fa7 100644 --- a/include/linux/mtd/sh_flctl.h +++ b/include/linux/mtd/sh_flctl.h @@ -143,11 +143,11 @@ enum flctl_ecc_res_t { struct dma_chan; struct sh_flctl { - struct mtd_info mtd; struct nand_chip chip; struct platform_device *pdev; struct dev_pm_qos_request pm_qos; void __iomem *reg; + resource_size_t fifo; uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ int read_bytes; @@ -186,7 +186,7 @@ struct sh_flctl_platform_data { static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) { - return container_of(mtdinfo, struct sh_flctl, mtd); + return container_of(mtd_to_nand(mtdinfo), struct sh_flctl, chip); } #endif /* __SH_FLCTL_H__ */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index c8723b62c4cd..62356d50815b 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -12,6 +12,7 @@ #include <linux/bitops.h> #include <linux/mtd/cfi.h> +#include <linux/mtd/mtd.h> /* * Manufacturer IDs @@ -25,7 +26,7 @@ #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX #define SNOR_MFR_SPANSION CFI_MFR_AMD #define SNOR_MFR_SST CFI_MFR_SST -#define SNOR_MFR_WINBOND 0xef +#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */ /* * Note on opcode nomenclature: some opcodes have a format like @@ -117,14 +118,11 @@ enum spi_nor_option_flags { SNOR_F_USE_FSR = BIT(0), }; -struct mtd_info; - /** * struct spi_nor - Structure for defining a the SPI NOR layer * @mtd: point to a mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: point to a spi device, or a spi nor controller device. - * @flash_node: point to a device node describing this flash instance. 
* @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -144,7 +142,8 @@ struct mtd_info; * @read: [DRIVER-SPECIFIC] read data from the SPI NOR * @write: [DRIVER-SPECIFIC] write data to the SPI NOR * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR - * at the offset @offs + * at the offset @offs; if not provided by the driver, + * spi-nor will send the erase opcode via write_reg() * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is @@ -155,7 +154,6 @@ struct spi_nor { struct mtd_info mtd; struct mutex lock; struct device *dev; - struct device_node *flash_node; u32 page_size; u8 addr_width; u8 erase_opcode; @@ -185,6 +183,17 @@ struct spi_nor { void *priv; }; +static inline void spi_nor_set_flash_node(struct spi_nor *nor, + struct device_node *np) +{ + mtd_set_of_node(&nor->mtd, np); +} + +static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor) +{ + return mtd_get_of_node(&nor->mtd); +} + /** * spi_nor_scan() - scan the SPI NOR * @nor: the spi_nor structure diff --git a/include/linux/namei.h b/include/linux/namei.h index d8c6334cd150..d0f25d81b46a 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -77,6 +77,7 @@ extern struct dentry *kern_path_locked(const char *, struct path *); extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); +extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); extern int follow_down_one(struct path *); extern int follow_down(struct path *); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index f0d87347df19..d9654f0eecb3 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -52,7 +52,7 @@ enum { NETIF_F_GSO_TUNNEL_REMCSUM_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ - NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ + NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/ NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ NETIF_F_RXHASH_BIT, /* Receive hashing offload */ @@ -103,7 +103,7 @@ enum { #define NETIF_F_NTUPLE __NETIF_F(NTUPLE) #define NETIF_F_RXCSUM __NETIF_F(RXCSUM) #define NETIF_F_RXHASH __NETIF_F(RXHASH) -#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM) +#define NETIF_F_SCTP_CRC __NETIF_F(SCTP_CRC) #define NETIF_F_SG __NETIF_F(SG) #define NETIF_F_TSO6 __NETIF_F(TSO6) #define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) @@ -146,10 +146,12 @@ enum { #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ NETIF_F_TSO6 | NETIF_F_UFO) -#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM -#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) -#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) -#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) +/* List of IP checksum features. 
Note that NETIF_F_HW_CSUM should not be + * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set-- + * this would be contradictory + */ +#define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_HW_CSUM) #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3143c847bddb..5ac140dcb789 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -132,7 +132,9 @@ static inline bool dev_xmit_complete(int rc) * used. */ -#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) +#if defined(CONFIG_HYPERV_NET) +# define LL_MAX_HEADER 128 +#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) # if defined(CONFIG_MAC80211_MESH) # define LL_MAX_HEADER 128 # else @@ -326,7 +328,8 @@ enum { NAPI_STATE_SCHED, /* Poll is scheduled */ NAPI_STATE_DISABLE, /* Disable pending */ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ - NAPI_STATE_HASHED, /* In NAPI hash */ + NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ + NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ }; enum gro_result { @@ -461,19 +464,13 @@ static inline void napi_complete(struct napi_struct *n) } /** - * napi_by_id - lookup a NAPI by napi_id - * @napi_id: hashed napi_id - * - * lookup @napi_id in napi_hash table - * must be called under rcu_read_lock() - */ -struct napi_struct *napi_by_id(unsigned int napi_id); - -/** * napi_hash_add - add a NAPI to global hashtable * @napi: napi context * * generate a new napi_id and store a @napi under it in napi_hash + * Used for busy polling (CONFIG_NET_RX_BUSY_POLL) + * Note: This is normally automatically done from netif_napi_add(), + * so might disappear in a future linux version. */ void napi_hash_add(struct napi_struct *napi); @@ -482,9 +479,14 @@ void napi_hash_add(struct napi_struct *napi); * @napi: napi context * * Warning: caller must observe rcu grace period - * before freeing memory containing @napi + * before freeing memory containing @napi, if + * this function returns true. + * Note: core networking stack automatically calls it + * from netif_napi_del() + * Drivers might want to call this helper to combine all + * the needed rcu grace periods into a single one. */ -void napi_hash_del(struct napi_struct *napi); +bool napi_hash_del(struct napi_struct *napi); /** * napi_disable - prevent NAPI from scheduling @@ -810,6 +812,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required can not be NULL. * + * netdev_features_t (*ndo_fix_features)(struct net_device *dev, + * netdev_features_t features); + * Adjusts the requested feature flags according to device-specific + * constraints, and returns the resulting flags. Must not modify + * the device state. + * + * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, * void *accel_priv, select_queue_fallback_t fallback); * Called to decide which queue to use when device supports multiple @@ -957,12 +965,6 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * Called to release previously enslaved netdev. * * Feature/offload setting functions. - netdev_features_t (*ndo_fix_features)(struct net_device *dev, - * netdev_features_t features); - * Adjusts the requested feature flags according to device-specific - * constraints, and returns the resulting flags. Must not modify - * the device state. 
- * * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); * Called to update device configuration to new features. Passed * feature set might be less than what was returned by ndo_fix_features(). @@ -1011,6 +1013,19 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * a new port starts listening. The operation is protected by the * vxlan_net->sock_lock. * + * void (*ndo_add_geneve_port)(struct net_device *dev, + * sa_family_t sa_family, __be16 port); + * Called by geneve to notify a driver about the UDP port and socket + * address family that geneve is listening to. It is called only when + * a new port starts listening. The operation is protected by the + * geneve_net->sock_lock. + * + * void (*ndo_del_geneve_port)(struct net_device *dev, + * sa_family_t sa_family, __be16 port); + * Called by geneve to notify the driver about a UDP port and socket + * address family that geneve is not listening to anymore. The operation + * is protected by the geneve_net->sock_lock. + * * void (*ndo_del_vxlan_port)(struct net_device *dev, * sa_family_t sa_family, __be16 port); * Called by vxlan to notify the driver about a UDP port and socket @@ -1066,8 +1081,11 @@ struct net_device_ops { void (*ndo_uninit)(struct net_device *dev); int (*ndo_open)(struct net_device *dev); int (*ndo_stop)(struct net_device *dev); - netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, - struct net_device *dev); + netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, + struct net_device *dev); + netdev_features_t (*ndo_features_check)(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, void *accel_priv, @@ -1215,7 +1233,12 @@ struct net_device_ops { void (*ndo_del_vxlan_port)(struct net_device *dev, sa_family_t sa_family, __be16 port); - + void (*ndo_add_geneve_port)(struct net_device *dev, + sa_family_t sa_family, + __be16 port); + void (*ndo_del_geneve_port)(struct net_device *dev, + sa_family_t sa_family, + __be16 port); void* (*ndo_dfwd_add_station)(struct net_device *pdev, struct net_device *dev); void (*ndo_dfwd_del_station)(struct net_device *pdev, @@ -1225,9 +1248,6 @@ struct net_device_ops { struct net_device *dev, void *priv); int (*ndo_get_lock_subclass)(struct net_device *dev); - netdev_features_t (*ndo_features_check) (struct sk_buff *skb, - struct net_device *dev, - netdev_features_t features); int (*ndo_set_tx_maxrate)(struct net_device *dev, int queue_index, u32 maxrate); @@ -1271,6 +1291,7 @@ struct net_device_ops { * @IFF_NO_QUEUE: device can run without qdisc attached * @IFF_OPENVSWITCH: device is an Open vSwitch master * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device + * @IFF_TEAM: device is a team device */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1297,6 +1318,7 @@ enum netdev_priv_flags { IFF_NO_QUEUE = 1<<21, IFF_OPENVSWITCH = 1<<22, IFF_L3MDEV_SLAVE = 1<<23, + IFF_TEAM = 1<<24, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1323,6 +1345,7 @@ enum netdev_priv_flags { #define IFF_NO_QUEUE IFF_NO_QUEUE #define IFF_OPENVSWITCH IFF_OPENVSWITCH #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE +#define IFF_TEAM IFF_TEAM /** * struct net_device - The DEVICE structure. 
@@ -1716,7 +1739,9 @@ struct net_device { #ifdef CONFIG_XPS struct xps_dev_maps __rcu *xps_maps; #endif - +#ifdef CONFIG_NET_CLS_ACT + struct tcf_proto __rcu *egress_cl_list; +#endif #ifdef CONFIG_NET_SWITCHDEV u32 offload_fwd_mark; #endif @@ -1949,6 +1974,26 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight); /** + * netif_tx_napi_add - initialize a napi context + * @dev: network device + * @napi: napi context + * @poll: polling function + * @weight: default weight + * + * This variant of netif_napi_add() should be used from drivers using NAPI + * to exclusively poll a TX queue. + * This avoids adding it into napi_hash[] and thus polluting that hash table. + */ +static inline void netif_tx_napi_add(struct net_device *dev, + struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), + int weight) +{ + set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); + netif_napi_add(dev, napi, poll, weight); +} + +/** * netif_napi_del - remove a napi context * @napi: napi context * @@ -2086,6 +2131,24 @@ struct pcpu_sw_netstats { #define netdev_alloc_pcpu_stats(type) \ __netdev_alloc_pcpu_stats(type, GFP_KERNEL) +enum netdev_lag_tx_type { + NETDEV_LAG_TX_TYPE_UNKNOWN, + NETDEV_LAG_TX_TYPE_RANDOM, + NETDEV_LAG_TX_TYPE_BROADCAST, + NETDEV_LAG_TX_TYPE_ROUNDROBIN, + NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, + NETDEV_LAG_TX_TYPE_HASH, +}; + +struct netdev_lag_upper_info { + enum netdev_lag_tx_type tx_type; +}; + +struct netdev_lag_lower_state_info { + u8 link_up : 1, + tx_enabled : 1; +}; + #include <linux/notifier.h> /* netdevice notifier chain. Please remember to update the rtnetlink @@ -2121,6 +2184,7 @@ struct pcpu_sw_netstats { #define NETDEV_CHANGEINFODATA 0x0018 #define NETDEV_BONDING_INFO 0x0019 #define NETDEV_PRECHANGEUPPER 0x001A +#define NETDEV_CHANGELOWERSTATE 0x001B int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2139,6 +2203,12 @@ struct netdev_notifier_changeupper_info { struct net_device *upper_dev; /* new upper dev */ bool master; /* is upper dev master */ bool linking; /* is the notification for link or unlink */ + void *upper_info; /* upper dev info */ +}; + +struct netdev_notifier_changelowerstate_info { + struct netdev_notifier_info info; /* must be first */ + void *lower_state_info; /* is lower dev state */ }; static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, @@ -2472,6 +2542,71 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, remcsum_unadjust((__sum16 *)ptr, grc->delta); } +struct skb_csum_offl_spec { + __u16 ipv4_okay:1, + ipv6_okay:1, + encap_okay:1, + ip_options_okay:1, + ext_hdrs_okay:1, + tcp_okay:1, + udp_okay:1, + sctp_okay:1, + vlan_okay:1, + no_encapped_ipv6:1, + no_not_encapped:1; +}; + +bool __skb_csum_offload_chk(struct sk_buff *skb, + const struct skb_csum_offl_spec *spec, + bool *csum_encapped, + bool csum_help); + +static inline bool skb_csum_offload_chk(struct sk_buff *skb, + const struct skb_csum_offl_spec *spec, + bool *csum_encapped, + bool csum_help) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + return __skb_csum_offload_chk(skb, spec, csum_encapped, csum_help); +} + +static inline bool skb_csum_offload_chk_help(struct sk_buff *skb, + const struct skb_csum_offl_spec *spec) +{ + bool csum_encapped; + + return skb_csum_offload_chk(skb, spec, &csum_encapped, true); +} + +static inline bool skb_csum_off_chk_help_cmn(struct sk_buff *skb) +{ + static const 
struct skb_csum_offl_spec csum_offl_spec = { + .ipv4_okay = 1, + .ip_options_okay = 1, + .ipv6_okay = 1, + .vlan_okay = 1, + .tcp_okay = 1, + .udp_okay = 1, + }; + + return skb_csum_offload_chk_help(skb, &csum_offl_spec); +} + +static inline bool skb_csum_off_chk_help_cmn_v4_only(struct sk_buff *skb) +{ + static const struct skb_csum_offl_spec csum_offl_spec = { + .ipv4_okay = 1, + .ip_options_okay = 1, + .tcp_okay = 1, + .udp_okay = 1, + .vlan_okay = 1, + }; + + return skb_csum_offload_chk_help(skb, &csum_offl_spec); +} + static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, @@ -3595,15 +3730,15 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev); struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev); int netdev_master_upper_dev_link(struct net_device *dev, - struct net_device *upper_dev); -int netdev_master_upper_dev_link_private(struct net_device *dev, - struct net_device *upper_dev, - void *private); + struct net_device *upper_dev, + void *upper_priv, void *upper_info); void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev); void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); +void netdev_lower_state_changed(struct net_device *lower_dev, + void *lower_state_info); /* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 @@ -3611,7 +3746,7 @@ extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; void netdev_rss_key_fill(void *buffer, size_t len); int dev_get_nest_level(struct net_device *dev, - bool (*type_check)(struct net_device *dev)); + bool (*type_check)(const struct net_device *dev)); int skb_checksum_help(struct sk_buff *skb); struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); @@ -3641,13 +3776,37 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth); static inline bool can_checksum_protocol(netdev_features_t features, __be16 protocol) { - return ((features & NETIF_F_GEN_CSUM) || - ((features & NETIF_F_V4_CSUM) && - protocol == htons(ETH_P_IP)) || - ((features & NETIF_F_V6_CSUM) && - protocol == htons(ETH_P_IPV6)) || - ((features & NETIF_F_FCOE_CRC) && - protocol == htons(ETH_P_FCOE))); + if (protocol == htons(ETH_P_FCOE)) + return !!(features & NETIF_F_FCOE_CRC); + + /* Assume this is an IP checksum (not SCTP CRC) */ + + if (features & NETIF_F_HW_CSUM) { + /* Can checksum everything */ + return true; + } + + switch (protocol) { + case htons(ETH_P_IP): + return !!(features & NETIF_F_IP_CSUM); + case htons(ETH_P_IPV6): + return !!(features & NETIF_F_IPV6_CSUM); + default: + return false; + } +} + +/* Map an ethertype into IP protocol if possible */ +static inline int eproto_to_ipproto(int eproto) +{ + switch (eproto) { + case htons(ETH_P_IP): + return IPPROTO_IP; + case htons(ETH_P_IPV6): + return IPPROTO_IPV6; + default: + return -1; + } } #ifdef CONFIG_BUG @@ -3712,15 +3871,14 @@ void linkwatch_run_queue(void); static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, netdev_features_t f2) { - if (f1 & NETIF_F_GEN_CSUM) - f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); - if (f2 & NETIF_F_GEN_CSUM) - f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); - f1 &= f2; - if (f1 & NETIF_F_GEN_CSUM) - f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); + if ((f1 ^ f2) & 
NETIF_F_HW_CSUM) { + if (f1 & NETIF_F_HW_CSUM) + f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); + else + f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); + } - return f1; + return f1 & f2; } static inline netdev_features_t netdev_get_wanted_features( @@ -3808,32 +3966,32 @@ static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, skb->mac_len = mac_len; } -static inline bool netif_is_macvlan(struct net_device *dev) +static inline bool netif_is_macvlan(const struct net_device *dev) { return dev->priv_flags & IFF_MACVLAN; } -static inline bool netif_is_macvlan_port(struct net_device *dev) +static inline bool netif_is_macvlan_port(const struct net_device *dev) { return dev->priv_flags & IFF_MACVLAN_PORT; } -static inline bool netif_is_ipvlan(struct net_device *dev) +static inline bool netif_is_ipvlan(const struct net_device *dev) { return dev->priv_flags & IFF_IPVLAN_SLAVE; } -static inline bool netif_is_ipvlan_port(struct net_device *dev) +static inline bool netif_is_ipvlan_port(const struct net_device *dev) { return dev->priv_flags & IFF_IPVLAN_MASTER; } -static inline bool netif_is_bond_master(struct net_device *dev) +static inline bool netif_is_bond_master(const struct net_device *dev) { return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; } -static inline bool netif_is_bond_slave(struct net_device *dev) +static inline bool netif_is_bond_slave(const struct net_device *dev) { return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; } @@ -3868,6 +4026,26 @@ static inline bool netif_is_ovs_master(const struct net_device *dev) return dev->priv_flags & IFF_OPENVSWITCH; } +static inline bool netif_is_team_master(const struct net_device *dev) +{ + return dev->priv_flags & IFF_TEAM; +} + +static inline bool netif_is_team_port(const struct net_device *dev) +{ + return dev->priv_flags & IFF_TEAM_PORT; +} + +static inline bool netif_is_lag_master(const struct net_device *dev) +{ + return netif_is_bond_master(dev) || netif_is_team_master(dev); +} + +static inline bool netif_is_lag_port(const struct net_device *dev) +{ + return netif_is_bond_slave(dev) || netif_is_team_port(dev); +} + /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ static inline void netif_keep_dst(struct net_device *dev) { diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h new file mode 100644 index 000000000000..22a16a23cd8a --- /dev/null +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -0,0 +1,13 @@ +#ifndef _NF_CONNTRACK_SCTP_H +#define _NF_CONNTRACK_SCTP_H +/* SCTP tracking. 
*/ + +#include <uapi/linux/netfilter/nf_conntrack_sctp.h> + +struct ip_ct_sctp { + enum sctp_conntrack state; + + __be32 vtag[IP_CT_DIR_MAX]; +}; + +#endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 5646b24bfc64..ba0d9789eb6e 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -8,12 +8,12 @@ #include <uapi/linux/netfilter/nfnetlink.h> struct nfnl_callback { - int (*call)(struct sock *nl, struct sk_buff *skb, - const struct nlmsghdr *nlh, - const struct nlattr * const cda[]); - int (*call_rcu)(struct sock *nl, struct sk_buff *skb, + int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]); + int (*call_rcu)(struct net *net, struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const cda[]); int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]); @@ -26,8 +26,8 @@ struct nfnetlink_subsystem { __u8 subsys_id; /* nfnetlink subsystem ID */ __u8 cb_count; /* number of callbacks */ const struct nfnl_callback *cb; /* callback for individual types */ - int (*commit)(struct sk_buff *skb); - int (*abort)(struct sk_buff *skb); + int (*commit)(struct net *net, struct sk_buff *skb); + int (*abort)(struct net *net, struct sk_buff *skb); }; int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n); diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 639e9b8b0e4d..0b41959aab9f 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -131,6 +131,7 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) struct netlink_callback { struct sk_buff *skb; const struct nlmsghdr *nlh; + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); @@ -153,6 +154,7 @@ struct nlmsghdr * __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags); struct netlink_dump_control { + int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *skb, struct netlink_callback *); int (*done)(struct netlink_callback *); void *data; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index e7e78537aea2..d6f9b4e6006d 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -139,10 +139,10 @@ enum nfs_opnum4 { Needs to be updated if more operations are defined in future.*/ #define FIRST_NFS4_OP OP_ACCESS -#define LAST_NFS4_OP OP_WRITE_SAME #define LAST_NFS40_OP OP_RELEASE_LOCKOWNER #define LAST_NFS41_OP OP_RECLAIM_COMPLETE -#define LAST_NFS42_OP OP_WRITE_SAME +#define LAST_NFS42_OP OP_CLONE +#define LAST_NFS4_OP LAST_NFS42_OP enum nfsstat4 { NFS4_OK = 0, @@ -592,4 +592,18 @@ enum data_content4 { NFS4_CONTENT_HOLE = 1, }; +enum pnfs_update_layout_reason { + PNFS_UPDATE_LAYOUT_UNKNOWN = 0, + PNFS_UPDATE_LAYOUT_NO_PNFS, + PNFS_UPDATE_LAYOUT_RD_ZEROLEN, + PNFS_UPDATE_LAYOUT_MDSTHRESH, + PNFS_UPDATE_LAYOUT_NOMEM, + PNFS_UPDATE_LAYOUT_BULK_RECALL, + PNFS_UPDATE_LAYOUT_IO_TEST_FAIL, + PNFS_UPDATE_LAYOUT_FOUND_CACHED, + PNFS_UPDATE_LAYOUT_RETURN, + PNFS_UPDATE_LAYOUT_BLOCKED, + PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, +}; + #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c0e961474a52..48e0320cd643 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -60,18 +60,12 @@ struct nfs_lockowner { pid_t l_pid; }; -#define NFS_IO_INPROGRESS 0 
-struct nfs_io_counter { - unsigned long flags; - atomic_t io_count; -}; - struct nfs_lock_context { atomic_t count; struct list_head list; struct nfs_open_context *open_context; struct nfs_lockowner lockowner; - struct nfs_io_counter io_count; + atomic_t io_count; }; struct nfs4_state; @@ -216,7 +210,6 @@ struct nfs_inode { #define NFS_INO_FLUSHING (4) /* inode is flushing out data */ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ -#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ @@ -359,6 +352,7 @@ extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); +extern int nfs_revalidate_mapping_rcu(struct inode *inode); extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping); extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); @@ -517,13 +511,25 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned */ extern int nfs_sync_inode(struct inode *inode); extern int nfs_wb_all(struct inode *inode); -extern int nfs_wb_page(struct inode *inode, struct page* page); +extern int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); static inline int +nfs_wb_launder_page(struct inode *inode, struct page *page) +{ + return nfs_wb_single_page(inode, page, true); +} + +static inline int +nfs_wb_page(struct inode *inode, struct page *page) +{ + return nfs_wb_single_page(inode, page, false); +} + +static inline int nfs_have_writebacks(struct inode *inode) { return NFS_I(inode)->nrequests != 0; diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 2469ab0bb3a1..7fcc13c8cf1f 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -102,6 +102,7 @@ struct nfs_client { #define NFS_SP4_MACH_CRED_STATEID 4 /* TEST_STATEID and FREE_STATEID */ #define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */ #define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */ +#define NFS_SP4_MACH_CRED_PNFS_CLEANUP 7 /* LAYOUTRETURN */ #endif /* CONFIG_NFS_V4 */ /* Our own IP address, as a null-terminated string. 
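[Editorial note, not part of the diff: a minimal sketch of how the two nfs_wb_single_page() wrappers added to include/linux/nfs_fs.h above are meant to be used. The example_* callers and the ->launder_page wiring are illustrative assumptions, not taken from this patch.]

#include <linux/nfs_fs.h>	/* nfs_wb_page(), nfs_wb_launder_page() */

/* Hypothetical ->launder_page implementation: write the page back and
 * wait for it, i.e. nfs_wb_single_page() with launder == true. */
static int example_launder_page(struct page *page)
{
	struct inode *inode = page->mapping->host;

	return nfs_wb_launder_page(inode, page);
}

/* Hypothetical ordinary single-page flush: same helper, launder == false. */
static int example_flush_page(struct inode *inode, struct page *page)
{
	return nfs_wb_page(inode, page);
}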
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 11bbae44f4cb..791098a08a87 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1375,6 +1375,7 @@ enum { NFS_IOHDR_ERROR = 0, NFS_IOHDR_EOF, NFS_IOHDR_REDO, + NFS_IOHDR_STAT, }; struct nfs_pgio_header { @@ -1420,11 +1421,12 @@ struct nfs_mds_commit_info { struct list_head list; }; +struct nfs_commit_info; struct nfs_commit_data; struct nfs_inode; struct nfs_commit_completion_ops { - void (*error_cleanup) (struct nfs_inode *nfsi); void (*completion) (struct nfs_commit_data *data); + void (*resched_write) (struct nfs_commit_info *, struct nfs_page *); }; struct nfs_commit_info { @@ -1454,12 +1456,14 @@ struct nfs_commit_data { const struct rpc_call_ops *mds_ops; const struct nfs_commit_completion_ops *completion_ops; int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data); + unsigned long flags; }; struct nfs_pgio_completion_ops { void (*error_cleanup)(struct list_head *head); void (*init_hdr)(struct nfs_pgio_header *hdr); void (*completion)(struct nfs_pgio_header *hdr); + void (*reschedule_io)(struct nfs_pgio_header *hdr); }; struct nfs_unlinkdata { diff --git a/include/linux/nwpserial.h b/include/linux/nwpserial.h deleted file mode 100644 index 9acb21572eaf..000000000000 --- a/include/linux/nwpserial.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Serial Port driver for a NWP uart device - * - * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ -#ifndef _NWPSERIAL_H -#define _NWPSERIAL_H - -int nwpserial_register_port(struct uart_port *port); -void nwpserial_unregister_port(int line); - -#endif /* _NWPSERIAL_H */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 507daad0bc8d..01c0a556448b 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -3,6 +3,7 @@ #include <linux/ioport.h> #include <linux/errno.h> #include <linux/of.h> +#include <linux/io.h> struct of_pci_range_parser { struct device_node *node; @@ -36,6 +37,8 @@ extern struct device_node *of_find_matching_node_by_address( const struct of_device_id *matches, u64 base_address); extern void __iomem *of_iomap(struct device_node *device, int index); +void __iomem *of_io_request_and_map(struct device_node *device, + int index, const char *name); /* Extract an address from a device, returns the region size and * the address space flags too. 
The PCI version uses a BAR number @@ -57,6 +60,11 @@ extern int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size); extern bool of_dma_is_coherent(struct device_node *np); #else /* CONFIG_OF_ADDRESS */ +static inline void __iomem *of_io_request_and_map(struct device_node *device, + int index, const char *name) +{ + return IOMEM_ERR_PTR(-EINVAL); +} static inline u64 of_translate_address(struct device_node *np, const __be32 *addr) @@ -112,12 +120,7 @@ static inline bool of_dma_is_coherent(struct device_node *np) extern int of_address_to_resource(struct device_node *dev, int index, struct resource *r); void __iomem *of_iomap(struct device_node *node, int index); -void __iomem *of_io_request_and_map(struct device_node *device, - int index, const char *name); #else - -#include <linux/io.h> - static inline int of_address_to_resource(struct device_node *dev, int index, struct resource *r) { @@ -128,12 +131,6 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) { return NULL; } - -static inline void __iomem *of_io_request_and_map(struct device_node *device, - int index, const char *name) -{ - return IOMEM_ERR_PTR(-EINVAL); -} #endif #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index 88fa8af2b937..1d99b61adc65 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h @@ -267,6 +267,9 @@ struct omap_dma_reg { u8 type; }; +#define SDMA_FILTER_PARAM(hw_req) ((int[]) { (hw_req) }) +struct dma_slave_map; + /* System DMA platform data structure */ struct omap_system_dma_plat_info { const struct omap_dma_reg *reg_map; @@ -278,6 +281,9 @@ struct omap_system_dma_plat_info { void (*clear_dma)(int lch); void (*dma_write)(u32 val, int reg, int lch); u32 (*dma_read)(int reg, int lch); + + const struct dma_slave_map *slave_map; + int slavecnt; }; #ifdef CONFIG_ARCH_OMAP2PLUS diff --git a/include/linux/pci.h b/include/linux/pci.h index 6ae25aae88fd..d86378c226fb 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1946,6 +1946,16 @@ static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } #endif /* CONFIG_OF */ +#ifdef CONFIG_ACPI +struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); + +void +pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); +#else +static inline struct irq_domain * +pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } +#endif + #ifdef CONFIG_EEH static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d9ba49cedc5d..1acbefc4bbda 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2495,6 +2495,8 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700 #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff +#define PCI_VENDOR_ID_NETRONOME 0x19ee + #define PCI_VENDOR_ID_QMI 0x1a32 #define PCI_VENDOR_ID_AZWAVE 0x1a3b diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 12c9b485beb7..84f542df7ff5 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -116,7 +116,7 @@ void percpu_ref_reinit(struct percpu_ref *ref); */ static inline void percpu_ref_kill(struct percpu_ref *ref) { - return percpu_ref_kill_and_confirm(ref, NULL); + percpu_ref_kill_and_confirm(ref, NULL); } /* diff --git a/include/linux/percpu.h b/include/linux/percpu.h index caebf2a758dc..4bc6dafb703e 100644 --- 
a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -18,12 +18,6 @@ #define PERCPU_MODULE_RESERVE 0 #endif -#ifndef PERCPU_ENOUGH_ROOM -#define PERCPU_ENOUGH_ROOM \ - (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ - PERCPU_MODULE_RESERVE) -#endif - /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index bfa673bb822d..83b5e34c6580 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -111,8 +111,6 @@ struct arm_pmu { #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) -int armpmu_register(struct arm_pmu *armpmu, int type); - u64 armpmu_event_update(struct perf_event *event); int armpmu_event_set_period(struct perf_event *event); diff --git a/include/linux/pfn.h b/include/linux/pfn.h index 7646637221f3..97f3e88aead4 100644 --- a/include/linux/pfn.h +++ b/include/linux/pfn.h @@ -9,5 +9,6 @@ #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) #define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT) +#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT)) #endif diff --git a/include/linux/phy.h b/include/linux/phy.h index 05fde31b6dc6..d6f3641e7933 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -16,8 +16,10 @@ #ifndef __PHY_H #define __PHY_H +#include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/ethtool.h> +#include <linux/mdio.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/timer.h> @@ -58,6 +60,7 @@ #define PHY_HAS_INTERRUPT 0x00000001 #define PHY_HAS_MAGICANEG 0x00000002 #define PHY_IS_INTERNAL 0x00000004 +#define MDIO_DEVICE_IS_PHY 0x80000000 /* Interface Mode definitions */ typedef enum { @@ -158,8 +161,8 @@ struct mii_bus { const char *name; char id[MII_BUS_ID_SIZE]; void *priv; - int (*read)(struct mii_bus *bus, int phy_id, int regnum); - int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val); + int (*read)(struct mii_bus *bus, int addr, int regnum); + int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val); int (*reset)(struct mii_bus *bus); /* @@ -178,7 +181,7 @@ struct mii_bus { struct device dev; /* list of all PHYs on bus */ - struct phy_device *phy_map[PHY_MAX_ADDR]; + struct mdio_device *mdio_map[PHY_MAX_ADDR]; /* PHY addresses to be ignored when probing */ u32 phy_mask; @@ -187,10 +190,10 @@ struct mii_bus { u32 phy_ignore_ta_mask; /* - * Pointer to an array of interrupts, each PHY's - * interrupt at the index matching its address + * An array of interrupts, each PHY's interrupt at the index + * matching its address */ - int *irq; + int irq[PHY_MAX_ADDR]; }; #define to_mii_bus(d) container_of(d, struct mii_bus, dev) @@ -212,11 +215,6 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); -int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); -int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); -int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); -int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); - #define PHY_INTERRUPT_DISABLED 0x0 #define PHY_INTERRUPT_ENABLED 0x80000000 @@ -361,14 +359,12 @@ struct phy_c45_device_ids { * handling, as well as handling shifts in PHY hardware state */ struct phy_device { + struct mdio_device mdio; + /* Information about the PHY type 
*/ /* And management functions */ struct phy_driver *drv; - struct mii_bus *bus; - - struct device dev; - u32 phy_id; struct phy_c45_device_ids c45_ids; @@ -384,9 +380,6 @@ struct phy_device { phy_interface_t interface; - /* Bus address of the PHY (0-31) */ - int addr; - /* * forced speed & duplex (no autoneg) * partner speed & duplex & pause (autoneg) @@ -435,10 +428,12 @@ struct phy_device { void (*adjust_link)(struct net_device *dev); }; -#define to_phy_device(d) container_of(d, struct phy_device, dev) +#define to_phy_device(d) container_of(to_mdio_device(d), \ + struct phy_device, mdio) /* struct phy_driver: Driver structure for a particular PHY type * + * driver_data: static driver data * phy_id: The result of reading the UID registers of this PHY * type, and ANDing them with the phy_id_mask. This driver * only works for PHYs with IDs which match this field @@ -448,7 +443,6 @@ struct phy_device { * by this PHY * flags: A bitfield defining certain other features this PHY * supports (like interrupts) - * driver_data: static driver data * * The drivers must implement config_aneg and read_status. All * other functions are optional. Note that none of these @@ -459,6 +453,7 @@ struct phy_device { * supported in the driver). */ struct phy_driver { + struct mdio_driver_common mdiodrv; u32 phy_id; char *name; unsigned int phy_id_mask; @@ -589,9 +584,14 @@ struct phy_driver { int (*module_eeprom)(struct phy_device *dev, struct ethtool_eeprom *ee, u8 *data); - struct device_driver driver; + /* Get statistics from the phy using ethtool */ + int (*get_sset_count)(struct phy_device *dev); + void (*get_strings)(struct phy_device *dev, u8 *data); + void (*get_stats)(struct phy_device *dev, + struct ethtool_stats *stats, u64 *data); }; -#define to_phy_driver(d) container_of(d, struct phy_driver, driver) +#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ + struct phy_driver, mdiodrv) #define PHY_ANY_ID "MATCH ANY PHY" #define PHY_ANY_UID 0xffffffff @@ -619,7 +619,7 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) if (!phydev->is_c45) return -EOPNOTSUPP; - return mdiobus_read(phydev->bus, phydev->addr, + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff)); } @@ -627,14 +627,12 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) * phy_read_mmd_indirect - reads data from the MMD registers * @phydev: The PHY device bus * @prtad: MMD Address - * @devad: MMD DEVAD * @addr: PHY address on the MII bus * * Description: it reads data from the MMD registers (clause 22 to access to * clause 45) of the specified phy address. 
*/ -int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, - int devad, int addr); +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad); /** * phy_read - Convenience function for reading a given PHY register @@ -647,7 +645,7 @@ int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, */ static inline int phy_read(struct phy_device *phydev, u32 regnum) { - return mdiobus_read(phydev->bus, phydev->addr, regnum); + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); } /** @@ -662,7 +660,7 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum) */ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) { - return mdiobus_write(phydev->bus, phydev->addr, regnum, val); + return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); } /** @@ -725,7 +723,7 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad, regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff); - return mdiobus_write(phydev->bus, phydev->addr, regnum, val); + return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); } /** @@ -733,14 +731,13 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad, * @phydev: The PHY device * @prtad: MMD Address * @devad: MMD DEVAD - * @addr: PHY address on the MII bus * @data: data to write in the MMD register * * Description: Write data from the MMD registers of the specified * phy address. */ void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, - int devad, int addr, u32 data); + int devad, u32 data); struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, @@ -775,6 +772,20 @@ static inline int phy_read_status(struct phy_device *phydev) return phydev->drv->read_status(phydev); } +#define phydev_err(_phydev, format, args...) \ + dev_err(&_phydev->mdio.dev, format, ##args) + +#define phydev_dbg(_phydev, format, args...) \ + dev_dbg(&_phydev->mdio.dev, format, ##args); + +static inline const char *phydev_name(const struct phy_device *phydev) +{ + return dev_name(&phydev->mdio.dev); +} + +void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) 
+ __printf(2, 3); +void phy_attached_info(struct phy_device *phydev); int genphy_config_init(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); @@ -787,8 +798,9 @@ int genphy_resume(struct phy_device *phydev); int genphy_soft_reset(struct phy_device *phydev); void phy_driver_unregister(struct phy_driver *drv); void phy_drivers_unregister(struct phy_driver *drv, int n); -int phy_driver_register(struct phy_driver *new_driver); -int phy_drivers_register(struct phy_driver *new_driver, int n); +int phy_driver_register(struct phy_driver *new_driver, struct module *owner); +int phy_drivers_register(struct phy_driver *new_driver, int n, + struct module *owner); void phy_state_machine(struct work_struct *work); void phy_change(struct work_struct *work); void phy_mac_interrupt(struct phy_device *phydev, int new_link); @@ -833,7 +845,7 @@ extern struct bus_type mdio_bus_type; #define phy_module_driver(__phy_drivers, __count) \ static int __init phy_module_init(void) \ { \ - return phy_drivers_register(__phy_drivers, __count); \ + return phy_drivers_register(__phy_drivers, __count, THIS_MODULE); \ } \ module_init(phy_module_init); \ static void __exit phy_module_exit(void) \ diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h index dc2c541a619b..2e5fb870efa9 100644 --- a/include/linux/phy/omap_usb.h +++ b/include/linux/phy/omap_usb.h @@ -30,6 +30,12 @@ struct usb_dpll_params { u32 mf; }; +enum omap_usb_phy_type { + TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */ + TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */ + TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */ +}; + struct omap_usb { struct usb_phy phy; struct phy_companion *comparator; @@ -40,11 +46,20 @@ struct omap_usb { struct clk *wkupclk; struct clk *optclk; u8 flags; + enum omap_usb_phy_type type; + struct regmap *syscon_phy_power; /* ctrl. reg. acces */ + unsigned int power_reg; /* power reg. index within syscon */ + u32 mask; + u32 power_on; + u32 power_off; }; struct usb_phy_data { const char *label; u8 flags; + u32 mask; + u32 power_on; + u32 power_off; }; /* Driver Flags */ @@ -52,6 +67,14 @@ struct usb_phy_data { #define OMAP_USB2_HAS_SET_VBUS (1 << 1) #define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2) +#define OMAP_DEV_PHY_PD BIT(0) +#define OMAP_USB2_PHY_PD BIT(28) + +#define AM437X_USB2_PHY_PD BIT(0) +#define AM437X_USB2_OTG_PD BIT(1) +#define AM437X_USB2_OTGVDET_EN BIT(19) +#define AM437X_USB2_OTGSESSEND_EN BIT(20) + #define phy_to_omapusb(x) container_of((x), struct omap_usb, phy) #if defined(CONFIG_OMAP_USB2) || defined(CONFIG_OMAP_USB2_MODULE) diff --git a/include/linux/pim.h b/include/linux/pim.h index 252bf6644c51..e1d756f81348 100644 --- a/include/linux/pim.h +++ b/include/linux/pim.h @@ -13,6 +13,11 @@ #define PIM_NULL_REGISTER cpu_to_be32(0x40000000) +static inline bool ipmr_pimsm_enabled(void) +{ + return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2); +} + /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ struct pimreghdr { diff --git a/include/linux/platform_data/camera-rcar.h b/include/linux/platform_data/camera-rcar.h deleted file mode 100644 index dfc83c581593..000000000000 --- a/include/linux/platform_data/camera-rcar.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Platform data for Renesas R-Car VIN soc-camera driver - * - * Copyright (C) 2011-2013 Renesas Solutions Corp. 
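Example sketch (not part of the patch): the phy.h hunks above move the bus pointer and address into the embedded struct mdio_device and make the driver registration take a module owner. A minimal, hypothetical clause-22 PHY driver against this reworked API could look like the following; the PHY ID, mask and names are invented, while phy_read(), phydev_err() and phy_module_driver() come from the header changes shown here, and genphy_config_aneg()/genphy_read_status() are the existing generic helpers.

#include <linux/mii.h>
#include <linux/module.h>
#include <linux/phy.h>

#define EXAMPLE_PHY_ID	0x00112233		/* hypothetical OUI/model */

static int example_phy_config_init(struct phy_device *phydev)
{
	/* phy_read() now resolves through phydev->mdio.bus / phydev->mdio.addr */
	int val = phy_read(phydev, MII_BMSR);

	if (val < 0) {
		phydev_err(phydev, "failed to read BMSR\n");
		return val;
	}
	return 0;
}

static struct phy_driver example_phy_driver[] = {
	{
		.phy_id		= EXAMPLE_PHY_ID,
		.phy_id_mask	= 0xfffffff0,
		.name		= "Example PHY",
		.features	= PHY_BASIC_FEATURES,
		.flags		= PHY_HAS_INTERRUPT,
		.config_init	= example_phy_config_init,
		.config_aneg	= genphy_config_aneg,
		.read_status	= genphy_read_status,
	},
};

/* Registers the array with THIS_MODULE as owner, per the hunk above. */
phy_module_driver(example_phy_driver, ARRAY_SIZE(example_phy_driver));

MODULE_LICENSE("GPL");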
- * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef __CAMERA_RCAR_H_ -#define __CAMERA_RCAR_H_ - -#define RCAR_VIN_HSYNC_ACTIVE_LOW (1 << 0) -#define RCAR_VIN_VSYNC_ACTIVE_LOW (1 << 1) -#define RCAR_VIN_BT601 (1 << 2) -#define RCAR_VIN_BT656 (1 << 3) - -struct rcar_vin_platform_data { - unsigned int flags; -}; - -#endif /* __CAMERA_RCAR_H_ */ diff --git a/include/linux/platform_data/dma-rcar-hpbdma.h b/include/linux/platform_data/dma-rcar-hpbdma.h deleted file mode 100644 index 648b8ea61a22..000000000000 --- a/include/linux/platform_data/dma-rcar-hpbdma.h +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (C) 2011-2013 Renesas Electronics Corporation - * Copyright (C) 2013 Cogent Embedded, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - */ - -#ifndef __DMA_RCAR_HPBDMA_H -#define __DMA_RCAR_HPBDMA_H - -#include <linux/bitops.h> -#include <linux/types.h> - -/* Transmit sizes and respective register values */ -enum { - XMIT_SZ_8BIT = 0, - XMIT_SZ_16BIT = 1, - XMIT_SZ_32BIT = 2, - XMIT_SZ_MAX -}; - -/* DMA control register (DCR) bits */ -#define HPB_DMAE_DCR_DTAMD (1u << 26) -#define HPB_DMAE_DCR_DTAC (1u << 25) -#define HPB_DMAE_DCR_DTAU (1u << 24) -#define HPB_DMAE_DCR_DTAU1 (1u << 23) -#define HPB_DMAE_DCR_SWMD (1u << 22) -#define HPB_DMAE_DCR_BTMD (1u << 21) -#define HPB_DMAE_DCR_PKMD (1u << 20) -#define HPB_DMAE_DCR_CT (1u << 18) -#define HPB_DMAE_DCR_ACMD (1u << 17) -#define HPB_DMAE_DCR_DIP (1u << 16) -#define HPB_DMAE_DCR_SMDL (1u << 13) -#define HPB_DMAE_DCR_SPDAM (1u << 12) -#define HPB_DMAE_DCR_SDRMD_MASK (3u << 10) -#define HPB_DMAE_DCR_SDRMD_MOD (0u << 10) -#define HPB_DMAE_DCR_SDRMD_AUTO (1u << 10) -#define HPB_DMAE_DCR_SDRMD_TIMER (2u << 10) -#define HPB_DMAE_DCR_SPDS_MASK (3u << 8) -#define HPB_DMAE_DCR_SPDS_8BIT (0u << 8) -#define HPB_DMAE_DCR_SPDS_16BIT (1u << 8) -#define HPB_DMAE_DCR_SPDS_32BIT (2u << 8) -#define HPB_DMAE_DCR_DMDL (1u << 5) -#define HPB_DMAE_DCR_DPDAM (1u << 4) -#define HPB_DMAE_DCR_DDRMD_MASK (3u << 2) -#define HPB_DMAE_DCR_DDRMD_MOD (0u << 2) -#define HPB_DMAE_DCR_DDRMD_AUTO (1u << 2) -#define HPB_DMAE_DCR_DDRMD_TIMER (2u << 2) -#define HPB_DMAE_DCR_DPDS_MASK (3u << 0) -#define HPB_DMAE_DCR_DPDS_8BIT (0u << 0) -#define HPB_DMAE_DCR_DPDS_16BIT (1u << 0) -#define HPB_DMAE_DCR_DPDS_32BIT (2u << 0) - -/* Asynchronous reset register (ASYNCRSTR) bits */ -#define HPB_DMAE_ASYNCRSTR_ASRST41 BIT(10) -#define HPB_DMAE_ASYNCRSTR_ASRST40 BIT(9) -#define HPB_DMAE_ASYNCRSTR_ASRST39 BIT(8) -#define HPB_DMAE_ASYNCRSTR_ASRST27 BIT(7) -#define HPB_DMAE_ASYNCRSTR_ASRST26 BIT(6) -#define HPB_DMAE_ASYNCRSTR_ASRST25 BIT(5) -#define HPB_DMAE_ASYNCRSTR_ASRST24 BIT(4) -#define HPB_DMAE_ASYNCRSTR_ASRST23 BIT(3) -#define HPB_DMAE_ASYNCRSTR_ASRST22 BIT(2) -#define HPB_DMAE_ASYNCRSTR_ASRST21 BIT(1) -#define HPB_DMAE_ASYNCRSTR_ASRST20 BIT(0) - -struct hpb_dmae_slave_config { - unsigned int id; - dma_addr_t addr; - u32 dcr; - u32 port; - u32 rstr; - u32 mdr; - u32 mdm; - u32 flags; -#define HPB_DMAE_SET_ASYNC_RESET BIT(0) -#define HPB_DMAE_SET_ASYNC_MODE BIT(1) - u32 dma_ch; -}; - -#define HPB_DMAE_CHANNEL(_irq, _s_id) \ -{ \ - .ch_irq = _irq, \ 
- .s_id = _s_id, \ -} - -struct hpb_dmae_channel { - unsigned int ch_irq; - unsigned int s_id; -}; - -struct hpb_dmae_pdata { - const struct hpb_dmae_slave_config *slaves; - int num_slaves; - const struct hpb_dmae_channel *channels; - int num_channels; - const unsigned int ts_shift[XMIT_SZ_MAX]; - int num_hw_channels; -}; - -#endif diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index 4299f4ba03bd..0a533f94438f 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -53,12 +53,16 @@ enum dma_event_q { #define EDMA_CTLR(i) ((i) >> 16) #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) +#define EDMA_FILTER_PARAM(ctlr, chan) ((int[]) { EDMA_CTLR_CHAN(ctlr, chan) }) + struct edma_rsv_info { const s16 (*rsv_chans)[2]; const s16 (*rsv_slots)[2]; }; +struct dma_slave_map; + /* platform_data for EDMA driver */ struct edma_soc_info { /* @@ -76,6 +80,9 @@ struct edma_soc_info { s8 (*queue_priority_mapping)[2]; const s16 (*xbar_chans)[2]; + + const struct dma_slave_map *slave_map; + int slavecnt; }; #endif diff --git a/include/linux/platform_data/irq-renesas-intc-irqpin.h b/include/linux/platform_data/irq-renesas-intc-irqpin.h deleted file mode 100644 index e4cb911066a6..000000000000 --- a/include/linux/platform_data/irq-renesas-intc-irqpin.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Renesas INTC External IRQ Pin Driver - * - * Copyright (C) 2013 Magnus Damm - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
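Example sketch (not part of the patch): the edma.h hunk above adds EDMA_FILTER_PARAM() and a dma_slave_map table to struct edma_soc_info. A board file might wire that up roughly as below; the device names and channel numbers are invented, and struct dma_slave_map (devname/slave/param) is assumed to be the dmaengine mapping table this field expects.

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/platform_data/edma.h>

/* Hypothetical controller-0 channel assignments for two peripherals. */
static const struct dma_slave_map example_edma_map[] = {
	{ "example-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 1) },
	{ "example-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 0) },
	{ "example-mmc.0",   "tx", EDMA_FILTER_PARAM(0, 17) },
	{ "example-mmc.0",   "rx", EDMA_FILTER_PARAM(0, 16) },
};

static struct edma_soc_info example_edma_info = {
	.slave_map = example_edma_map,
	.slavecnt  = ARRAY_SIZE(example_edma_map),
};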
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __IRQ_RENESAS_INTC_IRQPIN_H__ -#define __IRQ_RENESAS_INTC_IRQPIN_H__ - -struct renesas_intc_irqpin_config { - unsigned int sense_bitfield_width; - unsigned int irq_base; - bool control_parent; -}; - -#endif /* __IRQ_RENESAS_INTC_IRQPIN_H__ */ diff --git a/include/linux/platform_data/camera-mx2.h b/include/linux/platform_data/media/camera-mx2.h index 7ded6f1f74bc..7ded6f1f74bc 100644 --- a/include/linux/platform_data/camera-mx2.h +++ b/include/linux/platform_data/media/camera-mx2.h diff --git a/include/linux/platform_data/camera-mx3.h b/include/linux/platform_data/media/camera-mx3.h index a910dadc8258..a910dadc8258 100644 --- a/include/linux/platform_data/camera-mx3.h +++ b/include/linux/platform_data/media/camera-mx3.h diff --git a/include/linux/platform_data/camera-pxa.h b/include/linux/platform_data/media/camera-pxa.h index 6709b1cd7c77..6709b1cd7c77 100644 --- a/include/linux/platform_data/camera-pxa.h +++ b/include/linux/platform_data/media/camera-pxa.h diff --git a/include/linux/platform_data/coda.h b/include/linux/platform_data/media/coda.h index 6ad4410d9e20..6ad4410d9e20 100644 --- a/include/linux/platform_data/coda.h +++ b/include/linux/platform_data/media/coda.h diff --git a/include/linux/platform_data/media/gpio-ir-recv.h b/include/linux/platform_data/media/gpio-ir-recv.h new file mode 100644 index 000000000000..0c298f569d5a --- /dev/null +++ b/include/linux/platform_data/media/gpio-ir-recv.h @@ -0,0 +1,23 @@ +/* Copyright (c) 2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __GPIO_IR_RECV_H__ +#define __GPIO_IR_RECV_H__ + +struct gpio_ir_recv_platform_data { + int gpio_nr; + bool active_low; + u64 allowed_protos; + const char *map_name; +}; + +#endif /* __GPIO_IR_RECV_H__ */ diff --git a/include/linux/platform_data/media/ir-rx51.h b/include/linux/platform_data/media/ir-rx51.h new file mode 100644 index 000000000000..104aa892f31b --- /dev/null +++ b/include/linux/platform_data/media/ir-rx51.h @@ -0,0 +1,10 @@ +#ifndef _LIRC_RX51_H +#define _LIRC_RX51_H + +struct lirc_rx51_platform_data { + int pwm_timer; + + int(*set_max_mpu_wakeup_lat)(struct device *dev, long t); +}; + +#endif diff --git a/include/linux/platform_data/media/mmp-camera.h b/include/linux/platform_data/media/mmp-camera.h new file mode 100644 index 000000000000..7611963a257f --- /dev/null +++ b/include/linux/platform_data/media/mmp-camera.h @@ -0,0 +1,9 @@ +/* + * Information for the Marvell Armada MMP camera + */ + +struct mmp_camera_platform_data { + struct platform_device *i2c_device; + int sensor_power_gpio; + int sensor_reset_gpio; +}; diff --git a/include/linux/platform_data/media/omap1_camera.h b/include/linux/platform_data/media/omap1_camera.h new file mode 100644 index 000000000000..819767cf04d4 --- /dev/null +++ b/include/linux/platform_data/media/omap1_camera.h @@ -0,0 +1,35 @@ +/* + * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface + * + * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MEDIA_OMAP1_CAMERA_H_ +#define __MEDIA_OMAP1_CAMERA_H_ + +#include <linux/bitops.h> + +#define OMAP1_CAMERA_IOSIZE 0x1c + +enum omap1_cam_vb_mode { + OMAP1_CAM_DMA_CONTIG = 0, + OMAP1_CAM_DMA_SG, +}; + +#define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 
3 : 2) + +struct omap1_cam_platform_data { + unsigned long camexclk_khz; + unsigned long lclk_khz_max; + unsigned long flags; +}; + +#define OMAP1_CAMERA_LCLK_RISING BIT(0) +#define OMAP1_CAMERA_RST_LOW BIT(1) +#define OMAP1_CAMERA_RST_HIGH BIT(2) + +#endif /* __MEDIA_OMAP1_CAMERA_H_ */ diff --git a/include/linux/platform_data/media/omap4iss.h b/include/linux/platform_data/media/omap4iss.h new file mode 100644 index 000000000000..0d7620db5e32 --- /dev/null +++ b/include/linux/platform_data/media/omap4iss.h @@ -0,0 +1,65 @@ +#ifndef ARCH_ARM_PLAT_OMAP4_ISS_H +#define ARCH_ARM_PLAT_OMAP4_ISS_H + +#include <linux/i2c.h> + +struct iss_device; + +enum iss_interface_type { + ISS_INTERFACE_CSI2A_PHY1, + ISS_INTERFACE_CSI2B_PHY2, +}; + +/** + * struct iss_csiphy_lane: CSI2 lane position and polarity + * @pos: position of the lane + * @pol: polarity of the lane + */ +struct iss_csiphy_lane { + u8 pos; + u8 pol; +}; + +#define ISS_CSIPHY1_NUM_DATA_LANES 4 +#define ISS_CSIPHY2_NUM_DATA_LANES 1 + +/** + * struct iss_csiphy_lanes_cfg - CSI2 lane configuration + * @data: Configuration of one or two data lanes + * @clk: Clock lane configuration + */ +struct iss_csiphy_lanes_cfg { + struct iss_csiphy_lane data[ISS_CSIPHY1_NUM_DATA_LANES]; + struct iss_csiphy_lane clk; +}; + +/** + * struct iss_csi2_platform_data - CSI2 interface platform data + * @crc: Enable the cyclic redundancy check + * @vpclk_div: Video port output clock control + */ +struct iss_csi2_platform_data { + unsigned crc:1; + unsigned vpclk_div:2; + struct iss_csiphy_lanes_cfg lanecfg; +}; + +struct iss_subdev_i2c_board_info { + struct i2c_board_info *board_info; + int i2c_adapter_id; +}; + +struct iss_v4l2_subdevs_group { + struct iss_subdev_i2c_board_info *subdevs; + enum iss_interface_type interface; + union { + struct iss_csi2_platform_data csi2; + } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */ +}; + +struct iss_platform_data { + struct iss_v4l2_subdevs_group *subdevs; + void (*set_constraints)(struct iss_device *iss, bool enable); +}; + +#endif diff --git a/include/linux/platform_data/media/s5p_hdmi.h b/include/linux/platform_data/media/s5p_hdmi.h new file mode 100644 index 000000000000..bb9cacb0cbb0 --- /dev/null +++ b/include/linux/platform_data/media/s5p_hdmi.h @@ -0,0 +1,36 @@ +/* + * Driver header for S5P HDMI chip. + * + * Copyright (c) 2011 Samsung Electronics, Co. Ltd + * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef S5P_HDMI_H +#define S5P_HDMI_H + +struct i2c_board_info; + +/** + * @hdmiphy_bus: controller id for HDMIPHY bus + * @hdmiphy_info: template for HDMIPHY I2C device + * @mhl_bus: controller id for MHL control bus + * @mhl_info: template for MHL I2C device + * @hpd_gpio: GPIO for Hot-Plug-Detect pin + * + * NULL pointer for *_info fields indicates that + * the corresponding chip is not present + */ +struct s5p_hdmi_platform_data { + int hdmiphy_bus; + struct i2c_board_info *hdmiphy_info; + int mhl_bus; + struct i2c_board_info *mhl_info; + int hpd_gpio; +}; + +#endif /* S5P_HDMI_H */ diff --git a/include/linux/platform_data/media/si4713.h b/include/linux/platform_data/media/si4713.h new file mode 100644 index 000000000000..932668ad54f7 --- /dev/null +++ b/include/linux/platform_data/media/si4713.h @@ -0,0 +1,48 @@ +/* + * include/linux/platform_data/media/si4713.h + * + * Board related data definitions for Si4713 i2c device driver. + * + * Copyright (c) 2009 Nokia Corporation + * Contact: Eduardo Valentin <eduardo.valentin@nokia.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + * + */ + +#ifndef SI4713_H +#define SI4713_H + +/* The SI4713 I2C sensor chip has a fixed slave address of 0xc6 or 0x22. */ +#define SI4713_I2C_ADDR_BUSEN_HIGH 0x63 +#define SI4713_I2C_ADDR_BUSEN_LOW 0x11 + +/* + * Platform dependent definition + */ +struct si4713_platform_data { + bool is_platform_device; +}; + +/* + * Structure to query for Received Noise Level (RNL). + */ +struct si4713_rnl { + __u32 index; /* modulator index */ + __u32 frequency; /* frequency to peform rnl measurement */ + __s32 rnl; /* result of measurement in dBuV */ + __u32 reserved[4]; /* drivers and apps must init this to 0 */ +}; + +/* + * This is the ioctl number to query for rnl. Users must pass a + * struct si4713_rnl pointer specifying desired frequency in 'frequency' field + * following driver capabilities (i.e V4L2_TUNER_CAP_LOW). + * Driver must return measured value in the same struture, filling 'rnl' field. + */ +#define SI4713_IOC_MEASURE_RNL _IOWR('V', BASE_VIDIOC_PRIVATE + 0, \ + struct si4713_rnl) + +#endif /* ifndef SI4713_H*/ diff --git a/include/linux/platform_data/media/sii9234.h b/include/linux/platform_data/media/sii9234.h new file mode 100644 index 000000000000..6a4a809fe9a3 --- /dev/null +++ b/include/linux/platform_data/media/sii9234.h @@ -0,0 +1,24 @@ +/* + * Driver header for SII9234 MHL converter chip. + * + * Copyright (c) 2011 Samsung Electronics, Co. Ltd + * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef SII9234_H +#define SII9234_H + +/** + * @gpio_n_reset: GPIO driving nRESET pin + */ + +struct sii9234_platform_data { + int gpio_n_reset; +}; + +#endif /* SII9234_H */ diff --git a/include/linux/platform_data/media/soc_camera_platform.h b/include/linux/platform_data/media/soc_camera_platform.h new file mode 100644 index 000000000000..1e5065dab430 --- /dev/null +++ b/include/linux/platform_data/media/soc_camera_platform.h @@ -0,0 +1,83 @@ +/* + * Generic Platform Camera Driver Header + * + * Copyright (C) 2008 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __SOC_CAMERA_H__ +#define __SOC_CAMERA_H__ + +#include <linux/videodev2.h> +#include <media/soc_camera.h> +#include <media/v4l2-mediabus.h> + +struct device; + +struct soc_camera_platform_info { + const char *format_name; + unsigned long format_depth; + struct v4l2_mbus_framefmt format; + unsigned long mbus_param; + enum v4l2_mbus_type mbus_type; + struct soc_camera_device *icd; + int (*set_capture)(struct soc_camera_platform_info *info, int enable); +}; + +static inline void soc_camera_platform_release(struct platform_device **pdev) +{ + *pdev = NULL; +} + +static inline int soc_camera_platform_add(struct soc_camera_device *icd, + struct platform_device **pdev, + struct soc_camera_link *plink, + void (*release)(struct device *dev), + int id) +{ + struct soc_camera_subdev_desc *ssdd = + (struct soc_camera_subdev_desc *)plink; + struct soc_camera_platform_info *info = ssdd->drv_priv; + int ret; + + if (&icd->sdesc->subdev_desc != ssdd) + return -ENODEV; + + if (*pdev) + return -EBUSY; + + *pdev = platform_device_alloc("soc_camera_platform", id); + if (!*pdev) + return -ENOMEM; + + info->icd = icd; + + (*pdev)->dev.platform_data = info; + (*pdev)->dev.release = release; + + ret = platform_device_add(*pdev); + if (ret < 0) { + platform_device_put(*pdev); + *pdev = NULL; + info->icd = NULL; + } + + return ret; +} + +static inline void soc_camera_platform_del(const struct soc_camera_device *icd, + struct platform_device *pdev, + const struct soc_camera_link *plink) +{ + const struct soc_camera_subdev_desc *ssdd = + (const struct soc_camera_subdev_desc *)plink; + if (&icd->sdesc->subdev_desc != ssdd || !pdev) + return; + + platform_device_unregister(pdev); +} + +#endif /* __SOC_CAMERA_H__ */ diff --git a/include/linux/platform_data/media/timb_radio.h b/include/linux/platform_data/media/timb_radio.h new file mode 100644 index 000000000000..a40a6a348d21 --- /dev/null +++ b/include/linux/platform_data/media/timb_radio.h @@ -0,0 +1,30 @@ +/* + * timb_radio.h Platform struct for the Timberdale radio driver + * Copyright (c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef _TIMB_RADIO_ +#define _TIMB_RADIO_ 1 + +#include <linux/i2c.h> + +struct timb_radio_platform_data { + int i2c_adapter; /* I2C adapter where the tuner and dsp are attached */ + struct i2c_board_info *tuner; + struct i2c_board_info *dsp; +}; + +#endif diff --git a/include/linux/platform_data/media/timb_video.h b/include/linux/platform_data/media/timb_video.h new file mode 100644 index 000000000000..70ae43970a49 --- /dev/null +++ b/include/linux/platform_data/media/timb_video.h @@ -0,0 +1,33 @@ +/* + * timb_video.h Platform struct for the Timberdale video driver + * Copyright (c) 2009-2010 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _TIMB_VIDEO_ +#define _TIMB_VIDEO_ 1 + +#include <linux/i2c.h> + +struct timb_video_platform_data { + int dma_channel; + int i2c_adapter; /* The I2C adapter where the encoder is attached */ + struct { + const char *module_name; + struct i2c_board_info *info; + } encoder; +}; + +#endif diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h index cfda59b226ee..ca13992089b8 100644 --- a/include/linux/platform_data/microread.h +++ b/include/linux/platform_data/microread.h @@ -1,5 +1,5 @@ /* - * Driver include for the PN544 NFC chip. + * Driver include for the Inside Secure microread NFC Chip. * * Copyright (C) 2011 Tieto Poland * Copyright (C) 2012 Intel Corporation. All rights reserved. diff --git a/include/linux/platform_data/mmc-mvsdio.h b/include/linux/platform_data/mmc-mvsdio.h deleted file mode 100644 index d02704cd3695..000000000000 --- a/include/linux/platform_data/mmc-mvsdio.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __MMC_MVSDIO_H -#define __MMC_MVSDIO_H - -#include <linux/mbus.h> - -struct mvsdio_platform_data { - unsigned int clock; - int gpio_card_detect; - int gpio_write_protect; -}; - -#endif diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index d3889b98a1a1..fb5625bcca9a 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -40,6 +40,8 @@ struct s3c64xx_spi_info { int num_cs; int (*cfg_gpio)(void); dma_filter_fn filter; + void *dma_tx; + void *dma_rx; }; /** diff --git a/include/linux/platform_data/usb-rcar-phy.h b/include/linux/platform_data/usb-rcar-phy.h deleted file mode 100644 index 8ec6964a32a5..000000000000 --- a/include/linux/platform_data/usb-rcar-phy.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (C) 2013 Renesas Solutions Corp. - * Copyright (C) 2013 Cogent Embedded, Inc. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __USB_RCAR_PHY_H -#define __USB_RCAR_PHY_H - -#include <linux/types.h> - -struct rcar_phy_platform_data { - bool ferrite_bead:1; /* (R8A7778 only) */ - - bool port1_func:1; /* true: port 1 used by function, false: host */ - unsigned penc1:1; /* Output of the PENC1 pin in function mode */ - struct { /* Overcurrent pin control for ports 0..2 */ - bool select_3_3v:1; /* true: USB_OVCn pin, false: OVCn pin */ - /* Set to false on port 1 in function mode */ - bool active_high:1; /* true: active high, false: active low */ - /* Set to true on port 1 in function mode */ - } ovc_pin[3]; /* (R8A7778 only has 2 ports) */ -}; - -#endif /* __USB_RCAR_PHY_H */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index dc777be5f2e1..03b755521fd9 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -18,6 +18,7 @@ #define PLATFORM_DEVID_AUTO (-2) struct mfd_cell; +struct property_set; struct platform_device { const char *name; @@ -51,6 +52,7 @@ extern void arch_setup_pdev_archdata(struct platform_device *); extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); extern int platform_get_irq(struct platform_device *, unsigned int); +extern int platform_irq_count(struct platform_device *); extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, const char *); @@ -70,6 +72,8 @@ struct platform_device_info { const void *data; size_t size_data; u64 dma_mask; + + const struct property_set *pset; }; extern struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo); @@ -167,6 +171,8 @@ extern int platform_device_add_resources(struct platform_device *pdev, unsigned int num); extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); +extern int platform_device_add_properties(struct platform_device *pdev, + const struct property_set *pset); extern int platform_device_add(struct platform_device *pdev); extern void platform_device_del(struct platform_device *pdev); extern void platform_device_put(struct platform_device *pdev); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 9a2e50337af9..95403d2ccaf5 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -55,6 +55,11 @@ int dev_pm_opp_enable(struct device *dev, unsigned long freq); int dev_pm_opp_disable(struct device *dev, unsigned long freq); struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev); +int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, + unsigned int count); +void dev_pm_opp_put_supported_hw(struct device *dev); +int dev_pm_opp_set_prop_name(struct device *dev, const char *name); +void dev_pm_opp_put_prop_name(struct device *dev); #else static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) { @@ -129,6 +134,23 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( { return ERR_PTR(-EINVAL); } + +static inline int dev_pm_opp_set_supported_hw(struct device *dev, + const u32 *versions, + unsigned int count) +{ + return -EINVAL; +} + +static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} + +static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) +{ + return -EINVAL; +} + +static inline void 
dev_pm_opp_put_prop_name(struct device *dev) {} + #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 3bdbb4189780..7af093d6a4dd 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -39,6 +39,7 @@ extern int pm_runtime_force_resume(struct device *dev); extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); extern int __pm_runtime_resume(struct device *dev, int rpmflags); +extern int pm_runtime_get_if_in_use(struct device *dev); extern int pm_schedule_suspend(struct device *dev, unsigned int delay); extern int __pm_runtime_set_status(struct device *dev, unsigned int status); extern int pm_runtime_barrier(struct device *dev); @@ -143,6 +144,10 @@ static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) { return -ENOSYS; } +static inline int pm_runtime_get_if_in_use(struct device *dev) +{ + return -EINVAL; +} static inline int __pm_runtime_set_status(struct device *dev, unsigned int status) { return 0; } static inline int pm_runtime_barrier(struct device *dev) { return 0; } diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h index 6f14ee295822..e5e8ec40278d 100644 --- a/include/linux/posix_acl_xattr.h +++ b/include/linux/posix_acl_xattr.h @@ -9,16 +9,12 @@ #ifndef _POSIX_ACL_XATTR_H #define _POSIX_ACL_XATTR_H +#include <uapi/linux/xattr.h> #include <linux/posix_acl.h> -/* Extended attribute names */ -#define POSIX_ACL_XATTR_ACCESS "system.posix_acl_access" -#define POSIX_ACL_XATTR_DEFAULT "system.posix_acl_default" - /* Supported ACL a_version fields */ #define POSIX_ACL_XATTR_VERSION 0x0002 - /* An undefined entry e_id value */ #define ACL_UNDEFINED_ID (-1) diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 45f6a7b5b3cb..998d8f1c3c91 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -1,6 +1,16 @@ #ifndef __LINUX_BQ27X00_BATTERY_H__ #define __LINUX_BQ27X00_BATTERY_H__ +enum bq27xxx_chip { + BQ27000 = 1, /* bq27000, bq27200 */ + BQ27010, /* bq27010, bq27210 */ + BQ27500, /* bq27500, bq27510, bq27520 */ + BQ27530, /* bq27530, bq27531 */ + BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ + BQ27545, /* bq27545 */ + BQ27421, /* bq27421, bq27425, bq27441, bq27621 */ +}; + /** * struct bq27xxx_plaform_data - Platform data for bq27xxx devices * @name: Name of the battery. @@ -12,20 +22,47 @@ * register to be read. The return value should either be the content of * the passed register or an error value. 
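Example sketch (not part of the patch): the pm_runtime.h hunk above exports pm_runtime_get_if_in_use(). A small usage sketch follows; example_poll_hw() is a made-up stand-in for driver work, and the assumption (consistent with the stub returning -EINVAL when runtime PM is unavailable) is that only a positive return means a usage-count reference was taken and must be dropped with pm_runtime_put().

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver work that touches the hardware. */
static void example_poll_hw(struct device *dev)
{
}

static void example_poll_if_active(struct device *dev)
{
	/* Take a reference only if the device is already in active use. */
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;

	example_poll_hw(dev);

	pm_runtime_put(dev);
}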
*/ -enum bq27xxx_chip { - BQ27000 = 1, /* bq27000, bq27200 */ - BQ27010, /* bq27010, bq27210 */ - BQ27500, /* bq27500, bq27510, bq27520 */ - BQ27530, /* bq27530, bq27531 */ - BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ - BQ27545, /* bq27545 */ - BQ27421, /* bq27421, bq27425, bq27441, bq27621 */ -}; - struct bq27xxx_platform_data { const char *name; enum bq27xxx_chip chip; int (*read)(struct device *dev, unsigned int); }; +struct bq27xxx_device_info; +struct bq27xxx_access_methods { + int (*read)(struct bq27xxx_device_info *di, u8 reg, bool single); +}; + +struct bq27xxx_reg_cache { + int temperature; + int time_to_empty; + int time_to_empty_avg; + int time_to_full; + int charge_full; + int cycle_count; + int capacity; + int energy; + int flags; + int power_avg; + int health; +}; + +struct bq27xxx_device_info { + struct device *dev; + enum bq27xxx_chip chip; + const char *name; + struct bq27xxx_access_methods bus; + struct bq27xxx_reg_cache cache; + int charge_design_full; + unsigned long last_update; + struct delayed_work work; + struct power_supply *bat; + struct mutex lock; + u8 *regs; +}; + +void bq27xxx_battery_update(struct bq27xxx_device_info *di); +int bq27xxx_battery_setup(struct bq27xxx_device_info *di); +void bq27xxx_battery_teardown(struct bq27xxx_device_info *di); + #endif diff --git a/include/linux/powercap.h b/include/linux/powercap.h index 4e250417ee30..f0a4e6257dcc 100644 --- a/include/linux/powercap.h +++ b/include/linux/powercap.h @@ -208,7 +208,7 @@ struct powercap_zone_constraint_ops { struct powercap_zone_constraint { int id; struct powercap_zone *power_zone; - struct powercap_zone_constraint_ops *ops; + const struct powercap_zone_constraint_ops *ops; }; @@ -309,7 +309,7 @@ struct powercap_zone *powercap_register_zone( struct powercap_zone *parent, const struct powercap_zone_ops *ops, int nr_constraints, - struct powercap_zone_constraint_ops *const_ops); + const struct powercap_zone_constraint_ops *const_ops); /** * powercap_unregister_zone() - Unregister a zone device diff --git a/include/linux/property.h b/include/linux/property.h index 0a3705a7c9f2..b51fcd36d892 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -73,8 +73,8 @@ int fwnode_property_match_string(struct fwnode_handle *fwnode, struct fwnode_handle *device_get_next_child_node(struct device *dev, struct fwnode_handle *child); -#define device_for_each_child_node(dev, child) \ - for (child = device_get_next_child_node(dev, NULL); child; \ +#define device_for_each_child_node(dev, child) \ + for (child = device_get_next_child_node(dev, NULL); child; \ child = device_get_next_child_node(dev, child)) void fwnode_handle_put(struct fwnode_handle *fwnode); @@ -144,24 +144,100 @@ static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode, /** * struct property_entry - "Built-in" device property representation. * @name: Name of the property. - * @type: Type of the property. - * @nval: Number of items of type @type making up the value. - * @value: Value of the property (an array of @nval items of type @type). + * @length: Length of data making up the value. + * @is_array: True when the property is an array. + * @is_string: True when property is a string. + * @pointer: Pointer to the property (an array of items of the given type). + * @value: Value of the property (when it is a single item of the given type). 
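Example sketch (not part of the patch): the bq27xxx hunks above export the gauge core (bq27xxx_battery_setup/update/teardown) and route register access through the bus.read callback. The sketch below shows how an I2C glue driver might hook in; the SMBus transfers, function names and error handling are simplified assumptions, and only the structure fields and entry points come from the header.

#include <linux/i2c.h>
#include <linux/power/bq27xxx_battery.h>
#include <linux/slab.h>

static int example_bq27xxx_read(struct bq27xxx_device_info *di, u8 reg,
				bool single)
{
	struct i2c_client *client = to_i2c_client(di->dev);

	/* Simplified: a real glue driver may need a combined write/read transfer. */
	return single ? i2c_smbus_read_byte_data(client, reg)
		      : i2c_smbus_read_word_data(client, reg);
}

static int example_bq27xxx_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
{
	struct bq27xxx_device_info *di;

	di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
	if (!di)
		return -ENOMEM;

	di->dev = &client->dev;
	di->chip = id->driver_data;	/* e.g. BQ27421 */
	di->name = id->name;
	di->bus.read = example_bq27xxx_read;

	i2c_set_clientdata(client, di);

	return bq27xxx_battery_setup(di);
}

static int example_bq27xxx_remove(struct i2c_client *client)
{
	bq27xxx_battery_teardown(i2c_get_clientdata(client));
	return 0;
}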
*/ struct property_entry { const char *name; - enum dev_prop_type type; - size_t nval; + size_t length; + bool is_array; + bool is_string; union { - void *raw_data; - u8 *u8_data; - u16 *u16_data; - u32 *u32_data; - u64 *u64_data; - const char **str; - } value; + union { + void *raw_data; + u8 *u8_data; + u16 *u16_data; + u32 *u32_data; + u64 *u64_data; + const char **str; + } pointer; + union { + unsigned long long raw_data; + u8 u8_data; + u16 u16_data; + u32 u32_data; + u64 u64_data; + const char *str; + } value; + }; }; +/* + * Note: the below four initializers for the anonymous union are carefully + * crafted to avoid gcc-4.4.4's problems with initialization of anon unions + * and structs. + */ + +#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \ +{ \ + .name = _name_, \ + .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ + .is_array = true, \ + .is_string = false, \ + { .pointer = { _type_##_data = _val_ } }, \ +} + +#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, _val_) +#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, _val_) +#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, _val_) +#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_) + +#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ +{ \ + .name = _name_, \ + .length = ARRAY_SIZE(_val_) * sizeof(const char *), \ + .is_array = true, \ + .is_string = true, \ + { .pointer = { .str = _val_ } }, \ +} + +#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \ +{ \ + .name = _name_, \ + .length = sizeof(_type_), \ + .is_string = false, \ + { .value = { ._type_##_data = _val_ } }, \ +} + +#define PROPERTY_ENTRY_U8(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u8, _val_) +#define PROPERTY_ENTRY_U16(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u16, _val_) +#define PROPERTY_ENTRY_U32(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u32, _val_) +#define PROPERTY_ENTRY_U64(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u64, _val_) + +#define PROPERTY_ENTRY_STRING(_name_, _val_) \ +{ \ + .name = _name_, \ + .length = sizeof(_val_), \ + .is_string = true, \ + { .value = { .str = _val_ } }, \ +} + +#define PROPERTY_ENTRY_BOOL(_name_) \ +{ \ + .name = _name_, \ +} + /** * struct property_set - Collection of "built-in" device properties. * @fwnode: Handle to be pointed to by the fwnode field of struct device. @@ -172,7 +248,8 @@ struct property_set { struct property_entry *properties; }; -void device_add_property_set(struct device *dev, struct property_set *pset); +int device_add_property_set(struct device *dev, const struct property_set *pset); +void device_remove_property_set(struct device *dev); bool device_dma_supported(struct device *dev); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index dc9a1353f971..d4a32e878180 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -25,6 +25,12 @@ #include <linux/qed/common_hsi.h> #include <linux/qed/qed_chain.h> +enum qed_led_mode { + QED_LED_MODE_OFF, + QED_LED_MODE_ON, + QED_LED_MODE_RESTORE +}; + #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) @@ -252,6 +258,17 @@ struct qed_common_ops { void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); + +/** + * @brief set_led - Configure LED mode + * + * @param cdev + * @param mode - LED mode + * + * @return 0 on success, error otherwise. 
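Example sketch (not part of the patch): the property.h rework above replaces the typed property_entry with length/is_array/is_string plus the PROPERTY_ENTRY_*() initializers, and platform_device.h earlier in this diff grows platform_device_add_properties(). Declaring a built-in property set might look like the following; the property names and values are invented.

#include <linux/platform_device.h>
#include <linux/property.h>

static struct property_entry example_properties[] = {
	PROPERTY_ENTRY_U32("example,reset-delay-ms", 50),
	PROPERTY_ENTRY_STRING("example,label", "demo"),
	PROPERTY_ENTRY_BOOL("example,wakeup-source"),
	{ },	/* sentinel entry (NULL name) terminates the array */
};

static const struct property_set example_pset = {
	.properties = example_properties,
};

static int example_attach_properties(struct platform_device *pdev)
{
	return platform_device_add_properties(pdev, &example_pset);
}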
+ */ + int (*set_led)(struct qed_dev *cdev, + enum qed_led_mode mode); }; /** diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 5ed540986019..14ec1652daf4 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -179,32 +179,31 @@ static inline void list_replace_rcu(struct list_head *old, } /** - * list_splice_init_rcu - splice an RCU-protected list into an existing list. + * __list_splice_init_rcu - join an RCU-protected list into an existing list. * @list: the RCU-protected list to splice - * @head: the place in the list to splice the first list into + * @prev: points to the last element of the existing list + * @next: points to the first element of the existing list * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... * - * @head can be RCU-read traversed concurrently with this function. + * The list pointed to by @prev and @next can be RCU-read traversed + * concurrently with this function. * * Note that this function blocks. * - * Important note: the caller must take whatever action is necessary to - * prevent any other updates to @head. In principle, it is possible - * to modify the list as soon as sync() begins execution. - * If this sort of thing becomes necessary, an alternative version - * based on call_rcu() could be created. But only if -really- - * needed -- there is no shortage of RCU API members. + * Important note: the caller must take whatever action is necessary to prevent + * any other updates to the existing list. In principle, it is possible to + * modify the list as soon as sync() begins execution. If this sort of thing + * becomes necessary, an alternative version based on call_rcu() could be + * created. But only if -really- needed -- there is no shortage of RCU API + * members. */ -static inline void list_splice_init_rcu(struct list_head *list, - struct list_head *head, - void (*sync)(void)) +static inline void __list_splice_init_rcu(struct list_head *list, + struct list_head *prev, + struct list_head *next, + void (*sync)(void)) { struct list_head *first = list->next; struct list_head *last = list->prev; - struct list_head *at = head->next; - - if (list_empty(list)) - return; /* * "first" and "last" tracking list, so initialize it. RCU readers @@ -231,10 +230,40 @@ static inline void list_splice_init_rcu(struct list_head *list, * this function. */ - last->next = at; - rcu_assign_pointer(list_next_rcu(head), first); - first->prev = head; - at->prev = last; + last->next = next; + rcu_assign_pointer(list_next_rcu(prev), first); + first->prev = prev; + next->prev = last; +} + +/** + * list_splice_init_rcu - splice an RCU-protected list into an existing list, + * designed for stacks. + * @list: the RCU-protected list to splice + * @head: the place in the existing list to splice the first list into + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + */ +static inline void list_splice_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + if (!list_empty(list)) + __list_splice_init_rcu(list, head, head->next, sync); +} + +/** + * list_splice_tail_init_rcu - splice an RCU-protected list into an existing + * list, designed for queues. + * @list: the RCU-protected list to splice + * @head: the place in the existing list to splice the first list into + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... 
+ */ +static inline void list_splice_tail_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + if (!list_empty(list)) + __list_splice_init_rcu(list, head->prev, head, sync); } /** @@ -305,6 +334,42 @@ static inline void list_splice_init_rcu(struct list_head *list, pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** + * list_entry_lockless - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu(), but requires some implicit RCU + * read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where + * lockdep cannot be invoked (in which case updaters must use RCU-sched, + * as in synchronize_sched(), call_rcu_sched(), and friends). Another + * example is when items are added to the list, but never deleted. + */ +#define list_entry_lockless(ptr, type, member) \ + container_of((typeof(ptr))lockless_dereference(ptr), type, member) + +/** + * list_for_each_entry_lockless - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu(), but requires some implicit RCU + * read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where + * lockdep cannot be invoked (in which case updaters must use RCU-sched, + * as in synchronize_sched(), call_rcu_sched(), and friends). Another + * example is when items are added to the list, but never deleted. + */ +#define list_for_each_entry_lockless(pos, head, member) \ + for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry_lockless(pos->member.next, typeof(*pos), member)) + +/** * list_for_each_entry_continue_rcu - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index a0189ba67fde..14e6f47ee16f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -48,10 +48,17 @@ #include <asm/barrier.h> +#ifndef CONFIG_TINY_RCU extern int rcu_expedited; /* for sysctl */ +extern int rcu_normal; /* also for sysctl */ +#endif /* #ifndef CONFIG_TINY_RCU */ #ifdef CONFIG_TINY_RCU /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */ +static inline bool rcu_gp_is_normal(void) /* Internal RCU use. */ +{ + return true; +} static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */ { return false; @@ -65,6 +72,7 @@ static inline void rcu_unexpedite_gp(void) { } #else /* #ifdef CONFIG_TINY_RCU */ +bool rcu_gp_is_normal(void); /* Internal RCU use. */ bool rcu_gp_is_expedited(void); /* Internal RCU use. 
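Example sketch (not part of the patch): the rculist.h additions above give a tail splice for queue-like use and a lockless iterator for add-only lists. The item type, mutex and list names below are invented. Note that the splice helpers call sync() and therefore block, so the updater must run in sleepable context.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct example_item {
	int value;
	struct list_head node;
};

static LIST_HEAD(example_list);		/* RCU-read, add-only list */
static DEFINE_MUTEX(example_lock);

/* Updater: append everything queued on @pending, preserving its order. */
static void example_flush_pending(struct list_head *pending)
{
	mutex_lock(&example_lock);
	list_splice_tail_init_rcu(pending, &example_list, synchronize_rcu);
	mutex_unlock(&example_lock);
}

/* Reader: items are added but never deleted, so the lockless iterator applies. */
static int example_sum(void)
{
	struct example_item *item;
	int sum = 0;

	list_for_each_entry_lockless(item, &example_list, node)
		sum += item->value;

	return sum;
}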
*/ void rcu_expedite_gp(void); void rcu_unexpedite_gp(void); @@ -321,7 +329,6 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); -void rcu_end_inkernel_boot(void); void rcu_sched_qs(void); void rcu_bh_qs(void); void rcu_check_callbacks(int user); @@ -329,6 +336,12 @@ struct notifier_block; int rcu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu); +#ifndef CONFIG_TINY_RCU +void rcu_end_inkernel_boot(void); +#else /* #ifndef CONFIG_TINY_RCU */ +static inline void rcu_end_inkernel_boot(void) { } +#endif /* #ifndef CONFIG_TINY_RCU */ + #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); void rcu_sysrq_end(void); @@ -379,9 +392,9 @@ static inline void rcu_init_nohz(void) */ #define RCU_NONIDLE(a) \ do { \ - rcu_irq_enter(); \ + rcu_irq_enter_irqson(); \ do { a; } while (0); \ - rcu_irq_exit(); \ + rcu_irq_exit_irqson(); \ } while (0) /* @@ -741,7 +754,7 @@ static inline void rcu_preempt_sleep_check(void) * The tracing infrastructure traces RCU (we want that), but unfortunately * some of the RCU checks causes tracing to lock up the system. * - * The tracing version of rcu_dereference_raw() must not call + * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). */ #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 4c1aaf9cce7b..64809aea661c 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -181,6 +181,14 @@ static inline void rcu_irq_enter(void) { } +static inline void rcu_irq_exit_irqson(void) +{ +} + +static inline void rcu_irq_enter_irqson(void) +{ +} + static inline void rcu_irq_exit(void) { } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 60d15a080d7c..ad1eda9fa4da 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -37,7 +37,7 @@ void rcu_cpu_stall_reset(void); /* * Note a virtualization-based context switch. This is simply a * wrapper around rcu_note_context_switch(), which allows TINY_RCU - * to save a few bytes. + * to save a few bytes. The caller must have disabled interrupts. */ static inline void rcu_virt_note_context_switch(int cpu) { @@ -97,6 +97,8 @@ void rcu_idle_enter(void); void rcu_idle_exit(void); void rcu_irq_enter(void); void rcu_irq_exit(void); +void rcu_irq_enter_irqson(void); +void rcu_irq_exit_irqson(void); void exit_rcu(void); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index d68bb402120e..18394343f489 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -788,10 +788,16 @@ int regmap_fields_update_bits(struct regmap_field *field, unsigned int id, * * @reg_offset: Offset of the status/mask register within the bank * @mask: Mask used to flag/control the register. + * @type_reg_offset: Offset register for the irq type setting. + * @type_rising_mask: Mask bit to configure RISING type irq. + * @type_falling_mask: Mask bit to configure FALLING type irq. */ struct regmap_irq { unsigned int reg_offset; unsigned int mask; + unsigned int type_reg_offset; + unsigned int type_rising_mask; + unsigned int type_falling_mask; }; #define REGMAP_IRQ_REG(_irq, _off, _mask) \ @@ -811,18 +817,23 @@ struct regmap_irq { * @ack_base: Base ack address. If zero then the chip is clear on read. * Using zero value is possible with @use_ack bit. * @wake_base: Base address for wake enables. If zero unsupported. + * @type_base: Base address for irq type. If zero unsupported. 
* @irq_reg_stride: Stride to use for chips where registers are not contiguous. * @init_ack_masked: Ack all masked interrupts once during initalization. * @mask_invert: Inverted mask register: cleared bits are masked out. * @use_ack: Use @ack register even if it is zero. * @ack_invert: Inverted ack register: cleared bits for ack. * @wake_invert: Inverted wake register: cleared bits are wake enabled. + * @type_invert: Invert the type flags. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * * @num_regs: Number of registers in each control bank. * @irqs: Descriptors for individual IRQs. Interrupt numbers are * assigned based on the index in the array of the interrupt. * @num_irqs: Number of descriptors. + * @num_type_reg: Number of type registers. + * @type_reg_stride: Stride to use for chips where type registers are not + * contiguous. */ struct regmap_irq_chip { const char *name; @@ -832,6 +843,7 @@ struct regmap_irq_chip { unsigned int unmask_base; unsigned int ack_base; unsigned int wake_base; + unsigned int type_base; unsigned int irq_reg_stride; bool init_ack_masked:1; bool mask_invert:1; @@ -839,11 +851,15 @@ struct regmap_irq_chip { bool ack_invert:1; bool wake_invert:1; bool runtime_pm:1; + bool type_invert:1; int num_regs; const struct regmap_irq *irqs; int num_irqs; + + int num_type_reg; + unsigned int type_reg_stride; }; struct regmap_irq_chip_data; @@ -1021,7 +1037,7 @@ static inline void regmap_async_complete(struct regmap *map) } static inline int regmap_register_patch(struct regmap *map, - const struct reg_default *regs, + const struct reg_sequence *regs, int num_regs) { WARN_ONCE(1, "regmap API is disabled"); diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 9e0e76992be0..48603506f8de 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -140,6 +140,8 @@ struct regulator; * * @supply: The name of the supply. Initialised by the user before * using the bulk regulator APIs. + * @optional: The supply should be considered optional. Initialised by the user + * before using the bulk regulator APIs. * @consumer: The regulator consumer for the supply. This will be managed * by the bulk API. * @@ -149,6 +151,7 @@ struct regulator; */ struct regulator_bulk_data { const char *supply; + bool optional; struct regulator *consumer; /* private: Internal use */ diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 9c2903e58adb..16ac9e108806 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -302,6 +302,8 @@ struct regulator_desc { unsigned int vsel_reg; unsigned int vsel_mask; + unsigned int csel_reg; + unsigned int csel_mask; unsigned int apply_reg; unsigned int apply_bit; unsigned int enable_reg; diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index e50b31d18462..63bd7601b6de 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -823,4 +823,86 @@ out: return err; } +/* Internal function, please use rhashtable_replace_fast() instead */ +static inline int __rhashtable_replace_fast( + struct rhashtable *ht, struct bucket_table *tbl, + struct rhash_head *obj_old, struct rhash_head *obj_new, + const struct rhashtable_params params) +{ + struct rhash_head __rcu **pprev; + struct rhash_head *he; + spinlock_t *lock; + unsigned int hash; + int err = -ENOENT; + + /* Minimally, the old and new objects must have same hash + * (which should mean identifiers are the same). 
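Example sketch (not part of the patch), referring back to the regmap_irq hunks a little above: with the new type_base/num_type_reg fields and the per-IRQ type masks, a chip description might be laid out as below. The register offsets, masks and chip name are invented for illustration.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_irq example_irqs[] = {
	{
		.reg_offset		= 0,
		.mask			= BIT(0),
		.type_reg_offset	= 0,
		.type_rising_mask	= BIT(0),
		.type_falling_mask	= BIT(1),
	},
	{
		.reg_offset		= 0,
		.mask			= BIT(1),
		.type_reg_offset	= 0,
		.type_rising_mask	= BIT(2),
		.type_falling_mask	= BIT(3),
	},
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		 = "example-pmic",
	.status_base	 = 0x10,
	.mask_base	 = 0x20,
	.ack_base	 = 0x30,
	.type_base	 = 0x40,	/* new: base address of the type registers */
	.num_regs	 = 1,
	.irqs		 = example_irqs,
	.num_irqs	 = ARRAY_SIZE(example_irqs),
	.num_type_reg	 = 1,		/* new: number of type registers */
	.type_reg_stride = 1,
};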
+ */ + hash = rht_head_hashfn(ht, tbl, obj_old, params); + if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) + return -EINVAL; + + lock = rht_bucket_lock(tbl, hash); + + spin_lock_bh(lock); + + pprev = &tbl->buckets[hash]; + rht_for_each(he, tbl, hash) { + if (he != obj_old) { + pprev = &he->next; + continue; + } + + rcu_assign_pointer(obj_new->next, obj_old->next); + rcu_assign_pointer(*pprev, obj_new); + err = 0; + break; + } + + spin_unlock_bh(lock); + + return err; +} + +/** + * rhashtable_replace_fast - replace an object in hash table + * @ht: hash table + * @obj_old: pointer to hash head inside object being replaced + * @obj_new: pointer to hash head inside object which is new + * @params: hash table parameters + * + * Replacing an object doesn't affect the number of elements in the hash table + * or bucket, so we don't need to worry about shrinking or expanding the + * table here. + * + * Returns zero on success, -ENOENT if the entry could not be found, + * -EINVAL if hash is not the same for the old and new objects. + */ +static inline int rhashtable_replace_fast( + struct rhashtable *ht, struct rhash_head *obj_old, + struct rhash_head *obj_new, + const struct rhashtable_params params) +{ + struct bucket_table *tbl; + int err; + + rcu_read_lock(); + + tbl = rht_dereference_rcu(ht->tbl, ht); + + /* Because we have already taken (and released) the bucket + * lock in old_tbl, if we find that future_tbl is not yet + * visible then that guarantees the entry to still be in + * the old tbl if it exists. + */ + while ((err = __rhashtable_replace_fast(ht, tbl, obj_old, + obj_new, params)) && + (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) + ; + + rcu_read_unlock(); + + return err; +} + #endif /* _LINUX_RHASHTABLE_H */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 4be5048b1fbe..c006cc900c44 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -84,6 +84,11 @@ void net_inc_ingress_queue(void); void net_dec_ingress_queue(void); #endif +#ifdef CONFIG_NET_EGRESS +void net_inc_egress_queue(void); +void net_dec_egress_queue(void); +#endif + extern void rtnetlink_init(void); extern void __rtnl_unlock(void); diff --git a/include/linux/sched.h b/include/linux/sched.h index edad7a43edea..61aa9bbea871 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -177,9 +177,9 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); extern void calc_global_load(unsigned long ticks); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) -extern void update_cpu_load_nohz(void); +extern void update_cpu_load_nohz(int active); #else -static inline void update_cpu_load_nohz(void) { } +static inline void update_cpu_load_nohz(int active) { } #endif extern unsigned long get_parent_ip(unsigned long addr); @@ -377,6 +377,7 @@ extern void scheduler_tick(void); extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_LOCKUP_DETECTOR +extern void touch_softlockup_watchdog_sched(void); extern void touch_softlockup_watchdog(void); extern void touch_softlockup_watchdog_sync(void); extern void touch_all_softlockup_watchdogs(void); @@ -387,6 +388,9 @@ extern unsigned int softlockup_panic; extern unsigned int hardlockup_panic; void lockup_detector_init(void); #else +static inline void touch_softlockup_watchdog_sched(void) +{ +} static inline void touch_softlockup_watchdog(void) { } @@ -830,6 +834,7 @@ struct user_struct { unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? 
*/ #endif unsigned long locked_shm; /* How many pages of mlocked shm ? */ + unsigned long unix_inflight; /* How many files in flight in unix sockets */ #ifdef CONFIG_KEYS struct key *uid_keyring; /* UID specific keyring */ @@ -1268,8 +1273,13 @@ struct sched_entity { #endif #ifdef CONFIG_SMP - /* Per entity load average tracking */ - struct sched_avg avg; + /* + * Per entity load average tracking. + * + * Put into separate cache line so it does not + * collide with read-mostly values above. + */ + struct sched_avg avg ____cacheline_aligned_in_smp; #endif }; @@ -1455,14 +1465,15 @@ struct task_struct { /* Used for emulating ABI behavior of previous Linux versions */ unsigned int personality; - unsigned in_execve:1; /* Tell the LSMs that the process is doing an - * execve */ - unsigned in_iowait:1; - - /* Revert to default priority/policy when forking */ + /* scheduler bits, serialized by scheduler locks */ unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; + unsigned :0; /* force alignment to the next boundary */ + + /* unserialized, strictly 'current' */ + unsigned in_execve:1; /* bit to tell LSMs we're in execve */ + unsigned in_iowait:1; #ifdef CONFIG_MEMCG unsigned memcg_may_oom:1; #endif @@ -1519,11 +1530,14 @@ struct task_struct { cputime_t gtime; struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - seqlock_t vtime_seqlock; + seqcount_t vtime_seqcount; unsigned long long vtime_snap; enum { - VTIME_SLEEPING = 0, + /* Task is sleeping or running in a CPU with VTIME inactive */ + VTIME_INACTIVE = 0, + /* Task runs in userspace in a CPU with VTIME active */ VTIME_USER, + /* Task runs in kernelspace in a CPU with VTIME active */ VTIME_SYS, } vtime_snap_whence; #endif @@ -2002,7 +2016,8 @@ static inline int pid_alive(const struct task_struct *p) } /** - * is_global_init - check if a task structure is init + * is_global_init - check if a task structure is init. Since init + * is free to have sub-threads we need to check tgid. * @tsk: Task structure to be checked. * * Check if a task structure is the first user space task the kernel created. 
@@ -2011,7 +2026,7 @@ static inline int pid_alive(const struct task_struct *p) */ static inline int is_global_init(struct task_struct *tsk) { - return tsk->pid == 1; + return task_tgid_nr(tsk) == 1; } extern struct pid *cad_pid; diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index efa931c5cef1..411b52e424e1 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h @@ -10,11 +10,17 @@ #ifdef CONFIG_GENERIC_SCHED_CLOCK extern void sched_clock_postinit(void); -#else -static inline void sched_clock_postinit(void) { } -#endif extern void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate); +#else +static inline void sched_clock_postinit(void) { } + +static inline void sched_clock_register(u64 (*read)(void), int bits, + unsigned long rate) +{ + ; +} +#endif #endif diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 297d4fa1cfe5..e03d6ba5e5b4 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -145,11 +145,12 @@ struct uart_port { #define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */ #define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */ -#define UPIO_MEM (SERIAL_IO_MEM) /* 8b MMIO access */ +#define UPIO_MEM (SERIAL_IO_MEM) /* driver-specific */ #define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */ #define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */ #define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */ #define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */ +#define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */ unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 7c536ac5be05..9f2bfd055742 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -32,6 +32,7 @@ enum { SCIx_SH2_SCIF_FIFODATA_REGTYPE, SCIx_SH3_SCIF_REGTYPE, SCIx_SH4_SCIF_REGTYPE, + SCIx_SH4_SCIF_BRG_REGTYPE, SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, SCIx_SH4_SCIF_FIFODATA_REGTYPE, SCIx_SH7705_SCIF_REGTYPE, diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index 8c9131db2b25..f2e27e078362 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h @@ -4,7 +4,7 @@ #include <linux/phy.h> #include <linux/if_ether.h> -enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; +enum {EDMAC_LITTLE_ENDIAN}; struct sh_eth_plat_data { int phy; diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 50777b5b1e4c..a43f41cb3c43 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -60,6 +60,10 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); extern int shmem_unuse(swp_entry_t entry, struct page *page); +extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); +extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, + pgoff_t start, pgoff_t end); + static inline struct page *shmem_read_mapping_page( struct address_space *mapping, pgoff_t index) { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4355129fff91..07f9ccd28654 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -39,11 +39,55 @@ #include <linux/in6.h> #include <net/flow.h> -/* A. Checksumming of received packets by device. +/* The interface for checksum offload between the stack and networking drivers + * is as follows... + * + * A. 
IP checksum related features + * + * Drivers advertise checksum offload capabilities in the features of a device. + * From the stack's point of view these are capabilities offered by the driver, + * a driver typically only advertises features that it is capable of offloading + * to its device. + * + * The checksum related features are: + * + * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one + * IP (one's complement) checksum for any combination + * of protocols or protocol layering. The checksum is + * computed and set in a packet per the CHECKSUM_PARTIAL + * interface (see below). + * + * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain + * TCP or UDP packets over IPv4. These are specifically + * unencapsulated packets of the form IPv4|TCP or + * IPv4|UDP where the Protocol field in the IPv4 header + * is TCP or UDP. The IPv4 header may contain IP options + * This feature cannot be set in features for a device + * with NETIF_F_HW_CSUM also set. This feature is being + * DEPRECATED (see below). + * + * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain + * TCP or UDP packets over IPv6. These are specifically + * unencapsulated packets of the form IPv6|TCP or + * IPv4|UDP where the Next Header field in the IPv6 + * header is either TCP or UDP. IPv6 extension headers + * are not supported with this feature. This feature + * cannot be set in features for a device with + * NETIF_F_HW_CSUM also set. This feature is being + * DEPRECATED (see below). + * + * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload. + * This flag is used only used to disable the RX checksum + * feature for a device. The stack will accept receive + * checksum indication in packets received on a device + * regardless of whether NETIF_F_RXCSUM is set. + * + * B. Checksumming of received packets by device. Indication of checksum + * verification is in set skb->ip_summed. Possible values are: * * CHECKSUM_NONE: * - * Device failed to checksum this packet e.g. due to lack of capabilities. + * Device did not checksum this packet e.g. due to lack of capabilities. * The packet contains full (though not verified) checksum in packet but * not in skb->csum. Thus, skb->csum is undefined in this case. * @@ -53,9 +97,8 @@ * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY * if their checksums are okay. skb->csum is still undefined in this case - * though. It is a bad option, but, unfortunately, nowadays most vendors do - * this. Apparently with the secret goal to sell you new devices, when you - * will add new protocol to your host, f.e. IPv6 8) + * though. A driver or device must never modify the checksum field in the + * packet even if checksum is verified. * * CHECKSUM_UNNECESSARY is applicable to following protocols: * TCP: IPv6 and IPv4. @@ -96,40 +139,77 @@ * packet that are after the checksum being offloaded are not considered to * be verified. * - * B. Checksumming on output. - * - * CHECKSUM_NONE: - * - * The skb was already checksummed by the protocol, or a checksum is not - * required. + * C. Checksumming on transmit for non-GSO. The stack requests checksum offload + * in the skb->ip_summed for a packet. 
Values are: * * CHECKSUM_PARTIAL: * - * The device is required to checksum the packet as seen by hard_start_xmit() + * The driver is required to checksum the packet as seen by hard_start_xmit() * from skb->csum_start up to the end, and to record/write the checksum at - * offset skb->csum_start + skb->csum_offset. + * offset skb->csum_start + skb->csum_offset. A driver may verify that the + * csum_start and csum_offset values are valid values given the length and + * offset of the packet, however they should not attempt to validate that the + * checksum refers to a legitimate transport layer checksum-- it is the + * purview of the stack to validate that csum_start and csum_offset are set + * correctly. + * + * When the stack requests checksum offload for a packet, the driver MUST + * ensure that the checksum is set correctly. A driver can either offload the + * checksum calculation to the device, or call skb_checksum_help (in the case + * that the device does not support offload for a particular checksum). + * + * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of + * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate + * checksum offload capability. If a device has limited checksum capabilities + * (for instance can only perform NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM as + * described above) a helper function can be called to resolve + * CHECKSUM_PARTIAL. The helper functions are skb_csum_off_chk*. The helper + * function takes a spec argument that describes the protocol layer that is + * supported for checksum offload and can be called for each packet. If a + * packet does not match the specification for offload, skb_checksum_help + * is called to resolve the checksum. * - * The device must show its capabilities in dev->features, set up at device - * setup time, e.g. netdev_features.h: + * CHECKSUM_NONE: * - * NETIF_F_HW_CSUM - It's a clever device, it's able to checksum everything. - * NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over - * IPv4. Sigh. Vendors like this way for an unknown reason. - * Though, see comment above about CHECKSUM_UNNECESSARY. 8) - * NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead. - * NETIF_F_... - Well, you get the picture. + * The skb was already checksummed by the protocol, or a checksum is not + * required. * * CHECKSUM_UNNECESSARY: * - * Normally, the device will do per protocol specific checksumming. Protocol - * implementations that do not want the NIC to perform the checksum - * calculation should use this flag in their outgoing skbs. - * - * NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC - * offload. Correspondingly, the FCoE protocol driver - * stack should use CHECKSUM_UNNECESSARY. + * This has the same meaning on as CHECKSUM_NONE for checksum offload on + * output. * - * Any questions? No questions, good. --ANK + * CHECKSUM_COMPLETE: + * Not used in checksum output. If a driver observes a packet with this value + * set in skbuff, if should treat as CHECKSUM_NONE being set. + * + * D. Non-IP checksum (CRC) offloads + * + * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of + * offloading the SCTP CRC in a packet. To perform this offload the stack + * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset + * accordingly. 
Note the there is no indication in the skbuff that the + * CHECKSUM_PARTIAL refers to an SCTP checksum, a driver that supports + * both IP checksum offload and SCTP CRC offload must verify which offload + * is configured for a packet presumably by inspecting packet headers. + * + * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of + * offloading the FCOE CRC in a packet. To perform this offload the stack + * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset + * accordingly. Note the there is no indication in the skbuff that the + * CHECKSUM_PARTIAL refers to an FCOE checksum, a driver that supports + * both IP checksum offload and FCOE CRC offload must verify which offload + * is configured for a packet presumably by inspecting packet headers. + * + * E. Checksumming on output with GSO. + * + * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload + * is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the + * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as + * part of the GSO operation is implied. If a checksum is being offloaded + * with GSO then ip_summed is CHECKSUM_PARTIAL, csum_start and csum_offset + * are set to refer to the outermost checksum being offload (two offloaded + * checksums are possible with UDP encapsulation). */ /* Don't change this without changing skb_csum_unnecessary! */ @@ -833,7 +913,7 @@ struct sk_buff_fclones { * skb_fclone_busy - check if fclone is busy * @skb: buffer * - * Returns true is skb is a fast clone, and its clone is not freed. + * Returns true if skb is a fast clone, and its clone is not freed. * Some drivers call skb_orphan() in their ndo_start_xmit(), * so we also check that this didnt happen. */ @@ -1082,9 +1162,6 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) static inline void skb_sender_cpu_clear(struct sk_buff *skb) { -#ifdef CONFIG_XPS - skb->sender_cpu = 0; -#endif } #ifdef NET_SKBUFF_DATA_USES_OFFSET @@ -1942,6 +2019,11 @@ static inline unsigned char *skb_inner_transport_header(const struct sk_buff return skb->head + skb->inner_transport_header; } +static inline int skb_inner_transport_offset(const struct sk_buff *skb) +{ + return skb_inner_transport_header(skb) - skb->data; +} + static inline void skb_reset_inner_transport_header(struct sk_buff *skb) { skb->inner_transport_header = skb->data - skb->head; @@ -2723,6 +2805,23 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); +static inline void skb_postpush_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) +{ + /* For performing the reverse operation to skb_postpull_rcsum(), + * we can instead of ... + * + * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); + * + * ... just use this equivalent version here to save a few + * instructions. Feeding csum of 0 in csum_partial() and later + * on adding skb->csum is equivalent to feed skb->csum in the + * first place. 
+ */ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_partial(start, len, skb->csum); +} + /** * pskb_trim_rcsum - trim received skb and update checksum * @skb: buffer to trim @@ -2788,6 +2887,12 @@ static inline void skb_frag_list_init(struct sk_buff *skb) #define skb_walk_frags(skb, iter) \ for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) + +int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p, + const struct sk_buff *skb); +struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags, + int *peeked, int *off, int *err, + struct sk_buff **last); struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, int *peeked, int *off, int *err); struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, diff --git a/include/linux/slab.h b/include/linux/slab.h index 2037a861e367..3ffee7422012 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -86,6 +86,11 @@ #else # define SLAB_FAILSLAB 0x00000000UL #endif +#ifdef CONFIG_MEMCG_KMEM +# define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */ +#else +# define SLAB_ACCOUNT 0x00000000UL +#endif /* The following flags affect the page allocator grouping pages by mobility */ #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h index dad035c16d94..343c13ac4f71 100644 --- a/include/linux/soc/ti/knav_dma.h +++ b/include/linux/soc/ti/knav_dma.h @@ -144,17 +144,17 @@ struct knav_dma_cfg { * @psdata: Protocol specific */ struct knav_dma_desc { - u32 desc_info; - u32 tag_info; - u32 packet_info; - u32 buff_len; - u32 buff; - u32 next_desc; - u32 orig_len; - u32 orig_buff; - u32 epib[KNAV_DMA_NUM_EPIB_WORDS]; - u32 psdata[KNAV_DMA_NUM_PS_WORDS]; - u32 pad[4]; + __le32 desc_info; + __le32 tag_info; + __le32 packet_info; + __le32 buff_len; + __le32 buff; + __le32 next_desc; + __le32 orig_len; + __le32 orig_buff; + __le32 epib[KNAV_DMA_NUM_EPIB_WORDS]; + __le32 psdata[KNAV_DMA_NUM_PS_WORDS]; + __le32 pad[4]; } ____cacheline_aligned; #if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA) diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index fddebc617469..4018b48f2b3b 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -15,6 +15,7 @@ struct sock_diag_handler { __u8 family; int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); int (*get_info)(struct sk_buff *skb, struct sock *sk); + int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh); }; int sock_diag_register(const struct sock_diag_handler *h); @@ -68,4 +69,5 @@ bool sock_diag_has_destroy_listeners(const struct sock *sk) } void sock_diag_broadcast_destroy(struct sock *sk); +int sock_diag_destroy(struct sock *sk, int err); #endif diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index cce80e6dc7d1..53be3a4c60cb 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -425,6 +425,12 @@ struct spi_master { #define SPI_MASTER_MUST_RX BIT(3) /* requires rx */ #define SPI_MASTER_MUST_TX BIT(4) /* requires tx */ + /* + * on some hardware transfer size may be constrained + * the limit may depend on device transfer settings + */ + size_t (*max_transfer_size)(struct spi_device *spi); + /* lock and mutex for SPI bus locking */ spinlock_t bus_lock_spinlock; struct mutex bus_lock_mutex; @@ -762,10 +768,15 @@ struct spi_message { void *state; }; +static inline void spi_message_init_no_memset(struct spi_message *m) +{ + INIT_LIST_HEAD(&m->transfers); +} + static inline 
void spi_message_init(struct spi_message *m) { memset(m, 0, sizeof *m); - INIT_LIST_HEAD(&m->transfers); + spi_message_init_no_memset(m); } static inline void @@ -832,6 +843,15 @@ extern int spi_async(struct spi_device *spi, struct spi_message *message); extern int spi_async_locked(struct spi_device *spi, struct spi_message *message); +static inline size_t +spi_max_transfer_size(struct spi_device *spi) +{ + struct spi_master *master = spi->master; + if (!master->max_transfer_size) + return SIZE_MAX; + return master->max_transfer_size(spi); +} + /*---------------------------------------------------------------------------*/ /* All these synchronous SPI transfer routines are utilities layered @@ -1115,12 +1135,7 @@ spi_add_device(struct spi_device *spi); extern struct spi_device * spi_new_device(struct spi_master *, struct spi_board_info *); -static inline void -spi_unregister_device(struct spi_device *spi) -{ - if (spi) - device_unregister(&spi->dev); -} +extern void spi_unregister_device(struct spi_device *spi); extern const struct spi_device_id * spi_get_device_id(const struct spi_device *sdev); diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index c3d1a525bacc..26a0b3c3ce5f 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -524,13 +524,9 @@ struct ssb_init_invariants { typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus, struct ssb_init_invariants *iv); -/* Register a SSB system bus. get_invariants() is called after the - * basic system devices are initialized. - * The invariants are usually fetched from some NVRAM. - * Put the invariants into the struct pointed to by iv. */ -extern int ssb_bus_ssbbus_register(struct ssb_bus *bus, - unsigned long baseaddr, - ssb_invariants_func_t get_invariants); +/* Register SoC bus. 
*/ +extern int ssb_bus_host_soc_register(struct ssb_bus *bus, + unsigned long baseaddr); #ifdef CONFIG_SSB_PCIHOST extern int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci); diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 0e1b1540597a..3cc9632dcc2a 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -29,7 +29,7 @@ struct cpu_stop_work { int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg); -void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, +bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf); int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg); @@ -65,7 +65,7 @@ static void stop_one_cpu_nowait_workfn(struct work_struct *work) preempt_enable(); } -static inline void stop_one_cpu_nowait(unsigned int cpu, +static inline bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf) { @@ -74,7 +74,10 @@ static inline void stop_one_cpu_nowait(unsigned int cpu, work_buf->fn = fn; work_buf->arg = arg; schedule_work(&work_buf->work); + return true; } + + return false; } static inline int stop_cpus(const struct cpumask *cpumask, diff --git a/include/linux/string.h b/include/linux/string.h index 9ef7795e65e4..9eebc66d957a 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -10,6 +10,7 @@ extern char *strndup_user(const char __user *, long); extern void *memdup_user(const void __user *, size_t); +extern void *memdup_user_nul(const void __user *, size_t); /* * Include machine specific inline routines diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 78512cfe1fe6..b7dabc4baafd 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -128,6 +128,7 @@ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, const unsigned short port); int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen); void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *xprt); +void svc_age_temp_xprts_now(struct svc_serv *, struct sockaddr *); static inline void svc_xprt_get(struct svc_xprt *xprt) { diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index 8d71d6577459..c00f53a4ccdd 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -23,13 +23,19 @@ struct svc_cred { kgid_t cr_gid; struct group_info *cr_group_info; u32 cr_flavor; /* pseudoflavor */ - char *cr_principal; /* for gss */ + /* name of form servicetype/hostname@REALM, passed down by + * gss-proxy: */ + char *cr_raw_principal; + /* name of form servicetype@hostname, passed down by + * rpc.svcgssd, or computed from the above: */ + char *cr_principal; struct gss_api_mech *cr_gss_mech; }; static inline void init_svc_cred(struct svc_cred *cred) { cred->cr_group_info = NULL; + cred->cr_raw_principal = NULL; cred->cr_principal = NULL; cred->cr_gss_mech = NULL; } @@ -38,6 +44,7 @@ static inline void free_svc_cred(struct svc_cred *cred) { if (cred->cr_group_info) put_group_info(cred->cr_group_info); + kfree(cred->cr_raw_principal); kfree(cred->cr_principal); gss_mech_put(cred->cr_gss_mech); init_svc_cred(cred); diff --git a/include/linux/swap.h b/include/linux/swap.h index 7ba7dccaf0e7..066bd21765ad 100644 
--- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -287,7 +287,6 @@ static inline void workingset_node_shadows_dec(struct radix_tree_node *node) /* linux/mm/page_alloc.c */ extern unsigned long totalram_pages; extern unsigned long totalreserve_pages; -extern unsigned long dirty_balance_reserve; extern unsigned long nr_free_buffer_pages(void); extern unsigned long nr_free_pagecache_pages(void); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index c2b66a277e98..185815c96433 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -886,6 +886,9 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename, const char __user *const __user *envp, int flags); asmlinkage long sys_membarrier(int cmd, int flags); +asmlinkage long sys_copy_file_range(int fd_in, loff_t __user *off_in, + int fd_out, loff_t __user *off_out, + size_t len, unsigned int flags); asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index ff307b548ed3..b4c2a485b28a 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -56,9 +56,10 @@ extern long do_no_restart_syscall(struct restart_block *parm); #ifdef __KERNEL__ #ifdef CONFIG_DEBUG_STACK_USAGE -# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) +# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ + __GFP_ZERO) #else -# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK) +# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK) #endif /* diff --git a/include/linux/time.h b/include/linux/time.h index beebe3a02d43..297f09f23896 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -125,6 +125,32 @@ static inline bool timeval_valid(const struct timeval *tv) extern struct timespec timespec_trunc(struct timespec t, unsigned gran); +/* + * Validates if a timespec/timeval used to inject a time offset is valid. + * Offsets can be postive or negative. The value of the timeval/timespec + * is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must + * always be non-negative. + */ +static inline bool timeval_inject_offset_valid(const struct timeval *tv) +{ + /* We don't check the tv_sec as it can be positive or negative */ + + /* Can't have more microseconds then a second */ + if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC) + return false; + return true; +} + +static inline bool timespec_inject_offset_valid(const struct timespec *ts) +{ + /* We don't check the tv_sec as it can be positive or negative */ + + /* Can't have more nanoseconds then a second */ + if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC) + return false; + return true; +} + #define CURRENT_TIME (current_kernel_time()) #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h new file mode 100644 index 000000000000..e1ee97c713bf --- /dev/null +++ b/include/linux/tracepoint-defs.h @@ -0,0 +1,27 @@ +#ifndef TRACEPOINT_DEFS_H +#define TRACEPOINT_DEFS_H 1 + +/* + * File can be included directly by headers who only want to access + * tracepoint->key to guard out of line trace calls. Otherwise + * linux/tracepoint.h should be used. 
+ */ + +#include <linux/atomic.h> +#include <linux/static_key.h> + +struct tracepoint_func { + void *func; + void *data; + int prio; +}; + +struct tracepoint { + const char *name; /* Tracepoint name */ + struct static_key key; + void (*regfunc)(void); + void (*unregfunc)(void); + struct tracepoint_func __rcu *funcs; +}; + +#endif diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 696a339c592c..acd522a91539 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -17,26 +17,12 @@ #include <linux/errno.h> #include <linux/types.h> #include <linux/rcupdate.h> -#include <linux/static_key.h> +#include <linux/tracepoint-defs.h> struct module; struct tracepoint; struct notifier_block; -struct tracepoint_func { - void *func; - void *data; - int prio; -}; - -struct tracepoint { - const char *name; /* Tracepoint name */ - struct static_key key; - void (*regfunc)(void); - void (*unregfunc)(void); - struct tracepoint_func __rcu *funcs; -}; - struct trace_enum_map { const char *system; const char *enum_string; @@ -171,8 +157,8 @@ extern void syscall_unregfunc(void); TP_PROTO(data_proto), \ TP_ARGS(data_args), \ TP_CONDITION(cond), \ - rcu_irq_enter(), \ - rcu_irq_exit()); \ + rcu_irq_enter_irqson(), \ + rcu_irq_exit_irqson()); \ } #else #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) @@ -493,6 +479,10 @@ extern void syscall_unregfunc(void); #define TRACE_EVENT_FN(name, proto, args, struct, \ assign, print, reg, unreg) \ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) +#define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \ + assign, print, reg, unreg) \ + DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ + PARAMS(args), PARAMS(cond)) #define TRACE_EVENT_CONDITION(name, proto, args, cond, \ struct, assign, print) \ DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ diff --git a/include/linux/tty.h b/include/linux/tty.h index 5e31f1b99037..2fd8708ea888 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -345,8 +345,6 @@ struct tty_file_private { #define TTY_HUPPED 18 /* Post driver->hangup() */ #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ -#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) - /* Values for tty->flow_change */ #define TTY_THROTTLE_SAFE 1 #define TTY_UNTHROTTLE_SAFE 2 @@ -395,8 +393,6 @@ static inline int __init tty_init(void) { return 0; } #endif -extern void tty_write_flush(struct tty_struct *); - extern struct ktermios tty_std_termios; extern int vcs_init(void); @@ -419,9 +415,8 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) return tty; } -extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, - const char *routine); extern const char *tty_name(const struct tty_struct *tty); +extern const char *tty_driver_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); extern int __tty_check_change(struct tty_struct *tty, int sig); extern int tty_check_change(struct tty_struct *tty); @@ -667,10 +662,16 @@ static inline void proc_tty_register_driver(struct tty_driver *d) {} static inline void proc_tty_unregister_driver(struct tty_driver *d) {} #endif -#define tty_debug(tty, f, args...) \ - do { \ - printk(KERN_DEBUG "%s: %s: " f, __func__, \ - tty_name(tty), ##args); \ - } while (0) +#define tty_msg(fn, tty, f, ...) \ + fn("%s %s: " f, tty_driver_name(tty), tty_name(tty), ##__VA_ARGS__) + +#define tty_debug(tty, f, ...) tty_msg(pr_debug, tty, f, ##__VA_ARGS__) +#define tty_info(tty, f, ...) 
tty_msg(pr_info, tty, f, ##__VA_ARGS__) +#define tty_notice(tty, f, ...) tty_msg(pr_notice, tty, f, ##__VA_ARGS__) +#define tty_warn(tty, f, ...) tty_msg(pr_warn, tty, f, ##__VA_ARGS__) +#define tty_err(tty, f, ...) tty_msg(pr_err, tty, f, ##__VA_ARGS__) + +#define tty_info_ratelimited(tty, f, ...) \ + tty_msg(pr_info_ratelimited, tty, f, ##__VA_ARGS__) #endif diff --git a/include/linux/uinput.h b/include/linux/uinput.h index 0994c0d01a09..75de43da2301 100644 --- a/include/linux/uinput.h +++ b/include/linux/uinput.h @@ -20,6 +20,11 @@ * Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org> * * Changes/Revisions: + * 0.5 08/13/2015 (David Herrmann <dh.herrmann@gmail.com> & + * Benjamin Tissoires <benjamin.tissoires@redhat.com>) + * - add UI_DEV_SETUP ioctl + * - add UI_ABS_SETUP ioctl + * - add UI_GET_VERSION ioctl * 0.4 01/09/2014 (Benjamin Tissoires <benjamin.tissoires@redhat.com>) * - add UI_GET_SYSNAME ioctl * 0.3 24/05/2006 (Anssi Hannula <anssi.hannulagmail.com>) diff --git a/include/linux/uio.h b/include/linux/uio.h index 8b01e1c3c614..fd9bcfedad42 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -82,7 +82,7 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); -size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); +size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); size_t iov_iter_zero(size_t bytes, struct iov_iter *); @@ -145,7 +145,7 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } -size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); int import_iovec(int type, const struct iovec __user * uvector, diff --git a/include/linux/usb.h b/include/linux/usb.h index b9a28074210f..89533ba38691 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -510,7 +510,8 @@ struct usb3_lpm_parameters { * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled - * @usb3_lpm_enabled: USB3 hardware LPM enabled + * @usb3_lpm_u1_enabled: USB3 hardware U1 LPM enabled + * @usb3_lpm_u2_enabled: USB3 hardware U2 LPM enabled * @string_langid: language ID for strings * @product: iProduct string, if present (static) * @manufacturer: iManufacturer string, if present (static) @@ -583,7 +584,8 @@ struct usb_device { unsigned usb2_hw_lpm_besl_capable:1; unsigned usb2_hw_lpm_enabled:1; unsigned usb2_hw_lpm_allowed:1; - unsigned usb3_lpm_enabled:1; + unsigned usb3_lpm_u1_enabled:1; + unsigned usb3_lpm_u2_enabled:1; int string_langid; /* static strings from the device */ diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 1f6526c76ee8..3a375d07d0dc 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -138,6 +138,7 @@ struct cdc_ncm_ctx { }; u8 cdc_ncm_select_altsetting(struct usb_interface *intf); +int cdc_ncm_change_mtu(struct net_device *net, int new_mtu); int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, 
u8 data_altsetting, int drvflags); void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 3d583a10b926..d82d0068872b 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -402,6 +402,9 @@ static inline void usb_ep_free_request(struct usb_ep *ep, static inline int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { + if (WARN_ON_ONCE(!ep->enabled && ep->address)) + return -ESHUTDOWN; + return ep->ops->queue(ep, req, gfp_flags); } @@ -1012,6 +1015,9 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget) * @reset: Invoked on USB bus reset. It is mandatory for all gadget drivers * and should be called in_interrupt. * @driver: Driver model state for this driver. + * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL, + * this driver will be bound to any available UDC. + * @pending: UDC core private data used for deferred probe of this driver. * * Devices are disabled till a gadget driver successfully bind()s, which * means the driver will handle setup() requests needed to enumerate (and @@ -1072,6 +1078,9 @@ struct usb_gadget_driver { /* FIXME support safe rmmod */ struct device_driver driver; + + char *udc_name; + struct list_head pending; }; @@ -1117,8 +1126,6 @@ extern int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)); extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); extern void usb_del_gadget_udc(struct usb_gadget *gadget); -extern int usb_udc_attach_driver(const char *name, - struct usb_gadget_driver *driver); /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index f89c24bd53a4..4dcf8446dbcd 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -660,7 +660,7 @@ struct usb_mon_operations { /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */ }; -extern struct usb_mon_operations *mon_ops; +extern const struct usb_mon_operations *mon_ops; static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) { @@ -682,7 +682,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb, (*mon_ops->urb_complete)(bus, urb, status); } -int usb_mon_register(struct usb_mon_operations *ops); +int usb_mon_register(const struct usb_mon_operations *ops); void usb_mon_deregister(void); #else diff --git a/include/linux/usb/musb-omap.h b/include/linux/usb/musb-omap.h deleted file mode 100644 index 7774c5986f07..000000000000 --- a/include/linux/usb/musb-omap.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (C) 2011-2012 by Texas Instruments - * - * The Inventra Controller Driver for Linux is free software; you - * can redistribute it and/or modify it under the terms of the GNU - * General Public License version 2 as published by the Free Software - * Foundation. 
- */ - -#ifndef __MUSB_OMAP_H__ -#define __MUSB_OMAP_H__ - -enum omap_musb_vbus_id_status { - OMAP_MUSB_UNKNOWN = 0, - OMAP_MUSB_ID_GROUND, - OMAP_MUSB_ID_FLOAT, - OMAP_MUSB_VBUS_VALID, - OMAP_MUSB_VBUS_OFF, -}; - -#if (defined(CONFIG_USB_MUSB_OMAP2PLUS) || \ - defined(CONFIG_USB_MUSB_OMAP2PLUS_MODULE)) -void omap_musb_mailbox(enum omap_musb_vbus_id_status status); -#else -static inline void omap_musb_mailbox(enum omap_musb_vbus_id_status status) -{ -} -#endif - -#endif /* __MUSB_OMAP_H__ */ diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index fa6dc132bd1b..96ddfb7ab018 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -133,6 +133,21 @@ struct musb_hdrc_platform_data { const void *platform_ops; }; +enum musb_vbus_id_status { + MUSB_UNKNOWN = 0, + MUSB_ID_GROUND, + MUSB_ID_FLOAT, + MUSB_VBUS_VALID, + MUSB_VBUS_OFF, +}; + +#if IS_ENABLED(CONFIG_USB_MUSB_HDRC) +void musb_mailbox(enum musb_vbus_id_status status); +#else +static inline void musb_mailbox(enum musb_vbus_id_status status) +{ +} +#endif /* TUSB 6010 support */ diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index c3fe9e48ce27..974bce93aa28 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -12,10 +12,16 @@ #include <linux/usb/phy.h> #if IS_ENABLED(CONFIG_OF) +enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np); bool of_usb_host_tpl_support(struct device_node *np); int of_usb_update_otg_caps(struct device_node *np, struct usb_otg_caps *otg_caps); #else +static inline enum usb_dr_mode +of_usb_get_dr_mode_by_phy(struct device_node *phy_np) +{ + return USB_DR_MODE_UNKNOWN; +} static inline bool of_usb_host_tpl_support(struct device_node *np) { return false; diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index bfb74723f151..4db191fe8c2c 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -105,12 +105,26 @@ struct renesas_usbhs_platform_callback { * some register needs USB chip specific parameters. 
* This struct show it to driver */ + +struct renesas_usbhs_driver_pipe_config { + u8 type; /* USB_ENDPOINT_XFER_xxx */ + u16 bufsize; + u8 bufnum; + bool double_buf; +}; +#define RENESAS_USBHS_PIPE(_type, _size, _num, _double_buf) { \ + .type = (_type), \ + .bufsize = (_size), \ + .bufnum = (_num), \ + .double_buf = (_double_buf), \ + } + struct renesas_usbhs_driver_param { /* * pipe settings */ - u32 *pipe_type; /* array of USB_ENDPOINT_XFER_xxx (from ep0) */ - int pipe_size; /* pipe_type array size */ + struct renesas_usbhs_driver_pipe_config *pipe_configs; + int pipe_size; /* pipe_configs array size */ /* * option: diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 73ea2fb04731..16c0ed6c50a7 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -46,7 +46,7 @@ * All kernel-specific stuff were moved to media/v4l2-dev.h, so * no #if __KERNEL tests are allowed here * - * See http://linuxtv.org for more info + * See https://linuxtv.org for more info * * Author: Bill Dirks <bill@thedirks.org> * Justin Schoeman diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 3bff87a25a42..d1f1d338af20 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -14,7 +14,6 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ #define VM_ALLOC 0x00000002 /* vmalloc() */ #define VM_MAP 0x00000004 /* vmap()ed pages */ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ -#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 3e4535876d37..3347cc3ec0ab 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -12,6 +12,9 @@ struct vmpressure { unsigned long scanned; unsigned long reclaimed; + + unsigned long tree_scanned; + unsigned long tree_reclaimed; /* The lock is used to keep the scanned/reclaimed above in sync. 
*/ struct spinlock sr_lock; @@ -26,7 +29,7 @@ struct vmpressure { struct mem_cgroup; #ifdef CONFIG_MEMCG -extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, +extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed); extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); @@ -40,7 +43,7 @@ extern int vmpressure_register_event(struct mem_cgroup *memcg, extern void vmpressure_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd); #else -static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, +static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed) {} static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) {} diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 5dbc8b0ee567..73fae8c4a5fb 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -176,11 +176,11 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) #ifdef CONFIG_SMP -void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); +void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); void __inc_zone_page_state(struct page *, enum zone_stat_item); void __dec_zone_page_state(struct page *, enum zone_stat_item); -void mod_zone_page_state(struct zone *, enum zone_stat_item, int); +void mod_zone_page_state(struct zone *, enum zone_stat_item, long); void inc_zone_page_state(struct page *, enum zone_stat_item); void dec_zone_page_state(struct page *, enum zone_stat_item); @@ -189,6 +189,7 @@ extern void __inc_zone_state(struct zone *, enum zone_stat_item); extern void dec_zone_state(struct zone *, enum zone_stat_item); extern void __dec_zone_state(struct zone *, enum zone_stat_item); +void quiet_vmstat(void); void cpu_vm_stats_fold(int cpu); void refresh_zone_stat_thresholds(void); @@ -205,7 +206,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat, * The functions directly modify the zone and global counters. */ static inline void __mod_zone_page_state(struct zone *zone, - enum zone_stat_item item, int delta) + enum zone_stat_item item, long delta) { zone_page_state_add(delta, zone, item); } @@ -249,6 +250,7 @@ static inline void __dec_zone_page_state(struct page *page, static inline void refresh_zone_stat_thresholds(void) { } static inline void cpu_vm_stats_fold(int cpu) { } +static inline void quiet_vmstat(void) { } static inline void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset) { } diff --git a/include/linux/vtime.h b/include/linux/vtime.h index c5165fd256f9..fa2196990f84 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -10,16 +10,27 @@ struct task_struct; /* - * vtime_accounting_enabled() definitions/declarations + * vtime_accounting_cpu_enabled() definitions/declarations */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -static inline bool vtime_accounting_enabled(void) { return true; } +static inline bool vtime_accounting_cpu_enabled(void) { return true; } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +/* + * Checks if vtime is enabled on some CPU. Cputime readers want to be careful + * in that case and compute the tickless cputime. + * For now vtime state is tied to context tracking. We might want to decouple + * those later if necessary. 
+ */ static inline bool vtime_accounting_enabled(void) { - if (context_tracking_is_enabled()) { + return context_tracking_is_enabled(); +} + +static inline bool vtime_accounting_cpu_enabled(void) +{ + if (vtime_accounting_enabled()) { if (context_tracking_cpu_is_enabled()) return true; } @@ -29,7 +40,7 @@ static inline bool vtime_accounting_enabled(void) #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ #ifndef CONFIG_VIRT_CPU_ACCOUNTING -static inline bool vtime_accounting_enabled(void) { return false; } +static inline bool vtime_accounting_cpu_enabled(void) { return false; } #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ @@ -44,7 +55,7 @@ extern void vtime_task_switch(struct task_struct *prev); extern void vtime_common_task_switch(struct task_struct *prev); static inline void vtime_task_switch(struct task_struct *prev) { - if (vtime_accounting_enabled()) + if (vtime_accounting_cpu_enabled()) vtime_common_task_switch(prev); } #endif /* __ARCH_HAS_VTIME_TASK_SWITCH */ @@ -59,7 +70,7 @@ extern void vtime_account_irq_enter(struct task_struct *tsk); extern void vtime_common_account_irq_enter(struct task_struct *tsk); static inline void vtime_account_irq_enter(struct task_struct *tsk) { - if (vtime_accounting_enabled()) + if (vtime_accounting_cpu_enabled()) vtime_common_account_irq_enter(tsk); } #endif /* __ARCH_HAS_VTIME_ACCOUNT */ @@ -78,7 +89,7 @@ extern void vtime_gen_account_irq_exit(struct task_struct *tsk); static inline void vtime_account_irq_exit(struct task_struct *tsk) { - if (vtime_accounting_enabled()) + if (vtime_accounting_cpu_enabled()) vtime_gen_account_irq_exit(tsk); } diff --git a/include/linux/wait.h b/include/linux/wait.h index 513b36f04dfd..ae71a769b89e 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -102,11 +102,62 @@ init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func) q->func = func; } +/** + * waitqueue_active -- locklessly test for waiters on the queue + * @q: the waitqueue to test for waiters + * + * returns true if the wait list is not empty + * + * NOTE: this function is lockless and requires care, incorrect usage _will_ + * lead to sporadic and non-obvious failure. + * + * Use either while holding wait_queue_head_t::lock or when used for wakeups + * with an extra smp_mb() like: + * + * CPU0 - waker CPU1 - waiter + * + * for (;;) { + * @cond = true; prepare_to_wait(&wq, &wait, state); + * smp_mb(); // smp_mb() from set_current_state() + * if (waitqueue_active(wq)) if (@cond) + * wake_up(wq); break; + * schedule(); + * } + * finish_wait(&wq, &wait); + * + * Because without the explicit smp_mb() it's possible for the + * waitqueue_active() load to get hoisted over the @cond store such that we'll + * observe an empty wait list while the waiter might not observe @cond. + * + * Also note that this 'optimization' trades a spin_lock() for an smp_mb(), + * which (when the lock is uncontended) are of roughly equal cost. + */ static inline int waitqueue_active(wait_queue_head_t *q) { return !list_empty(&q->task_list); } +/** + * wq_has_sleeper - check if there are any waiting processes + * @wq: wait queue head + * + * Returns true if wq has waiting processes + * + * Please refer to the comment for waitqueue_active. + */ +static inline bool wq_has_sleeper(wait_queue_head_t *wq) +{ + /* + * We need to be sure we are in sync with the + * add_wait_queue modifications to the wait queue. + * + * This memory barrier should be paired with one on the + * waiting side. 
+ */ + smp_mb(); + return waitqueue_active(wq); +} + extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0197358f1e81..0e32bc71245e 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -618,4 +618,10 @@ static inline int workqueue_sysfs_register(struct workqueue_struct *wq) { return 0; } #endif /* CONFIG_SYSFS */ +#ifdef CONFIG_WQ_WATCHDOG +void wq_watchdog_touch(int cpu); +#else /* CONFIG_WQ_WATCHDOG */ +static inline void wq_watchdog_touch(int cpu) { } +#endif /* CONFIG_WQ_WATCHDOG */ + #endif diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 89474b9d260c..4457541de3c9 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -19,12 +19,16 @@ struct inode; struct dentry; +/* + * struct xattr_handler: When @name is set, match attributes with exactly that + * name. When @prefix is set instead, match attributes with that prefix and + * with a non-empty suffix. + */ struct xattr_handler { + const char *name; const char *prefix; int flags; /* fs private flags */ - size_t (*list)(const struct xattr_handler *, struct dentry *dentry, - char *list, size_t list_size, const char *name, - size_t name_len); + bool (*list)(struct dentry *dentry); int (*get)(const struct xattr_handler *, struct dentry *dentry, const char *name, void *buffer, size_t size); int (*set)(const struct xattr_handler *, struct dentry *dentry, @@ -53,8 +57,11 @@ int generic_setxattr(struct dentry *dentry, const char *name, const void *value, int generic_removexattr(struct dentry *dentry, const char *name); ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); -int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name, - const char *value, size_t size, gfp_t flags); + +static inline const char *xattr_prefix(const struct xattr_handler *handler) +{ + return handler->prefix ?: handler->name; +} struct simple_xattrs { struct list_head head; @@ -95,8 +102,7 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name, void *buffer, size_t size); int simple_xattr_set(struct simple_xattrs *xattrs, const char *name, const void *value, size_t size, int flags); -int simple_xattr_remove(struct simple_xattrs *xattrs, const char *name); -ssize_t simple_xattr_list(struct simple_xattrs *xattrs, char *buffer, +ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs, char *buffer, size_t size); void simple_xattr_list_add(struct simple_xattrs *xattrs, struct simple_xattr *new_xattr); |
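The regulator consumer.h hunk above adds an optional flag to struct regulator_bulk_data so a consumer can mark supplies that may legitimately be absent. A minimal consumer-side sketch follows; the supply names and the example_ identifiers are made up, and it assumes the bulk helpers treat a missing optional supply as non-fatal, as the new field's description suggests.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

/* Kept at file scope so the consumer handles stay valid for a later disable. */
static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "vdd" },				/* required */
	{ .supply = "vddio", .optional = true },	/* may be absent on some boards */
};

static int example_enable_supplies(struct device *dev)
{
	int ret;

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				      example_supplies);
	if (ret)
		return ret;

	return regulator_bulk_enable(ARRAY_SIZE(example_supplies),
				     example_supplies);
}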
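rhashtable_replace_fast(), added in the rhashtable.h hunk above, swaps one hashed object for another that hashes to the same bucket. The sketch below shows the intended calling pattern; struct example_item and its params are illustrative, only the helper itself and the same-key requirement come from the code above.

#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct example_item {
	u32 key;
	int value;
	struct rhash_head node;
	struct rcu_head rcu;
};

static const struct rhashtable_params example_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct example_item, key),
	.head_offset	= offsetof(struct example_item, node),
};

/* Swap @old for @new_item; both must carry the same key so they hash alike. */
static int example_item_swap(struct rhashtable *ht, struct example_item *old,
			     struct example_item *new_item)
{
	int err;

	new_item->key = old->key;
	err = rhashtable_replace_fast(ht, &old->node, &new_item->node,
				      example_params);
	if (!err)
		kfree_rcu(old, rcu);	/* old is unlinked; free after a grace period */
	return err;
}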
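The rewritten checksum-offload comment in the skbuff.h hunk above spells out the driver's obligation for CHECKSUM_PARTIAL: either offload the checksum insertion at csum_start + csum_offset or fall back to skb_checksum_help(). A rough ndo_start_xmit() sketch of that contract is below; the example_ names and the IPv4-only capability test are assumptions, not part of the patch.

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative: pretend the device can only checksum plain IPv4 packets. */
static bool example_hw_can_csum(const struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP);
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (example_hw_can_csum(skb)) {
			/* Hardware must write the checksum at csum_start + csum_offset. */
			unsigned int start = skb_checksum_start_offset(skb);
			unsigned int insert = start + skb->csum_offset;

			/* ... program the TX descriptor with start/insert ... */
			(void)insert;
		} else if (skb_checksum_help(skb)) {
			/* Could not resolve the checksum in software: drop. */
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/* ... hand the skb to the hardware queue ... */
	return NETDEV_TX_OK;
}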
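skb_postpush_rcsum(), also added in the skbuff.h hunk, is the counterpart of skb_postpull_rcsum() for code that pushes bytes onto a CHECKSUM_COMPLETE packet. A typical call site might look like this sketch; the helper name and header length are placeholders, and the caller is assumed to have reserved enough headroom.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Prepend @hlen bytes of header and keep skb->csum consistent when the
 * packet was received with CHECKSUM_COMPLETE. */
static void example_push_header(struct sk_buff *skb, const void *hdr,
				unsigned int hlen)
{
	memcpy(skb_push(skb, hlen), hdr, hlen);
	skb_postpush_rcsum(skb, skb->data, hlen);
}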
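The spi.h hunk above adds a per-master max_transfer_size() hook together with the spi_max_transfer_size() accessor. A protocol driver could use it to chunk a large write, roughly as sketched below; the chunking loop and the example_ name are illustrative.

#include <linux/kernel.h>
#include <linux/spi/spi.h>

static int example_spi_write_blob(struct spi_device *spi, const void *buf,
				  size_t len)
{
	size_t max = spi_max_transfer_size(spi);	/* SIZE_MAX if unconstrained */
	const u8 *p = buf;
	size_t done = 0;

	while (done < len) {
		size_t chunk = min_t(size_t, len - done, max);
		int ret = spi_write(spi, p + done, chunk);

		if (ret)
			return ret;
		done += chunk;
	}
	return 0;
}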
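Finally, the wait.h hunk above explains why a bare waitqueue_active() test needs a paired memory barrier and adds wq_has_sleeper() to bundle that barrier with the check. The waker/waiter pattern from that comment might be written as below; the example_ names are placeholders.

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_cond;

/* Waker: publish the condition first; the smp_mb() inside wq_has_sleeper()
 * keeps the store from being reordered past the emptiness check. */
static void example_wake(void)
{
	example_cond = true;
	if (wq_has_sleeper(&example_wq))
		wake_up(&example_wq);
}

/* Waiter: the matching barrier comes from set_current_state() inside
 * wait_event()/prepare_to_wait(). */
static void example_wait(void)
{
	wait_event(example_wq, example_cond);
}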