Diffstat (limited to 'include/linux')
137 files changed, 2685 insertions, 1135 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d6f95bb481d4..ebfac2fe0c81 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -514,6 +514,11 @@ static inline bool has_acpi_companion(struct device *dev)
 	return false;
 }
 
+static inline void acpi_preset_companion(struct device *dev,
+					 struct acpi_device *parent, u64 addr)
+{
+}
+
 static inline const char *acpi_dev_name(struct acpi_device *adev)
 {
 	return NULL;
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 4fef65e57023..744b997d6a94 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -42,6 +42,7 @@ struct aer_capability_regs {
 int pci_enable_pcie_error_reporting(struct pci_dev *dev);
 int pci_disable_pcie_error_reporting(struct pci_dev *dev);
 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
+int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
 #else
 static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
 {
@@ -55,6 +56,10 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 {
 	return -EINVAL;
 }
+static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
+{
+	return -EINVAL;
+}
 #endif
 
 void cper_print_aer(struct pci_dev *dev, int cper_severity,
diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h
index b87c1c7c242a..468fdfa643f0 100644
--- a/include/linux/atmel_tc.h
+++ b/include/linux/atmel_tc.h
@@ -67,6 +67,7 @@ struct atmel_tc {
 	const struct atmel_tcb_config *tcb_config;
 	int irq[3];
 	struct clk *clk[3];
+	struct clk *slow_clk;
 	struct list_head node;
 	bool allocated;
 };
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b2abc996c25d..20eba1eb0a3c 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -143,7 +143,7 @@ extern void __audit_inode_child(const struct inode *parent,
 extern void __audit_seccomp(unsigned long syscall, long signr, int code);
 extern void __audit_ptrace(struct task_struct *t);
 
-static inline int audit_dummy_context(void)
+static inline bool audit_dummy_context(void)
 {
 	void *p = current->audit_context;
 	return !p || *(int *)p;
@@ -345,9 +345,9 @@ static inline void audit_syscall_entry(int major, unsigned long a0,
 { }
 static inline void audit_syscall_exit(void *pt_regs)
 { }
-static inline int audit_dummy_context(void)
+static inline bool audit_dummy_context(void)
 {
-	return 1;
+	return true;
 }
 static inline struct filename *audit_reusename(const __user char *name)
 {
@@ -457,7 +457,7 @@ extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp
 extern __printf(2, 3)
 void audit_log_format(struct audit_buffer *ab, const char *fmt, ...);
 extern void audit_log_end(struct audit_buffer *ab);
-extern int audit_string_contains_control(const char *string,
+extern bool audit_string_contains_control(const char *string,
 					  size_t len);
 extern void audit_log_n_hex(struct audit_buffer *ab,
 			    const unsigned char *buf,
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c85f74946a8b..c82794f20110 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,7 +13,6 @@
 #include <linux/sched.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
-#include <linux/memcontrol.h>
 #include <linux/blk-cgroup.h>
 #include <linux/backing-dev-defs.h>
 #include <linux/slab.h>
@@ -267,8 +266,8 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
 {
 	struct backing_dev_info *bdi = inode_to_bdi(inode);
 
-	return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
-		cgroup_on_dfl(blkcg_root_css->cgroup) &&
+	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
+		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
 		bdi_cap_account_dirty(bdi) &&
 		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
 		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index e63553386ae7..2b8ed123ad36 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -164,6 +164,8 @@ static inline __u8 ror8(__u8 word, unsigned int shift)
  * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
  * @value: value to sign extend
  * @index: 0 based bit index (0<=index<32) to sign bit
+ *
+ * This is safe to use for 16- and 8-bit types as well.
  */
 static inline __s32 sign_extend32(__u32 value, int index)
 {
@@ -171,6 +173,17 @@ static inline __s32 sign_extend32(__u32 value, int index)
 	return (__s32)(value << shift) >> shift;
 }
 
+/**
+ * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
+ * @value: value to sign extend
+ * @index: 0 based bit index (0<=index<64) to sign bit
+ */
+static inline __s64 sign_extend64(__u64 value, int index)
+{
+	__u8 shift = 63 - index;
+	return (__s64)(value << shift) >> shift;
+}
+
 static inline unsigned fls_long(unsigned long l)
 {
 	if (sizeof(l) == 4)
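The new sign_extend64() mirrors sign_extend32() for 64-bit values. A quick standalone illustration of what the helper computes (userspace C with stdint types standing in for the kernel's __u64/__s64; the 13-bit sample is a made-up example):

	#include <stdio.h>
	#include <stdint.h>

	/* same logic as the kernel helper, with stdint types */
	static inline int64_t sign_extend64(uint64_t value, int index)
	{
		uint8_t shift = 63 - index;
		return (int64_t)(value << shift) >> shift;
	}

	int main(void)
	{
		/* a 13-bit two's-complement sample: bit 12 is the sign bit */
		uint64_t raw = 0x1FFF;	/* all 13 bits set, i.e. -1 */
		printf("%lld\n", (long long)sign_extend64(raw, 12)); /* prints -1 */
		return 0;
	}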
diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h
new file mode 100644
index 000000000000..bef124fde61e
--- /dev/null
+++ b/include/linux/blkpg.h
@@ -0,0 +1,21 @@
+#ifndef _LINUX_BLKPG_H
+#define _LINUX_BLKPG_H
+
+/*
+ * Partition table and disk geometry handling
+ */
+
+#include <linux/compat.h>
+#include <uapi/linux/blkpg.h>
+
+#ifdef CONFIG_COMPAT
+/* For 32-bit/64-bit compatibility of struct blkpg_ioctl_arg */
+struct blkpg_compat_ioctl_arg {
+	compat_int_t op;
+	compat_int_t flags;
+	compat_int_t datalen;
+	compat_uptr_t data;
+};
+#endif
+
+#endif /* _LINUX_BLKPG_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8492721b39be..60d44b26276d 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -76,6 +76,7 @@ enum {
 	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
 	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
 	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
+	CFTYPE_WORLD_WRITABLE	= (1 << 4),	/* (DON'T USE FOR NEW FILES) S_IWUGO */
 
 	/* internal flags, do not use outside cgroup core proper */
 	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */
@@ -83,6 +84,17 @@ enum {
 };
 
 /*
+ * cgroup_file is the handle for a file instance created in a cgroup which
+ * is used, for example, to generate file changed notifications.  This can
+ * be obtained by setting cftype->file_offset.
+ */
+struct cgroup_file {
+	/* do not access any fields from outside cgroup core */
+	struct list_head node;			/* anchored at css->files */
+	struct kernfs_node *kn;
+};
+
+/*
  * Per-subsystem/per-cgroup state maintained by the system.  This is the
  * fundamental structural building block that controllers deal with.
  *
@@ -122,6 +134,9 @@ struct cgroup_subsys_state {
 	 */
 	u64 serial_nr;
 
+	/* all cgroup_files associated with this css */
+	struct list_head files;
+
 	/* percpu_ref killing and RCU release */
 	struct rcu_head rcu_head;
 	struct work_struct destroy_work;
@@ -196,6 +211,9 @@ struct css_set {
 	 */
 	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
 
+	/* all css_task_iters currently walking this cset */
+	struct list_head task_iters;
+
 	/* For RCU-protected deletion */
 	struct rcu_head rcu_head;
 };
@@ -217,16 +235,16 @@ struct cgroup {
 	int id;
 
 	/*
-	 * If this cgroup contains any tasks, it contributes one to
-	 * populated_cnt.  All children with non-zero popuplated_cnt of
-	 * their own contribute one.  The count is zero iff there's no task
-	 * in this cgroup or its subtree.
+	 * Each non-empty css_set associated with this cgroup contributes
+	 * one to populated_cnt.  All children with non-zero populated_cnt
+	 * of their own contribute one.  The count is zero iff there's no
+	 * task in this cgroup or its subtree.
	 */
 	int populated_cnt;
 
 	struct kernfs_node *kn;		/* cgroup kernfs entry */
-	struct kernfs_node *procs_kn;	/* kn for "cgroup.procs" */
-	struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
+	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
+	struct cgroup_file events_file;	/* handle for "cgroup.events" */
 
 	/*
 	 * The bitmask of subsystems enabled on the child cgroups.
@@ -324,11 +342,6 @@ struct cftype {
 	 */
 	char name[MAX_CFTYPE_NAME];
 	unsigned long private;
-	/*
-	 * If not 0, file mode is set to this value, otherwise it will
-	 * be figured out automatically
-	 */
-	umode_t mode;
 
 	/*
 	 * The maximum length of string, excluding trailing nul, that can
@@ -340,6 +353,14 @@ struct cftype {
 	unsigned int flags;
 
 	/*
+	 * If non-zero, should contain the offset from the start of css to
+	 * a struct cgroup_file field.  cgroup will record the handle of
+	 * the created file into it.  The recorded handle can be used as
+	 * long as the containing css remains accessible.
+	 */
+	unsigned int file_offset;
+
+	/*
 	 * Fields used for internal bookkeeping.  Initialized automatically
 	 * during registration.
 	 */
@@ -414,12 +435,10 @@ struct cgroup_subsys {
 	int (*can_fork)(struct task_struct *task, void **priv_p);
 	void (*cancel_fork)(struct task_struct *task, void *priv);
 	void (*fork)(struct task_struct *task, void *priv);
-	void (*exit)(struct cgroup_subsys_state *css,
-		     struct cgroup_subsys_state *old_css,
-		     struct task_struct *task);
+	void (*exit)(struct task_struct *task);
+	void (*free)(struct task_struct *task);
 	void (*bind)(struct cgroup_subsys_state *root_css);
 
-	int disabled;
 	int early_init;
 
 	/*
@@ -473,8 +492,31 @@ struct cgroup_subsys {
 	unsigned int depends_on;
 };
 
-void cgroup_threadgroup_change_begin(struct task_struct *tsk);
-void cgroup_threadgroup_change_end(struct task_struct *tsk);
+extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+
+/**
+ * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
+ * @tsk: target task
+ *
+ * Called from threadgroup_change_begin() and allows cgroup operations to
+ * synchronize against threadgroup changes using a percpu_rw_semaphore.
+ */
+static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
+{
+	percpu_down_read(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
+ * @tsk: target task
+ *
+ * Called from threadgroup_change_end().  Counterpart of
+ * cgroup_threadgroup_change_begin().
+ */
+static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
+{
+	percpu_up_read(&cgroup_threadgroup_rwsem);
+}
 
 #else	/* CONFIG_CGROUPS */
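For orientation, the inlined begin/end pair above is the read side of the new percpu_rw_semaphore. A hedged sketch of how the two sides pair up; the writer shown here is illustrative, not the exact cgroup-core code:

	/* reader side: per-task paths that change a threadgroup (fork, exec) */
	cgroup_threadgroup_change_begin(tsk);
	/* ... perform the threadgroup change ... */
	cgroup_threadgroup_change_end(tsk);

	/* writer side (illustrative): cgroup core excluding all such changes
	 * while it migrates every thread of a process atomically */
	percpu_down_write(&cgroup_threadgroup_rwsem);
	/* ... */
	percpu_up_write(&cgroup_threadgroup_rwsem);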
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index eb7ca55f72ef..22e3754f89c5 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -13,10 +13,10 @@
 #include <linux/nodemask.h>
 #include <linux/rculist.h>
 #include <linux/cgroupstats.h>
-#include <linux/rwsem.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/kernfs.h>
+#include <linux/jump_label.h>
 
 #include <linux/cgroup-defs.h>
 
@@ -41,6 +41,10 @@ struct css_task_iter {
 	struct list_head *task_pos;
 	struct list_head *tasks_head;
 	struct list_head *mg_tasks_head;
+
+	struct css_set *cur_cset;
+	struct task_struct *cur_task;
+	struct list_head iters_node;	/* css_set->task_iters */
 };
 
 extern struct cgroup_root cgrp_dfl_root;
@@ -50,6 +54,26 @@ extern struct css_set init_css_set;
 #include <linux/cgroup_subsys.h>
 #undef SUBSYS
 
+#define SUBSYS(_x)							\
+	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
+	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
+#include <linux/cgroup_subsys.h>
+#undef SUBSYS
+
+/**
+ * cgroup_subsys_enabled - fast test on whether a subsys is enabled
+ * @ss: subsystem in question
+ */
+#define cgroup_subsys_enabled(ss)					\
+	static_branch_likely(&ss ## _enabled_key)
+
+/**
+ * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
+ * @ss: subsystem in question
+ */
+#define cgroup_subsys_on_dfl(ss)					\
+	static_branch_likely(&ss ## _on_dfl_key)
+
 bool css_has_online_children(struct cgroup_subsys_state *css);
 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
@@ -78,6 +102,7 @@ extern void cgroup_cancel_fork(struct task_struct *p,
 extern void cgroup_post_fork(struct task_struct *p,
 			     void *old_ss_priv[CGROUP_CANFORK_COUNT]);
 void cgroup_exit(struct task_struct *p);
+void cgroup_free(struct task_struct *p);
 
 int cgroup_init_early(void);
 int cgroup_init(void);
@@ -211,11 +236,33 @@ void css_task_iter_end(struct css_task_iter *it);
  * cgroup_taskset_for_each - iterate cgroup_taskset
  * @task: the loop cursor
  * @tset: taskset to iterate
+ *
+ * @tset may contain multiple tasks and they may belong to multiple
+ * processes.  When there are multiple tasks in @tset, if a task of a
+ * process is in @tset, all tasks of the process are in @tset.  Also, all
+ * are guaranteed to share the same source and destination csses.
+ *
+ * Iteration is not in any specific order.
  */
 #define cgroup_taskset_for_each(task, tset)				\
	for ((task) = cgroup_taskset_first((tset)); (task);		\
	     (task) = cgroup_taskset_next((tset)))
 
+/**
+ * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
+ * @leader: the loop cursor
+ * @tset: taskset to iterate
+ *
+ * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
+ * may not contain any.
+ */
+#define cgroup_taskset_for_each_leader(leader, tset)			\
+	for ((leader) = cgroup_taskset_first((tset)); (leader);	\
+	     (leader) = cgroup_taskset_next((tset)))			\
+		if ((leader) != (leader)->group_leader)		\
+			;						\
+		else
+
 /*
  * Inline functions.
  */
@@ -320,11 +367,11 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
  */
 #ifdef CONFIG_PROVE_RCU
 extern struct mutex cgroup_mutex;
-extern struct rw_semaphore css_set_rwsem;
+extern spinlock_t css_set_lock;
 #define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
-		lockdep_is_held(&css_set_rwsem) ||			\
+		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
 #else
 #define task_css_set_check(task, __c)					\
@@ -412,68 +459,10 @@ static inline struct cgroup *task_cgroup(struct task_struct *task,
 	return task_css(task, subsys_id)->cgroup;
 }
 
-/**
- * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
- * @cgrp: the cgroup of interest
- *
- * The default hierarchy is the v2 interface of cgroup and this function
- * can be used to test whether a cgroup is on the default hierarchy for
- * cases where a subsystem should behave differnetly depending on the
- * interface version.
- *
- * The set of behaviors which change on the default hierarchy are still
- * being determined and the mount option is prefixed with __DEVEL__.
- *
- * List of changed behaviors:
- *
- * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
- *   and "name" are disallowed.
- *
- * - When mounting an existing superblock, mount options should match.
- *
- * - Remount is disallowed.
- *
- * - rename(2) is disallowed.
- *
- * - "tasks" is removed.  Everything should be at process granularity.  Use
- *   "cgroup.procs" instead.
- *
- * - "cgroup.procs" is not sorted.  pids will be unique unless they got
- *   recycled inbetween reads.
- *
- * - "release_agent" and "notify_on_release" are removed.  Replacement
- *   notification mechanism will be implemented.
- *
- * - "cgroup.clone_children" is removed.
- *
- * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
- *   and its descendants contain no task; otherwise, 1.  The file also
- *   generates kernfs notification which can be monitored through poll and
- *   [di]notify when the value of the file changes.
- *
- * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
- *   take masks of ancestors with non-empty cpus/mems, instead of being
- *   moved to an ancestor.
- *
- * - cpuset: a task can be moved into an empty cpuset, and again it takes
- *   masks of ancestors.
- *
- * - memcg: use_hierarchy is on by default and the cgroup file for the flag
- *   is not created.
- *
- * - blkcg: blk-throttle becomes properly hierarchical.
- *
- * - debug: disallowed on the default hierarchy.
- */
-static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
-{
-	return cgrp->root == &cgrp_dfl_root;
-}
-
 /* no synchronization, the result can only be used as a hint */
-static inline bool cgroup_has_tasks(struct cgroup *cgrp)
+static inline bool cgroup_is_populated(struct cgroup *cgrp)
 {
-	return !list_empty(&cgrp->cset_links);
+	return cgrp->populated_cnt;
 }
 
 /* returns ino associated with a cgroup */
@@ -527,6 +516,19 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
 	pr_cont_kernfs_path(cgrp->kn);
 }
 
+/**
+ * cgroup_file_notify - generate a file modified event for a cgroup_file
+ * @cfile: target cgroup_file
+ *
+ * @cfile must have been obtained by setting cftype->file_offset.
+ */
+static inline void cgroup_file_notify(struct cgroup_file *cfile)
+{
+	/* might not have been created due to one of the CFTYPE selector flags */
+	if (cfile->kn)
+		kernfs_notify(cfile->kn);
+}
+
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
@@ -546,6 +548,7 @@ static inline void cgroup_cancel_fork(struct task_struct *p,
 static inline void cgroup_post_fork(struct task_struct *p,
				    void *ss_priv[CGROUP_CANFORK_COUNT]) {}
 static inline void cgroup_exit(struct task_struct *p) {}
+static inline void cgroup_free(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
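Tying the cgroup-defs.h and cgroup.h additions together, a hedged sketch of how a controller might use cftype->file_offset and cgroup_file_notify(); the "foo" controller, its css wrapper and its cftype entry are hypothetical:

	/* hypothetical controller state embedding a cgroup_file handle */
	struct foo_css {
		struct cgroup_subsys_state css;
		struct cgroup_file events_file;
	};

	static struct cftype foo_files[] = {
		{
			.name = "events",
			.file_offset = offsetof(struct foo_css, events_file),
			/* ... .seq_show and friends ... */
		},
		{ }	/* terminator */
	};

	static void foo_event_occurred(struct foo_css *foo)
	{
		if (!cgroup_subsys_enabled(foo_cgrp_subsys))	/* static branch */
			return;
		/* wakes poll()/inotify waiters on the "foo.events" file */
		cgroup_file_notify(&foo->events_file);
	}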
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 3ecc07d0da77..c56988ac63f7 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -500,13 +500,14 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
  *
  * Clock with adjustable fractional divider affecting its output frequency.
  */
-
 struct clk_fractional_divider {
 	struct clk_hw	hw;
 	void __iomem	*reg;
 	u8		mshift;
+	u8		mwidth;
 	u32		mmask;
 	u8		nshift;
+	u8		nwidth;
 	u32		nmask;
 	u8		flags;
 	spinlock_t	*lock;
@@ -518,6 +519,41 @@ struct clk *clk_register_fractional_divider(struct device *dev,
		void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
		u8 clk_divider_flags, spinlock_t *lock);
 
+/**
+ * struct clk_multiplier - adjustable multiplier clock
+ *
+ * @hw:		handle between common and hardware-specific interfaces
+ * @reg:	register containing the multiplier
+ * @shift:	shift to the multiplier bit field
+ * @width:	width of the multiplier bit field
+ * @lock:	register lock
+ *
+ * Clock with an adjustable multiplier affecting its output frequency.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ *
+ * Flags:
+ * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
+ *	from the register, with 0 being a valid value effectively
+ *	zeroing the output clock rate.  If CLK_MULTIPLIER_ZERO_BYPASS is
+ *	set, then a null multiplier will be considered as a bypass,
+ *	leaving the parent rate unmodified.
+ * CLK_MULTIPLIER_ROUND_CLOSEST - Round the computed multiplier to the
+ *	closest integer instead of rounding it down.
+ */
+struct clk_multiplier {
+	struct clk_hw	hw;
+	void __iomem	*reg;
+	u8		shift;
+	u8		width;
+	u8		flags;
+	spinlock_t	*lock;
+};
+
+#define CLK_MULTIPLIER_ZERO_BYPASS	BIT(0)
+#define CLK_MULTIPLIER_ROUND_CLOSEST	BIT(1)
+
+extern const struct clk_ops clk_multiplier_ops;
+
 /***
  * struct clk_composite - aggregate clock of mux, divider and gate clocks
  *
@@ -606,7 +642,7 @@ void clk_unregister(struct clk *clk);
 void devm_clk_unregister(struct device *dev, struct clk *clk);
 
 /* helper functions */
-const char *__clk_get_name(struct clk *clk);
+const char *__clk_get_name(const struct clk *clk);
 const char *clk_hw_get_name(const struct clk_hw *hw);
 struct clk_hw *__clk_get_hw(struct clk *clk);
 unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
@@ -618,6 +654,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw);
 unsigned long __clk_get_flags(struct clk *clk);
 unsigned long clk_hw_get_flags(const struct clk_hw *hw);
 bool clk_hw_is_prepared(const struct clk_hw *hw);
+bool clk_hw_is_enabled(const struct clk_hw *hw);
 bool __clk_is_enabled(struct clk *clk);
 struct clk *__clk_lookup(const char *name);
 int __clk_mux_determine_rate(struct clk_hw *hw,
@@ -690,6 +727,15 @@ static inline struct clk *of_clk_src_onecell_get(
 {
	return ERR_PTR(-ENOENT);
 }
+static inline int of_clk_get_parent_count(struct device_node *np)
+{
+	return 0;
+}
+static inline int of_clk_parent_fill(struct device_node *np,
+				     const char **parents, unsigned int size)
+{
+	return 0;
+}
 static inline const char *of_clk_get_parent_name(struct device_node *np,
						 int index)
 {
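A hedged sketch of the rate arithmetic the two clk_multiplier flags imply; this mirrors the documented semantics above rather than the driver's exact .recalc_rate implementation:

	static unsigned long multiplier_recalc_rate(unsigned long parent_rate,
						    unsigned int mult, u8 flags)
	{
		if (mult == 0 && (flags & CLK_MULTIPLIER_ZERO_BYPASS))
			return parent_rate;	/* a zero multiplier bypasses */
		return parent_rate * mult;	/* otherwise rate = parent * mult */
	}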
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 7669f7618f39..1e6932222e11 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -164,6 +164,7 @@ extern void __iomem *at91_pmc_base;
 #define		AT91_PMC_MOSCSELS	(1 << 16)	/* Main Oscillator Selection [some SAM9] */
 #define		AT91_PMC_MOSCRCS	(1 << 17)	/* Main On-Chip RC [some SAM9] */
 #define		AT91_PMC_CFDEV		(1 << 18)	/* Clock Failure Detector Event [some SAM9] */
+#define		AT91_PMC_GCKRDY		(1 << 24)	/* Generated Clocks */
 #define	AT91_PMC_IMR		0x6c			/* Interrupt Mask Register */
 
 #define AT91_PMC_PLLICPR	0x80			/* PLL Charge Pump Current Register */
@@ -182,13 +183,18 @@ extern void __iomem *at91_pmc_base;
 #define	AT91_PMC_PCSR1		0x108			/* Peripheral Clock Enable Register 1 */
 
 #define AT91_PMC_PCR		0x10c			/* Peripheral Control Register [some SAM9 and SAMA5] */
-#define		AT91_PMC_PCR_PID	(0x3f << 0)	/* Peripheral ID */
-#define		AT91_PMC_PCR_CMD	(0x1 << 12)	/* Command (read=0, write=1) */
-#define		AT91_PMC_PCR_DIV(n)	((n) << 16)	/* Divisor Value */
-#define		AT91_PMC_PCR_DIV0	0x0		/* Peripheral clock is MCK */
-#define		AT91_PMC_PCR_DIV2	0x1		/* Peripheral clock is MCK/2 */
-#define		AT91_PMC_PCR_DIV4	0x2		/* Peripheral clock is MCK/4 */
-#define		AT91_PMC_PCR_DIV8	0x3		/* Peripheral clock is MCK/8 */
-#define		AT91_PMC_PCR_EN		(0x1 << 28)	/* Enable */
+#define		AT91_PMC_PCR_PID_MASK		0x3f
+#define		AT91_PMC_PCR_GCKCSS_OFFSET	8
+#define		AT91_PMC_PCR_GCKCSS_MASK	(0x7 << AT91_PMC_PCR_GCKCSS_OFFSET)
+#define		AT91_PMC_PCR_GCKCSS(n)		((n) << AT91_PMC_PCR_GCKCSS_OFFSET)	/* GCK Clock Source Selection */
+#define		AT91_PMC_PCR_CMD		(0x1 << 12)				/* Command (read=0, write=1) */
+#define		AT91_PMC_PCR_DIV_OFFSET		16
+#define		AT91_PMC_PCR_DIV_MASK		(0x3 << AT91_PMC_PCR_DIV_OFFSET)
+#define		AT91_PMC_PCR_DIV(n)		((n) << AT91_PMC_PCR_DIV_OFFSET)	/* Divisor Value */
+#define		AT91_PMC_PCR_GCKDIV_OFFSET	20
+#define		AT91_PMC_PCR_GCKDIV_MASK	(0xff << AT91_PMC_PCR_GCKDIV_OFFSET)
+#define		AT91_PMC_PCR_GCKDIV(n)		((n) << AT91_PMC_PCR_GCKDIV_OFFSET)	/* Generated Clock Divisor Value */
+#define		AT91_PMC_PCR_EN			(0x1 << 28)				/* Enable */
+#define		AT91_PMC_PCR_GCKEN		(0x1 << 29)				/* GCK Enable */
 
 #endif
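For illustration only, a PMC_PCR value enabling a generated clock might be composed from the new field macros like this (peripheral ID, source and divisor picked arbitrarily; how the hardware interprets the GCKDIV field is device-specific, see the datasheet):

	u32 pcr = (35 & AT91_PMC_PCR_PID_MASK) |	/* peripheral ID */
		  AT91_PMC_PCR_GCKCSS(2) |		/* GCK source select */
		  AT91_PMC_PCR_GCKDIV(3) |		/* GCK divisor field */
		  AT91_PMC_PCR_CMD |			/* write command */
		  AT91_PMC_PCR_GCKEN;			/* enable the GCK */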
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index aa8f61cf3a19..4cd4ddf64cc7 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -15,7 +15,8 @@
 /* For more detailed tracepoint output */
 #define COMPACT_NO_SUITABLE_PAGE	5
 #define COMPACT_NOT_SUITABLE_ZONE	6
-/* When adding new state, please change compaction_status_string, too */
+#define COMPACT_CONTENDED		7
+/* When adding new states, please adjust include/trace/events/compaction.h */
 
 /* Used to signal whether compaction detected need_sched() or lock contention */
 /* No contention detected */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 8efb40e61d6e..22ab246feed3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -205,11 +205,31 @@
 #if GCC_VERSION >= 40600
 /*
- * Tell the optimizer that something else uses this function or variable.
+ * When used with Link Time Optimization, gcc can optimize away C functions or
+ * variables which are referenced only from assembly code.  __visible tells the
+ * optimizer that something else uses this function or variable, thus preventing
+ * this.
  */
 #define __visible	__attribute__((externally_visible))
 #endif
 
+#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
+/*
+ * __assume_aligned(n, k): Tell the optimizer that the returned
+ * pointer can be assumed to be k modulo n.  The second argument is
+ * optional (default 0), so we use a variadic macro to make the
+ * shorthand.
+ *
+ * Beware: Do not apply this to functions which may return
+ * ERR_PTRs.  Also, it is probably unwise to apply it to functions
+ * returning extra information in the low bits (but in that case the
+ * compiler should see some alignment anyway, when the return value is
+ * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ */
+#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
+#endif
+
 /*
  * GCC 'asm goto' miscompiles certain code sequences:
  *
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 52a459ff75f4..4dac1036594f 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -417,6 +417,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 #define __visible
 #endif
 
+/*
+ * Assume alignment of return value.
+ */
+#ifndef __assume_aligned
+#define __assume_aligned(a, ...)
+#endif
+
+
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #ifndef __same_type
 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
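A minimal usage sketch for the new attribute; the allocator prototypes are hypothetical, not kernel APIs:

	/* the returned pointer may be assumed 64-byte aligned */
	void *my_alloc(size_t size) __assume_aligned(64);

	/* ... or 64-byte aligned with a constant offset of 4 */
	void *my_alloc_offset(size_t size) __assume_aligned(64, 4);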
diff --git a/include/linux/count_zeros.h b/include/linux/count_zeros.h
new file mode 100644
index 000000000000..363da78c4f64
--- /dev/null
+++ b/include/linux/count_zeros.h
@@ -0,0 +1,57 @@
+/* Count leading and trailing zeros functions
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_BITOPS_COUNT_ZEROS_H_
+#define _LINUX_BITOPS_COUNT_ZEROS_H_
+
+#include <asm/bitops.h>
+
+/**
+ * count_leading_zeros - Count the number of zeros from the MSB back
+ * @x: The value
+ *
+ * Count the number of leading zeros from the MSB going towards the LSB in @x.
+ *
+ * If the MSB of @x is set, the result is 0.
+ * If only the LSB of @x is set, then the result is BITS_PER_LONG-1.
+ * If @x is 0 then the result is COUNT_LEADING_ZEROS_0.
+ */
+static inline int count_leading_zeros(unsigned long x)
+{
+	if (sizeof(x) == 4)
+		return BITS_PER_LONG - fls(x);
+	else
+		return BITS_PER_LONG - fls64(x);
+}
+
+#define COUNT_LEADING_ZEROS_0	BITS_PER_LONG
+
+/**
+ * count_trailing_zeros - Count the number of zeros from the LSB forwards
+ * @x: The value
+ *
+ * Count the number of trailing zeros from the LSB going towards the MSB in @x.
+ *
+ * If the LSB of @x is set, the result is 0.
+ * If only the MSB of @x is set, then the result is BITS_PER_LONG-1.
+ * If @x is 0 then the result is COUNT_TRAILING_ZEROS_0.
+ */
+static inline int count_trailing_zeros(unsigned long x)
+{
+#define COUNT_TRAILING_ZEROS_0	(-1)
+
+	if (sizeof(x) == 4)
+		return ffz(~x);
+	else
+		return (x != 0) ? __ffs(x) : COUNT_TRAILING_ZEROS_0;
+}
+
+#endif /* _LINUX_BITOPS_COUNT_ZEROS_H_ */
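A few worked values, assuming a 64-bit machine (BITS_PER_LONG == 64):

	count_leading_zeros(1UL);		/* 63: only the LSB is set */
	count_leading_zeros(1UL << 63);		/* 0: the MSB is set */
	count_trailing_zeros(1UL << 63);	/* 63 */
	count_trailing_zeros(1UL);		/* 0 */
	count_trailing_zeros(0UL);		/* COUNT_TRAILING_ZEROS_0, i.e. -1 */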
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 1b357997cac5..85a868ccb493 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -93,7 +93,7 @@ extern int current_cpuset_is_being_rebound(void);
 
 extern void rebuild_sched_domains(void);
 
-extern void cpuset_print_task_mems_allowed(struct task_struct *p);
+extern void cpuset_print_current_mems_allowed(void);
 
 /*
  * read_mems_allowed_begin is required when making decisions involving
@@ -104,6 +104,9 @@ extern void cpuset_print_task_mems_allowed(struct task_struct *p);
  */
 static inline unsigned int read_mems_allowed_begin(void)
 {
+	if (!cpusets_enabled())
+		return 0;
+
	return read_seqcount_begin(&current->mems_allowed_seq);
 }
 
@@ -115,6 +118,9 @@ static inline unsigned int read_mems_allowed_begin(void)
  */
 static inline bool read_mems_allowed_retry(unsigned int seq)
 {
+	if (!cpusets_enabled())
+		return false;
+
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
 }
 
@@ -219,7 +225,7 @@ static inline void rebuild_sched_domains(void)
	partition_sched_domains(1, NULL, NULL);
 }
 
-static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
+static inline void cpuset_print_current_mems_allowed(void)
 {
 }
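The pair above is used in the usual seqcount retry-loop form; with cpusets disabled both helpers now short-circuit, so the loop body runs exactly once:

	unsigned int seq;

	do {
		seq = read_mems_allowed_begin();
		/* ... make an allocation decision based on
		 * current->mems_allowed ... */
	} while (read_mems_allowed_retry(seq));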
diff --git a/include/linux/device.h b/include/linux/device.h
index 5d7bc6349930..b8f411b57dcb 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -604,13 +604,21 @@ typedef void (*dr_release_t)(struct device *dev, void *res);
 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
 
 #ifdef CONFIG_DEBUG_DEVRES
-extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
-			    const char *name);
+extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+				 int nid, const char *name);
 #define devres_alloc(release, size, gfp) \
-	__devres_alloc(release, size, gfp, #release)
+	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
+#define devres_alloc_node(release, size, gfp, nid) \
+	__devres_alloc_node(release, size, gfp, nid, #release)
 #else
-extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
+extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+			       int nid);
+static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
+{
+	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
+}
 #endif
+
 extern void devres_for_each_res(struct device *dev, dr_release_t release,
				dr_match_t match, void *match_data,
				void (*fn)(struct device *, void *, void *),
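A hedged sketch of a driver picking up the node-aware variant; struct my_state and the release callback are illustrative:

	struct my_state {
		/* ... driver-private data ... */
	};

	static void my_state_release(struct device *dev, void *res)
	{
		/* ... tear down whatever the resource represents ... */
	}

	/* allocate the devres-managed object on the device's own NUMA node */
	struct my_state *st = devres_alloc_node(my_state_release, sizeof(*st),
						GFP_KERNEL, dev_to_node(dev));
	if (!st)
		return -ENOMEM;
	/* ... initialize *st ... */
	devres_add(dev, st);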
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
new file mode 100644
index 000000000000..fc481037478a
--- /dev/null
+++ b/include/linux/dma-iommu.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2014-2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __DMA_IOMMU_H
+#define __DMA_IOMMU_H
+
+#ifdef __KERNEL__
+#include <asm/errno.h>
+
+#ifdef CONFIG_IOMMU_DMA
+#include <linux/iommu.h>
+
+int iommu_dma_init(void);
+
+/* Domain management interface for IOMMU drivers */
+int iommu_get_dma_cookie(struct iommu_domain *domain);
+void iommu_put_dma_cookie(struct iommu_domain *domain);
+
+/* Setup call for arch DMA mapping code */
+int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
+
+/* General helpers for DMA-API <-> IOMMU-API interaction */
+int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
+
+/*
+ * These implement the bulk of the relevant DMA mapping callbacks, but require
+ * the arch code to take care of attributes and cache maintenance
+ */
+struct page **iommu_dma_alloc(struct device *dev, size_t size,
+		gfp_t gfp, int prot, dma_addr_t *handle,
+		void (*flush_page)(struct device *, const void *, phys_addr_t));
+void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
+		dma_addr_t *handle);
+
+int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma);
+
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, int prot);
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, int prot);
+
+/*
+ * Arch code with no special attribute handling may use these
+ * directly as DMA mapping callbacks for simplicity
+ */
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
+		enum dma_data_direction dir, struct dma_attrs *attrs);
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs);
+int iommu_dma_supported(struct device *dev, u64 mask);
+int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+#else
+
+struct iommu_domain;
+
+static inline int iommu_dma_init(void)
+{
+	return 0;
+}
+
+static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+	return -ENODEV;
+}
+
+static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
+{
+}
+
+#endif	/* CONFIG_IOMMU_DMA */
+#endif	/* __KERNEL__ */
+#endif	/* __DMA_IOMMU_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ac07ff090919..2e551e2d2d03 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -1,6 +1,7 @@
 #ifndef _LINUX_DMA_MAPPING_H
 #define _LINUX_DMA_MAPPING_H
 
+#include <linux/sizes.h>
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -145,7 +146,9 @@ static inline void arch_teardown_dma_ops(struct device *dev) { }
 
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
 {
-	return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
+	if (dev->dma_parms && dev->dma_parms->max_segment_size)
+		return dev->dma_parms->max_segment_size;
+	return SZ_64K;
 }
 
 static inline unsigned int dma_set_max_seg_size(struct device *dev,
@@ -154,14 +157,15 @@ static inline unsigned int dma_set_max_seg_size(struct device *dev,
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
-	} else
-		return -EIO;
+	}
+	return -EIO;
 }
 
 static inline unsigned long dma_get_seg_boundary(struct device *dev)
 {
-	return dev->dma_parms ?
-		dev->dma_parms->segment_boundary_mask : 0xffffffff;
+	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
+		return dev->dma_parms->segment_boundary_mask;
+	return DMA_BIT_MASK(32);
 }
 
 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
@@ -169,8 +173,8 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
-	} else
-		return -EIO;
+	}
+	return -EIO;
 }
 
 #ifndef dma_max_pfn
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 7ac17f57250e..187c10299722 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -20,6 +20,14 @@
 #define CONTEXT_TT_MULTI_LEVEL	0
 #define CONTEXT_TT_DEV_IOTLB	1
 #define CONTEXT_TT_PASS_THROUGH 2
+/* Extended context entry types */
+#define CONTEXT_TT_PT_PASID	4
+#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
+#define CONTEXT_TT_MASK (7ULL << 2)
+
+#define CONTEXT_DINVE		(1ULL << 8)
+#define CONTEXT_PRS		(1ULL << 9)
+#define CONTEXT_PASIDE		(1ULL << 11)
 
 struct intel_iommu;
 struct dmar_domain;
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 41a3b11f7796..3d003805aac3 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -156,7 +156,7 @@ struct fb_cursor_user {
 #define FB_EVENT_GET_REQ		0x0D
 /*	Unbind from the console if possible */
 #define FB_EVENT_FB_UNBIND		0x0E
-/*	CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
+/*	CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */
 #define FB_EVENT_REMAP_ALL_CONSOLE	0x0F
 /*      A hardware display blank early change occured */
 #define FB_EARLY_EVENT_BLANK		0x10
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 39efee130d2b..bb522011383b 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -280,6 +280,22 @@ fence_is_signaled(struct fence *fence)
 }
 
 /**
+ * fence_is_later - return if f1 is chronologically later than f2
+ * @f1:	[in]	the first fence from the same context
+ * @f2:	[in]	the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not re-used across contexts.
+ */
+static inline bool fence_is_later(struct fence *f1, struct fence *f2)
+{
+	if (WARN_ON(f1->context != f2->context))
+		return false;
+
+	return f1->seqno - f2->seqno < INT_MAX;
+}
+
+/**
  * fence_later - return the chronologically later fence
  * @f1:	[in]	the first fence from the same context
  * @f2:	[in]	the second fence from the same context
@@ -298,14 +314,15 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
	 * set if enable_signaling wasn't called, and enabling that here is
	 * overkill.
	 */
-	if (f2->seqno - f1->seqno <= INT_MAX)
-		return fence_is_signaled(f2) ? NULL : f2;
-	else
+	if (fence_is_later(f1, f2))
		return fence_is_signaled(f1) ? NULL : f1;
+	else
+		return fence_is_signaled(f2) ? NULL : f2;
 }
 
 signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
-
+signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
+				   bool intr, signed long timeout);
 
 /**
  * fence_wait - sleep until the fence gets signaled
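The unsigned subtraction in fence_is_later() is what makes seqno wraparound safe; a standalone illustration in plain C (not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	/* same comparison as fence_is_later(), on bare 32-bit seqnos */
	static int seqno_later(uint32_t a, uint32_t b)
	{
		return a - b < INT32_MAX;
	}

	int main(void)
	{
		/* 1 is "later" than 0xffffffff: the counter just wrapped */
		printf("%d\n", seqno_later(1, 0xffffffffu));	/* prints 1 */
		printf("%d\n", seqno_later(5, 10));		/* prints 0 */
		return 0;
	}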
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bcca36e4bc1e..6230eb2a9cca 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1053,12 +1053,11 @@ extern void locks_remove_file(struct file *);
 extern void locks_release_private(struct file_lock *);
 extern void posix_test_lock(struct file *, struct file_lock *);
 extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
 extern int posix_unblock_lock(struct file_lock *);
 extern int vfs_test_lock(struct file *, struct file_lock *);
 extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
 extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
+extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
 extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
 extern void lease_get_mtime(struct inode *, struct timespec *time);
 extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
@@ -1144,12 +1143,6 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
	return -ENOLCK;
 }
 
-static inline int posix_lock_inode_wait(struct inode *inode,
-					struct file_lock *fl)
-{
-	return -ENOLCK;
-}
-
 static inline int posix_unblock_lock(struct file_lock *waiter)
 {
	return -ENOENT;
@@ -1171,8 +1164,7 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
	return 0;
 }
 
-static inline int flock_lock_inode_wait(struct inode *inode,
-					struct file_lock *request)
+static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
 {
	return -ENOLCK;
 }
@@ -1215,14 +1207,9 @@ static inline struct inode *file_inode(const struct file *f)
	return f->f_inode;
 }
 
-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
-{
-	return posix_lock_inode_wait(file_inode(filp), fl);
-}
-
-static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
 {
-	return flock_lock_inode_wait(file_inode(filp), fl);
+	return locks_lock_inode_wait(file_inode(filp), fl);
 }
 
 struct fasync_struct {
@@ -2422,6 +2409,7 @@ extern int write_inode_now(struct inode *, int);
 extern int filemap_fdatawrite(struct address_space *);
 extern int filemap_flush(struct address_space *);
 extern int filemap_fdatawait(struct address_space *);
+extern void filemap_fdatawait_keep_errors(struct address_space *);
 extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
				   loff_t lend);
 extern int filemap_write_and_wait(struct address_space *mapping);
Pages with similar + * mobility are placed within the same pageblocks to minimise problems due + * to external fragmentation. + * + * __GFP_MOVABLE (also a zone modifier) indicates that the page can be + * moved by page migration during memory compaction or can be reclaimed. + * + * __GFP_RECLAIMABLE is used for slab allocations that specify + * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. + * + * __GFP_WRITE indicates the caller intends to dirty the page. Where possible, + * these pages will be spread between local zones to avoid all the dirty + * pages being in one zone (fair zone allocation policy). + * + * __GFP_HARDWALL enforces the cpuset memory allocation policy. + * + * __GFP_THISNODE forces the allocation to be satisified from the requested + * node with no fallbacks or placement policy enforcements. + */ +#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) +#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) +#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) +#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) + /* - * Action modifiers - doesn't change the zoning + * Watermark modifiers -- controls access to emergency reserves + * + * __GFP_HIGH indicates that the caller is high-priority and that granting + * the request is necessary before the system can make forward progress. + * For example, creating an IO context to clean pages. + * + * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is + * high priority. Users are typically interrupt handlers. This may be + * used in conjunction with __GFP_HIGH + * + * __GFP_MEMALLOC allows access to all memory. This should only be used when + * the caller guarantees the allocation will allow more memory to be freed + * very shortly e.g. process exiting or swapping. Users either should + * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). + * + * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. + * This takes precedence over the __GFP_MEMALLOC flag if both are set. + * + * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement. + */ +#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) +#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) +#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) +#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) +#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) + +/* + * Reclaim modifiers + * + * __GFP_IO can start physical IO. + * + * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the + * allocator recursing into the filesystem which might already be holding + * locks. + * + * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. + * This flag can be cleared to avoid unnecessary delays when a fallback + * option is available. + * + * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when + * the low watermark is reached and have it reclaim pages until the high + * watermark is reached. A caller may wish to clear this flag when fallback + * options are available and the reclaim is likely to disrupt the system. The + * canonical example is THP allocation where a fallback is cheap but + * reclaim/compaction may cause indirect stalls. + * + * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. * * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt - * _might_ fail. This depends upon the particular VM implementation. + * _might_ fail. 
This depends upon the particular VM implementation. * * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller - * cannot handle allocation failures. New users should be evaluated carefully - * (and the flag should be used only when there is no reasonable failure policy) - * but it is definitely preferable to use the flag rather than opencode endless - * loop around allocator. + * cannot handle allocation failures. New users should be evaluated carefully + * (and the flag should be used only when there is no reasonable failure + * policy) but it is definitely preferable to use the flag rather than + * opencode endless loop around allocator. * * __GFP_NORETRY: The VM implementation must not retry indefinitely and will - * return NULL when direct reclaim and memory compaction have failed to allow - * the allocation to succeed. The OOM killer is not called with the current - * implementation. - * - * __GFP_MOVABLE: Flag that this page will be movable by the page migration - * mechanism or reclaimed + * return NULL when direct reclaim and memory compaction have failed to allow + * the allocation to succeed. The OOM killer is not called with the current + * implementation. */ -#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ -#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */ -#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ -#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */ -#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */ -#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */ -#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ -#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ -#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ -#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */ -#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ -#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ -#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves. 
 /*
- * Action modifiers - doesn't change the zoning
+ * Watermark modifiers -- controls access to emergency reserves
+ *
+ * __GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example, creating an IO context to clean pages.
+ *
+ * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
+ * high priority. Users are typically interrupt handlers. This may be
+ * used in conjunction with __GFP_HIGH
+ *
+ * __GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly e.g. process exiting or swapping. Users either should
+ * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
+ *
+ * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the __GFP_MEMALLOC flag if both are set.
+ *
+ * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
+ */
+#define __GFP_ATOMIC	((__force gfp_t)___GFP_ATOMIC)
+#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+#define __GFP_NOACCOUNT	((__force gfp_t)___GFP_NOACCOUNT)
+
+/*
+ * Reclaim modifiers
+ *
+ * __GFP_IO can start physical IO.
+ *
+ * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
  *
  * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
- * _might_ fail.  This depends upon the particular VM implementation.
  *
  * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. New users should be evaluated carefully
- * (and the flag should be used only when there is no reasonable failure policy)
- * but it is definitely preferable to use the flag rather than opencode endless
- * loop around allocator.
+ * cannot handle allocation failures. New users should be evaluated carefully
+ * (and the flag should be used only when there is no reasonable failure
+ * policy) but it is definitely preferable to use the flag rather than
+ * opencode endless loop around allocator.
  *
  * __GFP_NORETRY: The VM implementation must not retry indefinitely and will
- * return NULL when direct reclaim and memory compaction have failed to allow
- * the allocation to succeed. The OOM killer is not called with the current
- * implementation.
- *
- * __GFP_MOVABLE: Flag that this page will be movable by the page migration
- * mechanism or reclaimed
+ * return NULL when direct reclaim and memory compaction have failed to allow
+ * the allocation to succeed. The OOM killer is not called with the current
+ * implementation.
  */
-#define __GFP_WAIT	((__force gfp_t)___GFP_WAIT)	/* Can wait and reschedule? */
-#define __GFP_HIGH	((__force gfp_t)___GFP_HIGH)	/* Should access emergency pools? */
-#define __GFP_IO	((__force gfp_t)___GFP_IO)	/* Can start physical IO? */
-#define __GFP_FS	((__force gfp_t)___GFP_FS)	/* Can call down to low-level FS? */
-#define __GFP_COLD	((__force gfp_t)___GFP_COLD)	/* Cache-cold page required */
-#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)	/* Suppress page allocation failure warning */
-#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)	/* See above */
-#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)	/* See above */
-#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)	/* See above */
-#define __GFP_MEMALLOC	((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
-#define __GFP_COMP	((__force gfp_t)___GFP_COMP)	/* Add compound page metadata */
-#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)	/* Return zeroed page on success */
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
-							 * This takes precedence over the
-							 * __GFP_MEMALLOC flag if both are
-							 * set
-							 */
-#define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
-#define __GFP_THISNODE	((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
-#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
-#define __GFP_NOACCOUNT	((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
-#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)	/* Don't track with kmemcheck */
-
-#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
-#define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
+#define __GFP_IO	((__force gfp_t)___GFP_IO)
+#define __GFP_FS	((__force gfp_t)___GFP_FS)
+#define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+#define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_REPEAT	((__force gfp_t)___GFP_REPEAT)
+#define __GFP_NOFAIL	((__force gfp_t)___GFP_NOFAIL)
+#define __GFP_NORETRY	((__force gfp_t)___GFP_NORETRY)
 
 /*
- * This may seem redundant, but it's a way of annotating false positives vs.
- * allocations that simply cannot be supported (e.g. page tables).
+ * Action modifiers
+ *
+ * __GFP_COLD indicates that the caller does not expect the page to be used
+ * in the near future. Where possible, a cache-cold page will be returned.
+ *
+ * __GFP_NOWARN suppresses allocation failure reports.
+ *
+ * __GFP_COMP adds compound page metadata.
+ *
+ * __GFP_ZERO returns a zeroed page on success.
+ *
+ * __GFP_NOTRACK avoids tracking with kmemcheck.
+ *
+ * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
+ * distinguishing in the source between false positives and allocations that
+ * cannot be supported (e.g. page tables).
+ *
+ * __GFP_OTHER_NODE is for allocations that are on a remote node but that
+ * should not be accounted for as a remote allocation in vmstat. A
+ * typical user would be khugepaged collapsing a huge page on a remote
+ * node.
  */
+#define __GFP_COLD	((__force gfp_t)___GFP_COLD)
+#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
+#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
+#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
+#define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
-#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
+/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 26
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
-/* This equals 0, but use constants in case they ever change */
-#define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
-/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
-#define GFP_ATOMIC	(__GFP_HIGH)
-#define GFP_NOIO	(__GFP_WAIT)
-#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
A lower + * watermark is applied to allow access to "atomic reserves". + * + * GFP_KERNEL is typical for kernel-internal allocations. The caller requires + * ZONE_NORMAL or a lower zone for direct access but can enter direct reclaim. + * + * GFP_NOWAIT is for kernel allocations that should not stall for direct + * reclaim, start physical IO or use any filesystem callback. + * + * GFP_NOIO will use direct reclaim to discard clean pages or slab pages + * that do not require the starting of any physical IO. + * + * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. + * + * GFP_USER is for userspace allocations that also need to be directly + * accessible by the kernel or hardware. It is typically used by hardware + * for buffers that are mapped to userspace (e.g. graphics) and that the + * hardware must still DMA to. cpuset limits are enforced for these + * allocations. + * + * GFP_DMA exists for historical reasons and should be avoided where possible. + * The flag indicates that the caller requires that the lowest zone be + * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but + * it would require careful auditing as some users really require it and + * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the + * lowest zone as a type of emergency reserve. + * + * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit + * address. + * + * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, + * do not need to be directly accessible by the kernel but that cannot + * move once in use. An example may be a hardware allocation that maps + * data directly into userspace but has no addressing limitations. + * + * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not + * need direct access to but can use kmap() when access is required. They + * are expected to be movable via page reclaim or page migration. Typically, + * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE. + * + * GFP_TRANSHUGE is used for THP allocations. They are compound allocations + * that will fail quickly if memory is not available and will not wake + * kswapd on failure.
+ */ +#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) +#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) +#define GFP_NOIO (__GFP_RECLAIM) +#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) +#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \ __GFP_RECLAIMABLE) -#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) +#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) +#define GFP_DMA __GFP_DMA +#define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) -#define GFP_IOFS (__GFP_IO | __GFP_FS) -#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ - __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ - __GFP_NO_KSWAPD) +#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ + __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ + ~__GFP_KSWAPD_RECLAIM) -/* This mask makes up all the page movable related flags */ +/* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) +#define GFP_MOVABLE_SHIFT 3 -/* Control page allocator reclaim behavior */ -#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ - __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ - __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) - -/* Control slab gfp mask during early boot */ -#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) - -/* Control allocation constraints */ -#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) - -/* Do not use these with a slab allocator */ -#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) - -/* Flag - indicates that the buffer will be suitable for DMA. 
Ignored on some - platforms, used as appropriate on others */ - -#define GFP_DMA __GFP_DMA - -/* 4GB DMA on some platforms */ -#define GFP_DMA32 __GFP_DMA32 - -/* Convert GFP flags to their corresponding migrate type */ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) { - WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); + VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); + BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE); + BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE); if (unlikely(page_group_by_mobility_disabled)) return MIGRATE_UNMOVABLE; /* Group based on mobility */ - return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) | - ((gfp_flags & __GFP_RECLAIMABLE) != 0); + return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT; +} +#undef GFP_MOVABLE_MASK +#undef GFP_MOVABLE_SHIFT + +static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) +{ + return gfp_flags & __GFP_DIRECT_RECLAIM; } #ifdef CONFIG_HIGHMEM
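
To make the combinations above concrete, here is a minimal sketch of an allocation helper built on these masks; struct foo, foo_alloc() and the throttling policy are hypothetical, not part of gfp.h (kmalloc() assumes <linux/slab.h>):

static struct foo *foo_alloc(gfp_t gfp)
{
	/* Atomic callers pass GFP_NOWAIT or GFP_ATOMIC; others GFP_KERNEL. */
	struct foo *f = kmalloc(sizeof(*f), gfp);

	/* Throttle only when the caller's mask permits sleeping. */
	if (f && gfpflags_allow_blocking(gfp))
		cond_resched();
	return f;
}

diff --git a/include/linux/hid.h index f17980de2662..251a1d382e23 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -698,8 +698,8 @@ struct hid_driver { int (*input_mapped)(struct hid_device *hdev, struct hid_input *hidinput, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max); - void (*input_configured)(struct hid_device *hdev, - struct hid_input *hidinput); + int (*input_configured)(struct hid_device *hdev, + struct hid_input *hidinput); void (*feature_mapping)(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage); diff --git a/include/linux/highmem.h index 6aefcd0031a6..bb3f3297062a 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -78,7 +78,6 @@ static inline void __kunmap_atomic(void *addr) } #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) -#define kmap_atomic_to_page(ptr) virt_to_page(ptr) #define kmap_flush_unused() do {} while(0) #endif diff --git a/include/linux/hugetlb.h index 5e35379f58a5..685c262e0be8 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -483,6 +483,17 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, #define hugepages_supported() (HPAGE_SHIFT != 0) #endif +void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); + +static inline void hugetlb_count_add(long l, struct mm_struct *mm) +{ + atomic_long_add(l, &mm->hugetlb_usage); +} + +static inline void hugetlb_count_sub(long l, struct mm_struct *mm) +{ + atomic_long_sub(l, &mm->hugetlb_usage); +} #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; #define alloc_huge_page(v, a, r) NULL @@ -519,6 +530,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, { return &mm->page_table_lock; } + +static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) +{ +} + +static inline void hugetlb_count_sub(long l, struct mm_struct *mm) +{ +} #endif /* CONFIG_HUGETLB_PAGE */ static inline spinlock_t *huge_pte_lock(struct hstate *h, diff --git a/include/linux/hugetlb_cgroup.h index bcc853eccc85..24154c26d469 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -32,7 +32,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page) if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return NULL; - return (struct hugetlb_cgroup *)page[2].lru.next; + return (struct hugetlb_cgroup *)page[2].private; } static inline @@ -42,15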
+42,13 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg) if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) return -1; - page[2].lru.next = (void *)h_cg; + page[2].private = (unsigned long)h_cg; return 0; } static inline bool hugetlb_cgroup_disabled(void) { - if (hugetlb_cgrp_subsys.disabled) - return true; - return false; + return !cgroup_subsys_enabled(hugetlb_cgrp_subsys); } extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 54733d5b503e..8fdc17b84739 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -26,6 +26,7 @@ #define _HYPERV_H #include <uapi/linux/hyperv.h> +#include <uapi/asm/hyperv.h> #include <linux/types.h> #include <linux/scatterlist.h> diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h index 1c06b5c7c308..01edd96fe1f7 100644 --- a/include/linux/i2c-ocores.h +++ b/include/linux/i2c-ocores.h @@ -15,6 +15,7 @@ struct ocores_i2c_platform_data { u32 reg_shift; /* register offset shift value */ u32 reg_io_width; /* register io read/write width */ u32 clock_khz; /* input clock in kHz */ + bool big_endian; /* registers are big endian */ u8 num_devices; /* number of devices in the devices list */ struct i2c_board_info const *devices; /* devices connected to the bus */ }; diff --git a/include/linux/i2c/i2c-rcar.h b/include/linux/i2c/i2c-rcar.h deleted file mode 100644 index 496f5c2b23c9..000000000000 --- a/include/linux/i2c/i2c-rcar.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef __I2C_R_CAR_H__ -#define __I2C_R_CAR_H__ - -#include <linux/platform_device.h> - -struct i2c_rcar_platform_data { - u32 bus_speed; -}; - -#endif /* __I2C_R_CAR_H__ */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 810a34f60424..1c1ff7e4faa4 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -25,13 +25,6 @@ extern struct files_struct init_files; extern struct fs_struct init_fs; -#ifdef CONFIG_CGROUPS -#define INIT_GROUP_RWSEM(sig) \ - .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem), -#else -#define INIT_GROUP_RWSEM(sig) -#endif - #ifdef CONFIG_CPUSETS #define INIT_CPUSET_SEQ(tsk) \ .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), @@ -65,7 +58,6 @@ extern struct fs_struct init_fs; INIT_PREV_CPUTIME(sig) \ .cred_guard_mutex = \ __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ - INIT_GROUP_RWSEM(sig) \ } extern struct nsproxy init_nsproxy; diff --git a/include/linux/input.h b/include/linux/input.h index 82ce323b9986..1e967694e9a5 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -469,6 +469,8 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke); int input_set_keycode(struct input_dev *dev, const struct input_keymap_entry *ke); +void input_enable_softrepeat(struct input_dev *dev, int delay, int period); + extern struct class input_class; /** diff --git a/include/linux/input/edt-ft5x06.h b/include/linux/input/edt-ft5x06.h deleted file mode 100644 index 8a1e0d1a0124..000000000000 --- a/include/linux/input/edt-ft5x06.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef _EDT_FT5X06_H -#define _EDT_FT5X06_H - -/* - * Copyright (c) 2012 Simon Budig, <simon.budig@kernelconcepts.de> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- */ - -struct edt_ft5x06_platform_data { - int irq_pin; - int reset_pin; - - /* startup defaults for operational parameters */ - bool use_parameters; - u8 gain; - u8 threshold; - u8 offset; - u8 report_rate; -}; - -#endif /* _EDT_FT5X06_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 6240063bdcac..821273ca4873 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -1,5 +1,9 @@ /* - * Copyright (c) 2006, Intel Corporation. + * Copyright © 2006-2015, Intel Corporation. + * + * Authors: Ashok Raj <ashok.raj@intel.com> + * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> + * David Woodhouse <David.Woodhouse@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -13,10 +17,6 @@ * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. - * - * Copyright (C) 2006-2008 Intel Corporation - * Author: Ashok Raj <ashok.raj@intel.com> - * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> */ #ifndef _INTEL_IOMMU_H_ @@ -25,7 +25,10 @@ #include <linux/types.h> #include <linux/iova.h> #include <linux/io.h> +#include <linux/idr.h> #include <linux/dma_remapping.h> +#include <linux/mmu_notifier.h> +#include <linux/list.h> #include <asm/cacheflush.h> #include <asm/iommu.h> @@ -57,16 +60,21 @@ #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ #define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ +#define DMAR_PQH_REG 0xc0 /* Page request queue head register */ +#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ +#define DMAR_PQA_REG 0xd0 /* Page request queue address register */ +#define DMAR_PRS_REG 0xdc /* Page request status register */ +#define DMAR_PECTL_REG 0xe0 /* Page request event control register */ +#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ +#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ +#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ #define OFFSET_STRIDE (9) -/* -#define dmar_readl(dmar, reg) readl(dmar + reg) -#define dmar_readq(dmar, reg) ({ \ - u32 lo, hi; \ - lo = readl(dmar + reg); \ - hi = readl(dmar + reg + 4); \ - (((u64) hi) << 32) + lo; }) -*/ + +#ifdef CONFIG_64BIT +#define dmar_readq(a) readq(a) +#define dmar_writeq(a,v) writeq(v,a) +#else static inline u64 dmar_readq(void __iomem *addr) { u32 lo, hi; @@ -80,6 +88,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) writel((u32)val, addr); writel((u32)(val >> 32), addr + 4); } +#endif #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) #define DMAR_VER_MINOR(v) ((v) & 0x0f) @@ -123,7 +132,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) #define ecap_srs(e) ((e >> 31) & 0x1) #define ecap_ers(e) ((e >> 30) & 0x1) #define ecap_prs(e) ((e >> 29) & 0x1) -/* PASID support used to be on bit 28 */ +#define ecap_broken_pasid(e) ((e >> 28) & 0x1) #define ecap_dis(e) ((e >> 27) & 0x1) #define ecap_nest(e) ((e >> 26) & 0x1) #define ecap_mts(e) ((e >> 25) & 0x1) @@ -253,6 +262,11 @@ enum { #define QI_DIOTLB_TYPE 0x3 #define QI_IEC_TYPE 0x4 #define QI_IWD_TYPE 0x5 +#define QI_EIOTLB_TYPE 0x6 +#define QI_PC_TYPE 0x7 +#define QI_DEIOTLB_TYPE 0x8 +#define QI_PGRP_RESP_TYPE 0x9 +#define 
QI_PSTRM_RESP_TYPE 0xa #define QI_IEC_SELECTIVE (((u64)1) << 4) #define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) @@ -280,6 +294,53 @@ enum { #define QI_DEV_IOTLB_SIZE 1 #define QI_DEV_IOTLB_MAX_INVS 32 +#define QI_PC_PASID(pasid) (((u64)pasid) << 32) +#define QI_PC_DID(did) (((u64)did) << 16) +#define QI_PC_GRAN(gran) (((u64)gran) << 4) + +#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0)) +#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) + +#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +#define QI_EIOTLB_GL(gl) (((u64)gl) << 7) +#define QI_EIOTLB_IH(ih) (((u64)ih) << 6) +#define QI_EIOTLB_AM(am) (((u64)am)) +#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) +#define QI_EIOTLB_DID(did) (((u64)did) << 16) +#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) + +#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) +#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) +#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) +#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16) +#define QI_DEV_EIOTLB_MAX_INVS 32 + +#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) +#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32) +#define QI_PGRP_RESP_CODE(res) ((u64)(res)) +#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) +#define QI_PGRP_DID(did) (((u64)(did)) << 16) +#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) + +#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK) +#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4) +#define QI_PSTRM_RESP_CODE(res) ((u64)(res)) +#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55) +#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32) +#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24) +#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4) + +#define QI_RESP_SUCCESS 0x0 +#define QI_RESP_INVALID 0x1 +#define QI_RESP_FAILURE 0xf + +#define QI_GRAN_ALL_ALL 0 +#define QI_GRAN_NONG_ALL 1 +#define QI_GRAN_NONG_PASID 2 +#define QI_GRAN_PSI_PASID 3 + struct qi_desc { u64 low, high; }; @@ -327,6 +388,10 @@ enum { #define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) +struct pasid_entry; +struct pasid_state_entry; +struct page_req_dsc; + struct intel_iommu { void __iomem *reg; /* Pointer to hardware regs, virtual addr */ u64 reg_phys; /* physical address of hw register set */ @@ -338,7 +403,7 @@ struct intel_iommu { int seq_id; /* sequence id of the iommu */ int agaw; /* agaw of this iommu */ int msagaw; /* max sagaw of this iommu */ - unsigned int irq; + unsigned int irq, pr_irq; u16 segment; /* PCI segment# */ unsigned char name[13]; /* Device Name */ @@ -350,6 +415,18 @@ struct intel_iommu { struct iommu_flush flush; #endif +#ifdef CONFIG_INTEL_IOMMU_SVM + /* These are large and need to be contiguous, so we allocate just + * one for now. We'll maybe want to rethink that if we truly give + * devices away to userspace processes (e.g. for DPDK) and don't + * want to trust that userspace will use *only* the PASID it was + * told to. But while it's all driver-arbitrated, we're fine. 
*/ + struct pasid_entry *pasid_table; + struct pasid_state_entry *pasid_state_table; + struct page_req_dsc *prq; + unsigned char prq_name[16]; /* Name for PRQ interrupt */ + struct idr pasid_idr; +#endif struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ @@ -389,6 +466,38 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int dmar_ir_support(void); +#ifdef CONFIG_INTEL_IOMMU_SVM +extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu); +extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu); +extern int intel_svm_enable_prq(struct intel_iommu *iommu); +extern int intel_svm_finish_prq(struct intel_iommu *iommu); + +struct svm_dev_ops; + +struct intel_svm_dev { + struct list_head list; + struct rcu_head rcu; + struct device *dev; + struct svm_dev_ops *ops; + int users; + u16 did; + u16 dev_iotlb:1; + u16 sid, qdep; +}; + +struct intel_svm { + struct mmu_notifier notifier; + struct mm_struct *mm; + struct intel_iommu *iommu; + int flags; + int pasid; + struct list_head devs; +}; + +extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev); +extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); +#endif + extern const struct attribute_group *intel_iommu_groups[]; #endif diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h new file mode 100644 index 000000000000..3c25794042f9 --- /dev/null +++ b/include/linux/intel-svm.h @@ -0,0 +1,121 @@ +/* + * Copyright © 2015 Intel Corporation. + * + * Authors: David Woodhouse <David.Woodhouse@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __INTEL_SVM_H__ +#define __INTEL_SVM_H__ + +struct device; + +struct svm_dev_ops { + void (*fault_cb)(struct device *dev, int pasid, u64 address, + u32 private, int rwxp, int response); +}; + +/* Values for rxwp in fault_cb callback */ +#define SVM_REQ_READ (1<<3) +#define SVM_REQ_WRITE (1<<2) +#define SVM_REQ_EXEC (1<<1) +#define SVM_REQ_PRIV (1<<0) + + +/* + * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" + * PASID for the current process. Even if a PASID already exists, a new one + * will be allocated. And the PASID allocated with SVM_FLAG_PRIVATE_PASID + * will not be given to subsequent callers. This facility allows a driver to + * disambiguate between multiple device contexts which access the same MM, + * if there is no other way to do so. It should be used sparingly, if at all. + */ +#define SVM_FLAG_PRIVATE_PASID (1<<0) + +/* + * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only + * for access to kernel addresses. No IOTLB flushes are automatically done + * for kernel mappings; it is valid only for access to the kernel's static + * 1:1 mapping of physical memory — not to vmalloc or even module mappings. + * A future API addition may permit the use of such ranges, by means of an + * explicit IOTLB flush call (akin to the DMA API's unmap method). 
+ * + * It is unlikely that we will ever hook into flush_tlb_kernel_range() to + * do such IOTLB flushes automatically. + */ +#define SVM_FLAG_SUPERVISOR_MODE (1<<1) + +#ifdef CONFIG_INTEL_IOMMU_SVM + +/** + * intel_svm_bind_mm() - Bind the current process to a PASID + * @dev: Device to be granted access + * @pasid: Address for allocated PASID + * @flags: Flags; see the SVM_FLAG_* values defined above + * @ops: Callbacks to device driver + * + * This function attempts to enable PASID support for the given device. + * If the @pasid argument is non-%NULL, a PASID is allocated for access + * to the MM of the current process. + * + * By using a %NULL value for the @pasid argument, this function can + * be used to simply validate that PASID support is available for the + * given device — i.e. that it is behind an IOMMU which has the + * requisite support, and is enabled. + * + * Page faults are handled transparently by the IOMMU code, and there + * should be no need for the device driver to be involved. If a page + * fault cannot be handled (i.e. the address is invalid, rather than + * merely in need of paging in), then the page request will be completed by + * the core IOMMU code with appropriate status, and the device itself + * can then report the resulting fault to its driver via whatever + * mechanism is appropriate. + * + * Multiple calls from the same process may result in the same PASID + * being re-used. A reference count is kept. + */ +extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, + struct svm_dev_ops *ops); + +/** + * intel_svm_unbind_mm() - Unbind a specified PASID + * @dev: Device for which PASID was allocated + * @pasid: PASID value to be unbound + * + * This function allows a PASID to be retired when the device no + * longer requires access to the address space of a given process. + * + * If the use count for the PASID in question reaches zero, the + * PASID is revoked and may no longer be used by hardware. + * + * Device drivers are required to ensure that no access (including + * page requests) is currently outstanding for the PASID in question, + * before calling this function.
+ */ +extern int intel_svm_unbind_mm(struct device *dev, int pasid); + +#else /* CONFIG_INTEL_IOMMU_SVM */ + +static inline int intel_svm_bind_mm(struct device *dev, int *pasid, + int flags, struct svm_dev_ops *ops) +{ + return -ENOSYS; +} + +static inline int intel_svm_unbind_mm(struct device *dev, int pasid) +{ + BUG(); +} +#endif /* CONFIG_INTEL_IOMMU_SVM */ + +#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL)) + +#endif /* __INTEL_SVM_H__ */
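
As an illustrative sketch (not a mandated pattern), a driver might validate SVM support and bind the current process as shown below; the mydev_* names are hypothetical, and teardown would call intel_svm_unbind_mm() with the PASID returned here:

/* Hypothetical fault callback; e.g. log faults the IOMMU could not resolve. */
static void mydev_fault_cb(struct device *dev, int pasid, u64 address,
			   u32 private, int rwxp, int response)
{
}

static struct svm_dev_ops mydev_svm_ops = {
	.fault_cb = mydev_fault_cb,
};

static int mydev_attach_current_mm(struct device *dev, int *pasid)
{
	if (!intel_svm_available(dev))
		return -ENODEV;

	/* On success, *pasid is programmed into the device's DMA context. */
	return intel_svm_bind_mm(dev, pasid, 0, &mydev_svm_ops);
}

diff --git a/include/linux/io-64-nonatomic-hi-lo.h new file mode 100644 index 000000000000..11d7e840d913 --- /dev/null +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -0,0 +1,32 @@ +#ifndef _LINUX_IO_64_NONATOMIC_HI_LO_H_ +#define _LINUX_IO_64_NONATOMIC_HI_LO_H_ + +#include <linux/io.h> +#include <asm-generic/int-ll64.h> + +static inline __u64 hi_lo_readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + high = readl(p + 1); + low = readl(p); + + return low + ((u64)high << 32); +} + +static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val >> 32, addr + 4); + writel(val, addr); +} + +#ifndef readq +#define readq hi_lo_readq +#endif + +#ifndef writeq +#define writeq hi_lo_writeq +#endif + +#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ diff --git a/include/linux/io-64-nonatomic-lo-hi.h new file mode 100644 index 000000000000..1a4315f97360 --- /dev/null +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -0,0 +1,32 @@ +#ifndef _LINUX_IO_64_NONATOMIC_LO_HI_H_ +#define _LINUX_IO_64_NONATOMIC_LO_HI_H_ + +#include <linux/io.h> +#include <asm-generic/int-ll64.h> + +static inline __u64 lo_hi_readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} + +static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} + +#ifndef readq +#define readq lo_hi_readq +#endif + +#ifndef writeq +#define writeq lo_hi_writeq +#endif + +#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */

A brief sketch of the intended use of these two headers, assuming the device tolerates a 64-bit access being issued as two 32-bit halves, low word first (the register offset and names are hypothetical):

#include <linux/io-64-nonatomic-lo-hi.h>

static u64 mydev_read_counter(void __iomem *base)
{
	/* readq() is the native 64-bit read where the architecture has one,
	 * and the lo_hi_readq() fallback above otherwise. */
	return readq(base + 0x10);
}

diff --git a/include/linux/iommu-common.h index bbced83b32ee..376a27c9cc6a 100644 --- a/include/linux/iommu-common.h +++ b/include/linux/iommu-common.h @@ -7,6 +7,7 @@ #define IOMMU_POOL_HASHBITS 4 #define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS) +#define IOMMU_ERROR_CODE (~(unsigned long) 0) struct iommu_pool { unsigned long start; diff --git a/include/linux/iommu.h index f9c1b6d0f2e4..f28dff313b07 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -81,6 +81,7 @@ struct iommu_domain { iommu_fault_handler_t handler; void *handler_token; struct iommu_domain_geometry geometry; + void *iova_cookie; }; enum iommu_cap { @@ -167,7 +168,7 @@ struct iommu_ops { phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); - int (*device_group)(struct device *dev, unsigned int *groupid); + struct iommu_group *(*device_group)(struct device *dev); int (*domain_get_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); int (*domain_set_attr)(struct iommu_domain *domain, @@ -316,6 +317,11 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain, return domain->ops->map_sg(domain, iova, sg, nents, prot); } +/* PCI device grouping function */ +extern struct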
iommu_group *pci_device_group(struct device *dev); +/* Generic device grouping function */ +extern struct iommu_group *generic_device_group(struct device *dev); + #else /* CONFIG_IOMMU_API */ struct iommu_ops {}; diff --git a/include/linux/irqbypass.h new file mode 100644 index 000000000000..1551b5b2f4c2 --- /dev/null +++ b/include/linux/irqbypass.h @@ -0,0 +1,90 @@ +/* + * IRQ offload/bypass manager + * + * Copyright (C) 2015 Red Hat, Inc. + * Copyright (c) 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef IRQBYPASS_H +#define IRQBYPASS_H + +#include <linux/list.h> + +struct irq_bypass_consumer; + +/* + * Theory of operation + * + * The IRQ bypass manager is a simple set of lists and callbacks that allows + * IRQ producers (ex. physical interrupt sources) to be matched to IRQ + * consumers (ex. virtualization hardware that allows IRQ bypass or offload) + * via a shared token (ex. eventfd_ctx). Producers and consumers register + * independently. When a token match is found, the optional @stop callback + * will be called for each participant. The pair will then be connected via + * the @add_* callbacks, and finally the optional @start callback will allow + * any final coordination. When either participant is unregistered, the + * process is repeated using the @del_* callbacks in place of the @add_* + * callbacks. Match tokens must be unique per producer/consumer; 1:N pairings + * are not supported. + */ + +/** + * struct irq_bypass_producer - IRQ bypass producer definition + * @node: IRQ bypass manager private list management + * @token: opaque token to match between producer and consumer + * @irq: Linux IRQ number for the producer device + * @add_consumer: Connect the IRQ producer to an IRQ consumer (optional) + * @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional) + * @stop: Perform any quiesce operations necessary prior to add/del (optional) + * @start: Perform any startup operations necessary after add/del (optional) + * + * The IRQ bypass producer structure represents an interrupt source for + * participation in possible host bypass, for instance an interrupt vector + * for a physical device assigned to a VM. + */ +struct irq_bypass_producer { + struct list_head node; + void *token; + int irq; + int (*add_consumer)(struct irq_bypass_producer *, + struct irq_bypass_consumer *); + void (*del_consumer)(struct irq_bypass_producer *, + struct irq_bypass_consumer *); + void (*stop)(struct irq_bypass_producer *); + void (*start)(struct irq_bypass_producer *); +}; + +/** + * struct irq_bypass_consumer - IRQ bypass consumer definition + * @node: IRQ bypass manager private list management + * @token: opaque token to match between producer and consumer + * @add_producer: Connect the IRQ consumer to an IRQ producer + * @del_producer: Disconnect the IRQ consumer from an IRQ producer + * @stop: Perform any quiesce operations necessary prior to add/del (optional) + * @start: Perform any startup operations necessary after add/del (optional) + * + * The IRQ bypass consumer structure represents an interrupt sink for + * participation in possible host bypass, for instance a hypervisor may + * support offloads to allow bypassing the host entirely or offload + * portions of the interrupt handling to the VM.
+ */ +struct irq_bypass_consumer { + struct list_head node; + void *token; + int (*add_producer)(struct irq_bypass_consumer *, + struct irq_bypass_producer *); + void (*del_producer)(struct irq_bypass_consumer *, + struct irq_bypass_producer *); + void (*stop)(struct irq_bypass_consumer *); + void (*start)(struct irq_bypass_consumer *); +}; + +int irq_bypass_register_producer(struct irq_bypass_producer *); +void irq_bypass_unregister_producer(struct irq_bypass_producer *); +int irq_bypass_register_consumer(struct irq_bypass_consumer *); +void irq_bypass_unregister_consumer(struct irq_bypass_consumer *); + +#endif /* IRQBYPASS_H */
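
To make the registration flow in the theory-of-operation comment concrete, a hedged sketch of the producer side follows; the eventfd token and the mydev_* names are hypothetical, and a consumer (e.g. virtualization hardware support code) would register a matching token independently:

static struct irq_bypass_producer mydev_producer;

static int mydev_enable_bypass(struct eventfd_ctx *eventfd, int irq)
{
	mydev_producer.token = eventfd;	/* must match the consumer's token */
	mydev_producer.irq = irq;
	/* add_consumer/del_consumer/stop/start may be left NULL */
	return irq_bypass_register_producer(&mydev_producer);
}

diff --git a/include/linux/jbd2.h index df07e78487d5..65407f6c9120 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -278,6 +278,7 @@ typedef struct journal_superblock_s /* 0x0400 */ } journal_superblock_t; +/* Use the jbd2_{has,set,clear}_feature_* helpers; these will be removed */ #define JBD2_HAS_COMPAT_FEATURE(j,mask) \ ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask)))) @@ -288,7 +289,7 @@ typedef struct journal_superblock_s ((j)->j_format_version >= 2 && \ ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask)))) -#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 +#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 #define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 @@ -296,6 +297,8 @@ typedef struct journal_superblock_s #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 #define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 +/* See "journal feature predicate functions" below */ + /* Features known to this kernel version: */ #define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM #define JBD2_KNOWN_ROCOMPAT_FEATURES 0 @@ -1034,6 +1037,69 @@ struct journal_s __u32 j_csum_seed; }; +/* journal feature predicate functions */ +#define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \ +static inline bool jbd2_has_feature_##name(journal_t *j) \ +{ \ + return ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_compat & \ + cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname)) != 0); \ +} \ +static inline void jbd2_set_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_compat |= \ + cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \ +} \ +static inline void jbd2_clear_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_compat &= \ + ~cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname); \ +} + +#define JBD2_FEATURE_RO_COMPAT_FUNCS(name, flagname) \ +static inline bool jbd2_has_feature_##name(journal_t *j) \ +{ \ + return ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_ro_compat & \ + cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname)) != 0); \ +} \ +static inline void jbd2_set_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_ro_compat |= \ + cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \ +} \ +static inline void jbd2_clear_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_ro_compat &= \ + ~cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname); \ +} + +#define JBD2_FEATURE_INCOMPAT_FUNCS(name, flagname) \ +static inline bool jbd2_has_feature_##name(journal_t *j) \ +{ \ + return ((j)->j_format_version >= 2 && \ + ((j)->j_superblock->s_feature_incompat & \ + cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname)) != 0); \ +} \ +static inline void jbd2_set_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_incompat |= \ + cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \ +} \ +static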
inline void jbd2_clear_feature_##name(journal_t *j) \ +{ \ + (j)->j_superblock->s_feature_incompat &= \ + ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname); \ +} + +JBD2_FEATURE_COMPAT_FUNCS(checksum, CHECKSUM) + +JBD2_FEATURE_INCOMPAT_FUNCS(revoke, REVOKE) +JBD2_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT) +JBD2_FEATURE_INCOMPAT_FUNCS(async_commit, ASYNC_COMMIT) +JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2) +JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) + /* * Journal flag definitions */ @@ -1046,6 +1112,7 @@ struct journal_s #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file * data write error in ordered * mode */ +#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */ /* * Function declarations for the journaling transaction and buffer @@ -1338,13 +1405,17 @@ static inline int tid_geq(tid_t x, tid_t y) extern int jbd2_journal_blocks_per_page(struct inode *inode); extern size_t journal_tag_bytes(journal_t *journal); +static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j) +{ + return jbd2_has_feature_csum2(j) || jbd2_has_feature_csum3(j); +} + static inline int jbd2_journal_has_csum_v2or3(journal_t *journal) { - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) || - JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) - return 1; + WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) && + journal->j_chksum_driver == NULL); - return 0; + return journal->j_chksum_driver != NULL; } /* @@ -1444,4 +1515,7 @@ static inline tid_t jbd2_get_latest_transaction(journal_t *journal) #endif /* __KERNEL__ */ +#define EFSBADCRC EBADMSG /* Bad CRC detected */ +#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ + #endif /* _LINUX_JBD2_H */
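
The predicates generated by the JBD2_FEATURE_*_FUNCS invocations above read naturally at call sites; a small illustrative sketch (the journal pointer comes from the caller, and the upgrade policy shown is invented):

static void example_upgrade_to_csum3(journal_t *journal)
{
	if (jbd2_has_feature_csum2(journal))
		jbd2_clear_feature_csum2(journal);
	jbd2_set_feature_csum3(journal);
}

diff --git a/include/linux/jump_label.h index f1094238ab2a..8dde55974f18 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -214,11 +214,6 @@ static inline int jump_label_apply_nops(struct module *mod) #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE #define jump_label_enabled static_key_enabled -static inline bool static_key_enabled(struct static_key *key) -{ - return static_key_count(key) > 0; -} - static inline void static_key_enable(struct static_key *key) { int count = static_key_count(key); @@ -265,6 +260,17 @@ struct static_key_false { #define DEFINE_STATIC_KEY_FALSE(name) \ struct static_key_false name = STATIC_KEY_FALSE_INIT +extern bool ____wrong_branch_error(void); + +#define static_key_enabled(x) \ +({ \ + if (!__builtin_types_compatible_p(typeof(*x), struct static_key) && \ + !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\ + !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ + ____wrong_branch_error(); \ + static_key_count((struct static_key *)x) > 0; \ +}) + #ifdef HAVE_JUMP_LABEL /* @@ -323,8 +329,6 @@ struct static_key_false { * See jump_label_type() / jump_label_init_type().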
 */ -extern bool ____wrong_branch_error(void); - #define static_branch_likely(x) \ ({ \ bool branch; \ diff --git a/include/linux/kdev_t.h index c838abe3ee0a..052c7b32cc91 100644 --- a/include/linux/kdev_t.h +++ b/include/linux/kdev_t.h @@ -20,7 +20,7 @@ }) /* acceptable for old filesystems */ -static inline int old_valid_dev(dev_t dev) +static inline bool old_valid_dev(dev_t dev) { return MAJOR(dev) < 256 && MINOR(dev) < 256; } @@ -35,7 +35,7 @@ static inline dev_t old_decode_dev(u16 val) return MKDEV((val >> 8) & 255, val & 255); } -static inline int new_valid_dev(dev_t dev) +static inline bool new_valid_dev(dev_t dev) { return 1; } @@ -54,11 +54,6 @@ static inline dev_t new_decode_dev(u32 dev) return MKDEV(major, minor); } -static inline int huge_valid_dev(dev_t dev) -{ - return 1; -} - static inline u64 huge_encode_dev(dev_t dev) { return new_encode_dev(dev); } diff --git a/include/linux/kernel.h index 5582410727cb..350dfb08aee3 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -200,28 +200,28 @@ extern int _cond_resched(void); #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) -/* - * abs() handles unsigned and signed longs, ints, shorts and chars. For all - * input types abs() returns a signed long. - * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64() - * for those. +/** + * abs - return absolute value of an argument + * @x: the value. If it is of an unsigned type, it is converted to a signed + * type first (s64, long or int, depending on its size). + * + * Return: the absolute value of @x. If @x is 64-bit, the macro's return type + * is s64; otherwise it is signed long. */ -#define abs(x) ({ \ - long ret; \ - if (sizeof(x) == sizeof(long)) { \ - long __x = (x); \ - ret = (__x < 0) ? -__x : __x; \ - } else { \ - int __x = (x); \ - ret = (__x < 0) ? -__x : __x; \ - } \ - ret; \ - }) - -#define abs64(x) ({ \ - s64 __x = (x); \ - (__x < 0) ? -__x : __x; \ - }) +#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \ + s64 __x = (x); \ + (__x < 0) ? -__x : __x; \ + }), ({ \ + long ret; \ + if (sizeof(x) == sizeof(long)) { \ + long __x = (x); \ + ret = (__x < 0) ? -__x : __x; \ + } else { \ + int __x = (x); \ + ret = (__x < 0) ?
-__x : __x; \ + } \ + ret; \ + })) /** * reciprocal_scale - "scale" a value into range [0, ep_ro) @@ -413,6 +413,8 @@ extern __printf(2, 3) char *kasprintf(gfp_t gfp, const char *fmt, ...); extern __printf(2, 0) char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); +extern __printf(2, 0) +const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); extern __scanf(2, 3) int sscanf(const char *, const char *, ...); diff --git a/include/linux/key-type.h b/include/linux/key-type.h index ff9f1d394235..7463355a198b 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -40,8 +40,7 @@ struct key_construction { */ struct key_preparsed_payload { char *description; /* Proposed key description (or NULL) */ - void *type_data[2]; /* Private key-type data */ - void *payload[2]; /* Proposed payload */ + union key_payload payload; /* Proposed payload */ const void *data; /* Raw data */ size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ diff --git a/include/linux/key.h b/include/linux/key.h index e1d4715f3222..66f705243985 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -89,6 +89,11 @@ struct keyring_index_key { size_t desc_len; }; +union key_payload { + void __rcu *rcu_data0; + void *data[4]; +}; + /*****************************************************************************/ /* * key reference with possession attribute handling @@ -186,28 +191,18 @@ struct key { }; }; - /* type specific data - * - this is used by the keyring type to index the name - */ - union { - struct list_head link; - unsigned long x[2]; - void *p[2]; - int reject_error; - } type_data; - /* key data * - this is used to hold the data actually used in cryptography or * whatever */ union { - union { - unsigned long value; - void __rcu *rcudata; - void *data; - void *data2[2]; - } payload; - struct assoc_array keys; + union key_payload payload; + struct { + /* Keyring bits */ + struct list_head name_link; + struct assoc_array keys; + }; + int reject_error; }; }; @@ -336,12 +331,12 @@ static inline bool key_is_instantiated(const struct key *key) } #define rcu_dereference_key(KEY) \ - (rcu_dereference_protected((KEY)->payload.rcudata, \ + (rcu_dereference_protected((KEY)->payload.rcu_data0, \ rwsem_is_locked(&((struct key *)(KEY))->sem))) #define rcu_assign_keypointer(KEY, PAYLOAD) \ do { \ - rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \ + rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \ } while (0) #ifdef CONFIG_SYSCTL diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 1bef9e21e725..242a6d2b53ff 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -24,6 +24,7 @@ #include <linux/err.h> #include <linux/irqflags.h> #include <linux/context_tracking.h> +#include <linux/irqbypass.h> #include <asm/signal.h> #include <linux/kvm.h> @@ -140,6 +141,8 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_APIC_PAGE_RELOAD 25 #define KVM_REQ_SMI 26 #define KVM_REQ_HV_CRASH 27 +#define KVM_REQ_IOAPIC_EOI_EXIT 28 +#define KVM_REQ_HV_RESET 29 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 @@ -231,6 +234,9 @@ struct kvm_vcpu { unsigned long requests; unsigned long guest_debug; + int pre_pcpu; + struct list_head blocked_vcpu_list; + struct mutex mutex; struct kvm_run *run; @@ -329,6 +335,18 @@ struct kvm_kernel_irq_routing_entry { struct hlist_node link; }; +#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING +struct kvm_irq_routing_table { + int 
chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS]; + u32 nr_rt_entries; + /* + * Array indexed by gsi. Each entry contains list of irq chips + * the gsi is connected to. + */ + struct hlist_head map[0]; +}; +#endif + #ifndef KVM_PRIVATE_MEM_SLOTS #define KVM_PRIVATE_MEM_SLOTS 0 #endif @@ -455,10 +473,14 @@ void vcpu_put(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_IOAPIC void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); +void kvm_arch_irq_routing_update(struct kvm *kvm); #else static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) { } +static inline void kvm_arch_irq_routing_update(struct kvm *kvm) +{ +} #endif #ifdef CONFIG_HAVE_KVM_IRQFD @@ -625,6 +647,8 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_vcpu_block(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu); +void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); @@ -803,10 +827,13 @@ int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status); -int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level); int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, int irq_source_id, int level, bool line_status); +int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status); bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); +void kvm_notify_acked_gsi(struct kvm *kvm, int gsi); void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); @@ -1002,6 +1029,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) #endif int kvm_setup_default_irq_routing(struct kvm *kvm); +int kvm_setup_empty_irq_routing(struct kvm *kvm); int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *entries, unsigned nr, @@ -1144,5 +1172,15 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) { } #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ -#endif +#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS +int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, + struct irq_bypass_producer *); +void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, + struct irq_bypass_producer *); +void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); +void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); +int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, + uint32_t guest_irq, bool set); +#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ +#endif diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h new file mode 100644 index 000000000000..0c1de05098c8 --- /dev/null +++ b/include/linux/kvm_irqfd.h @@ -0,0 +1,71 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * irqfd: Allows an fd to be used to inject an interrupt to the guest + * Credit goes to Avi Kivity for the original idea. + */ + +#ifndef __LINUX_KVM_IRQFD_H +#define __LINUX_KVM_IRQFD_H + +#include <linux/kvm_host.h> +#include <linux/poll.h> + +/* + * Resampling irqfds are a special variety of irqfds used to emulate + * level triggered interrupts. The interrupt is asserted on eventfd + * trigger. On acknowledgment through the irq ack notifier, the + * interrupt is de-asserted and userspace is notified through the + * resamplefd. All resamplers on the same gsi are de-asserted + * together, so we don't need to track the state of each individual + * user. We can also therefore share the same irq source ID. + */ +struct kvm_kernel_irqfd_resampler { + struct kvm *kvm; + /* + * List of resampling struct _irqfd objects sharing this gsi. + * RCU list modified under kvm->irqfds.resampler_lock + */ + struct list_head list; + struct kvm_irq_ack_notifier notifier; + /* + * Entry in list of kvm->irqfd.resampler_list. Use for sharing + * resamplers among irqfds on the same gsi. + * Accessed and modified under kvm->irqfds.resampler_lock + */ + struct list_head link; +}; + +struct kvm_kernel_irqfd { + /* Used for MSI fast-path */ + struct kvm *kvm; + wait_queue_t wait; + /* Update side is protected by irqfds.lock */ + struct kvm_kernel_irq_routing_entry irq_entry; + seqcount_t irq_entry_sc; + /* Used for level IRQ fast-path */ + int gsi; + struct work_struct inject; + /* The resampler used by this irqfd (resampler-only) */ + struct kvm_kernel_irqfd_resampler *resampler; + /* Eventfd notified on resample (resampler-only) */ + struct eventfd_ctx *resamplefd; + /* Entry in list of irqfds for a resampler (resampler-only) */ + struct list_head resampler_link; + /* Used for setup/shutdown */ + struct eventfd_ctx *eventfd; + struct list_head list; + poll_table pt; + struct work_struct shutdown; + struct irq_bypass_consumer consumer; + struct irq_bypass_producer *producer; +}; + +#endif /* __LINUX_KVM_IRQFD_H */ diff --git a/include/linux/libata.h b/include/linux/libata.h index c9cfbcdb8d14..83577f8fd15b 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -254,6 +254,7 @@ enum { ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */ ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ + ATA_PFLAG_EXTERNAL = (1 << 22), /* eSATA/external port */ /* struct ata_queued_cmd flags */ ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index c518eb589260..24daf8fc4d7c 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -89,10 +89,6 @@ int memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int nid, unsigned long flags); -int memblock_remove_range(struct memblock_type *type, - phys_addr_t base, - phys_addr_t size); - void __next_mem_range(u64 *idx, int nid, ulong flags, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 3e3318ddfc0e..cd0e2413c358 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -213,6 +213,9 @@ struct mem_cgroup { /* OOM-Killer disable */ int oom_kill_disable; + /* handle for "memory.events" */ + struct cgroup_file events_file; + /* protect arrays of thresholds */ struct mutex thresholds_lock; @@ -285,6 +288,7 @@ static inline void mem_cgroup_events(struct 
mem_cgroup *memcg, unsigned int nr) { this_cpu_add(memcg->stat->events[idx], nr); + cgroup_file_notify(&memcg->events_file); } bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); @@ -297,8 +301,7 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg); void mem_cgroup_uncharge(struct page *page); void mem_cgroup_uncharge_list(struct list_head *page_list); -void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, - bool lrucare); +void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage); struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); @@ -346,9 +349,7 @@ ino_t page_cgroup_ino(struct page *page); static inline bool mem_cgroup_disabled(void) { - if (memory_cgrp_subsys.disabled) - return true; - return false; + return !cgroup_subsys_enabled(memory_cgrp_subsys); } /* @@ -382,7 +383,7 @@ unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) return mz->lru_size[lru]; } -static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) +static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) { unsigned long inactive_ratio; unsigned long inactive; @@ -401,24 +402,26 @@ static inline int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) return inactive * inactive_ratio < active; } +void mem_cgroup_handle_over_high(void); + void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p); static inline void mem_cgroup_oom_enable(void) { - WARN_ON(current->memcg_oom.may_oom); - current->memcg_oom.may_oom = 1; + WARN_ON(current->memcg_may_oom); + current->memcg_may_oom = 1; } static inline void mem_cgroup_oom_disable(void) { - WARN_ON(!current->memcg_oom.may_oom); - current->memcg_oom.may_oom = 0; + WARN_ON(!current->memcg_may_oom); + current->memcg_may_oom = 0; } static inline bool task_in_memcg_oom(struct task_struct *p) { - return p->memcg_oom.memcg; + return p->memcg_in_oom; } bool mem_cgroup_oom_synchronize(bool wait); @@ -535,9 +538,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) { } -static inline void mem_cgroup_migrate(struct page *oldpage, - struct page *newpage, - bool lrucare) +static inline void mem_cgroup_replace_page(struct page *old, struct page *new) { } @@ -583,10 +584,10 @@ static inline bool mem_cgroup_disabled(void) return true; } -static inline int +static inline bool mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) { - return 1; + return true; } static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec) @@ -620,6 +621,10 @@ static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) { } +static inline void mem_cgroup_handle_over_high(void) +{ +} + static inline void mem_cgroup_oom_enable(void) { } @@ -746,11 +751,10 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) * conditions, but because they are pretty simple, they are expected to be * fast. */ -bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, - int order); -void __memcg_kmem_commit_charge(struct page *page, - struct mem_cgroup *memcg, int order); -void __memcg_kmem_uncharge_pages(struct page *page, int order); +int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, + struct mem_cgroup *memcg); +int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); +void __memcg_kmem_uncharge(struct page *page, int order); /* * helper for accessing a memcg's index.
It will be used as an index in the @@ -765,77 +769,42 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); void __memcg_kmem_put_cache(struct kmem_cache *cachep); -struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr); - -int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, - unsigned long nr_pages); -void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages); - -/** - * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. - * @gfp: the gfp allocation flags. - * @memcg: a pointer to the memcg this was charged against. - * @order: allocation order. - * - * returns true if the memcg where the current task belongs can hold this - * allocation. - * - * We return true automatically if this allocation is not to be accounted to - * any memcg. - */ -static inline bool -memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) +static inline bool __memcg_kmem_bypass(gfp_t gfp) { if (!memcg_kmem_enabled()) return true; - if (gfp & __GFP_NOACCOUNT) return true; - /* - * __GFP_NOFAIL allocations will move on even if charging is not - * possible. Therefore we don't even try, and have this allocation - * unaccounted. We could in theory charge it forcibly, but we hope - * those allocations are rare, and won't be worth the trouble. - */ - if (gfp & __GFP_NOFAIL) - return true; if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) return true; - - /* If the test is dying, just let it go. */ - if (unlikely(fatal_signal_pending(current))) - return true; - - return __memcg_kmem_newpage_charge(gfp, memcg, order); + return false; } /** - * memcg_kmem_uncharge_pages: uncharge pages from memcg - * @page: pointer to struct page being freed - * @order: allocation order. + * memcg_kmem_charge: charge a kmem page + * @page: page to charge + * @gfp: reclaim mode + * @order: allocation order + * + * Returns 0 on success, an error code on failure. */ -static inline void -memcg_kmem_uncharge_pages(struct page *page, int order) +static __always_inline int memcg_kmem_charge(struct page *page, + gfp_t gfp, int order) { - if (memcg_kmem_enabled()) - __memcg_kmem_uncharge_pages(page, order); + if (__memcg_kmem_bypass(gfp)) + return 0; + return __memcg_kmem_charge(page, gfp, order); } /** - * memcg_kmem_commit_charge: embeds correct memcg in a page - * @page: pointer to struct page recently allocated - * @memcg: the memcg structure we charged against - * @order: allocation order. - * - * Needs to be called after memcg_kmem_newpage_charge, regardless of success or - * failure of the allocation. if @page is NULL, this function will revert the - * charges. Otherwise, it will commit @page to @memcg. 
+ * memcg_kmem_uncharge: uncharge a kmem page + * @page: page to uncharge + * @order: allocation order */ -static inline void -memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) +static __always_inline void memcg_kmem_uncharge(struct page *page, int order) { - if (memcg_kmem_enabled() && memcg) - __memcg_kmem_commit_charge(page, memcg, order); + if (memcg_kmem_enabled()) + __memcg_kmem_uncharge(page, order); } /** @@ -848,17 +817,8 @@ memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) static __always_inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { - if (!memcg_kmem_enabled()) - return cachep; - if (gfp & __GFP_NOACCOUNT) - return cachep; - if (gfp & __GFP_NOFAIL) + if (__memcg_kmem_bypass(gfp)) return cachep; - if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) - return cachep; - if (unlikely(fatal_signal_pending(current))) - return cachep; - return __memcg_kmem_get_cache(cachep); } @@ -867,13 +827,6 @@ static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) { if (memcg_kmem_enabled()) __memcg_kmem_put_cache(cachep); } - -static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr) -{ - if (!memcg_kmem_enabled()) - return NULL; - return __mem_cgroup_from_kmem(ptr); -} #else #define for_each_memcg_cache_index(_idx) \ for (; NULL; ) @@ -888,18 +841,12 @@ static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) return false; } -static inline bool -memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) +static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) { - return true; + return 0; } -static inline void memcg_kmem_uncharge_pages(struct page *page, int order) -{ -} - -static inline void -memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) +static inline void memcg_kmem_uncharge(struct page *page, int order) { } @@ -925,11 +872,5 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) { } - -static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr) -{ - return NULL; -} #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ -
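
A hedged sketch of the memcg_kmem_charge()/memcg_kmem_uncharge() pair in use, mirroring what a page-allocator caller would do; my_alloc_accounted() is a hypothetical wrapper, not an API from this header:

static struct page *my_alloc_accounted(gfp_t gfp, int order)
{
	struct page *page = alloc_pages(gfp, order);

	if (page && memcg_kmem_charge(page, gfp, order)) {
		__free_pages(page, order);
		return NULL;
	}
	/* the owner calls memcg_kmem_uncharge(page, order) before freeing */
	return page;
}

diff --git a/include/linux/mfd/88pm80x.h index 8fcad63fab55..d409ceb2231e 100644 --- a/include/linux/mfd/88pm80x.h +++ b/include/linux/mfd/88pm80x.h @@ -21,6 +21,7 @@ enum { CHIP_INVALID = 0, CHIP_PM800, CHIP_PM805, + CHIP_PM860, CHIP_MAX, }; diff --git a/include/linux/mfd/arizona/registers.h index c7c11c900196..cd7e78eae006 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h @@ -1065,6 +1065,16 @@ #define ARIZONA_CLOCK_CONTROL 0xF00 #define ARIZONA_ANC_SRC 0xF01 #define ARIZONA_DSP_STATUS 0xF02 +#define ARIZONA_ANC_COEFF_START 0xF08 +#define ARIZONA_ANC_COEFF_END 0xF12 +#define ARIZONA_FCL_FILTER_CONTROL 0xF15 +#define ARIZONA_FCL_ADC_REFORMATTER_CONTROL 0xF17 +#define ARIZONA_FCL_COEFF_START 0xF18 +#define ARIZONA_FCL_COEFF_END 0xF69 +#define ARIZONA_FCR_FILTER_CONTROL 0xF70 +#define ARIZONA_FCR_ADC_REFORMATTER_CONTROL 0xF72 +#define ARIZONA_FCR_COEFF_START 0xF73 +#define ARIZONA_FCR_COEFF_END 0xFC4 #define ARIZONA_DSP1_CONTROL_1 0x1100 #define ARIZONA_DSP1_CLOCKING_1 0x1101 #define ARIZONA_DSP1_STATUS_1 0x1104 @@ -8051,6 +8061,66 @@ #define ARIZONA_ISRC3_NOTCH_ENA_WIDTH 1 /* ISRC3_NOTCH_ENA */ /* + * R3840 (0xF00) - Clock Control */ +#define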
ARIZONA_EXT_NG_SEL_CLR 0x0080 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_MASK 0x0080 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_SHIFT 7 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_CLR_WIDTH 1 /* EXT_NG_SEL_CLR */ +#define ARIZONA_EXT_NG_SEL_SET 0x0040 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_MASK 0x0040 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_SHIFT 6 /* EXT_NG_SEL_SET */ +#define ARIZONA_EXT_NG_SEL_SET_WIDTH 1 /* EXT_NG_SEL_SET */ +#define ARIZONA_CLK_R_ENA_CLR 0x0020 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_MASK 0x0020 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_SHIFT 5 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_CLR_WIDTH 1 /* CLK_R_ENA_CLR */ +#define ARIZONA_CLK_R_ENA_SET 0x0010 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_MASK 0x0010 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_SHIFT 4 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_R_ENA_SET_WIDTH 1 /* CLK_R_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_CLR 0x0008 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_MASK 0x0008 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_SHIFT 3 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_CLR_WIDTH 1 /* CLK_NG_ENA_CLR */ +#define ARIZONA_CLK_NG_ENA_SET 0x0004 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_MASK 0x0004 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_SHIFT 2 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_NG_ENA_SET_WIDTH 1 /* CLK_NG_ENA_SET */ +#define ARIZONA_CLK_L_ENA_CLR 0x0002 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_MASK 0x0002 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_SHIFT 1 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_CLR_WIDTH 1 /* CLK_L_ENA_CLR */ +#define ARIZONA_CLK_L_ENA_SET 0x0001 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_MASK 0x0001 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_SHIFT 0 /* CLK_L_ENA_SET */ +#define ARIZONA_CLK_L_ENA_SET_WIDTH 1 /* CLK_L_ENA_SET */ + +/* + * R3841 (0xF01) - ANC SRC + */ +#define ARIZONA_IN_RXANCR_SEL_MASK 0x0070 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCR_SEL_SHIFT 4 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCR_SEL_WIDTH 3 /* IN_RXANCR_SEL - [4:6] */ +#define ARIZONA_IN_RXANCL_SEL_MASK 0x0007 /* IN_RXANCL_SEL - [0:2] */ +#define ARIZONA_IN_RXANCL_SEL_SHIFT 0 /* IN_RXANCL_SEL - [0:2] */ +#define ARIZONA_IN_RXANCL_SEL_WIDTH 3 /* IN_RXANCL_SEL - [0:2] */ + +/* + * R3863 (0xF17) - FCL ADC Reformatter Control + */ +#define ARIZONA_FCL_MIC_MODE_SEL 0x000C /* FCL_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCL_MIC_MODE_SEL_SHIFT 2 /* FCL_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCL_MIC_MODE_SEL_WIDTH 2 /* FCL_MIC_MODE_SEL - [2:3] */ + +/* + * R3954 (0xF72) - FCR ADC Reformatter Control + */ +#define ARIZONA_FCR_MIC_MODE_SEL 0x000C /* FCR_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCR_MIC_MODE_SEL_SHIFT 2 /* FCR_MIC_MODE_SEL - [2:3] */ +#define ARIZONA_FCR_MIC_MODE_SEL_WIDTH 2 /* FCR_MIC_MODE_SEL - [2:3] */ + +/* * R4352 (0x1100) - DSP1 Control 1 */ #define ARIZONA_DSP1_RATE_MASK 0x7800 /* DSP1_RATE - [14:11] */ diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index cc8ad1e1a307..b24c771cebd5 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -11,6 +11,8 @@ #ifndef __LINUX_MFD_AXP20X_H #define __LINUX_MFD_AXP20X_H +#include <linux/regmap.h> + enum { AXP152_ID = 0, AXP202_ID, @@ -438,4 +440,26 @@ struct axp288_extcon_pdata { struct gpio_desc *gpio_mux_cntl; }; +/* generic helper function for reading 9-16 bit wide regs */ +static inline int 
axp20x_read_variable_width(struct regmap *regmap, + unsigned int reg, unsigned int width) +{ + unsigned int reg_val, result; + int err; + + err = regmap_read(regmap, reg, &reg_val); + if (err) + return err; + + result = reg_val << (width - 8); + + err = regmap_read(regmap, reg + 1, &reg_val); + if (err) + return err; + + result |= reg_val; + + return result; +} + #endif /* __LINUX_MFD_AXP20X_H */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index a76bc100bf97..27dac3ff18b9 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -18,6 +18,12 @@ struct irq_domain; +/* Matches ACPI PNP id, either _HID or _CID, or ACPI _ADR */ +struct mfd_cell_acpi_match { + const char *pnpid; + const unsigned long long adr; +}; + /* * This struct describes the MFD part ("cell"). * After registration the copy of this structure will become the platform data @@ -44,8 +50,8 @@ struct mfd_cell { */ const char *of_compatible; - /* Matches ACPI PNP id, either _HID or _CID */ - const char *acpi_pnpid; + /* Matches ACPI */ + const struct mfd_cell_acpi_match *acpi_match; /* * These resources can be specified relative to the parent device. diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h index c4dd3a8add21..5010f978725c 100644 --- a/include/linux/mfd/da9052/reg.h +++ b/include/linux/mfd/da9052/reg.h @@ -65,6 +65,9 @@ #define DA9052_GPIO_2_3_REG 22 #define DA9052_GPIO_4_5_REG 23 #define DA9052_GPIO_6_7_REG 24 +#define DA9052_GPIO_8_9_REG 25 +#define DA9052_GPIO_10_11_REG 26 +#define DA9052_GPIO_12_13_REG 27 #define DA9052_GPIO_14_15_REG 28 /* POWER SEQUENCER CONTROL REGISTERS */ diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h index 76e668933a77..1bf50caeb9fa 100644 --- a/include/linux/mfd/da9150/core.h +++ b/include/linux/mfd/da9150/core.h @@ -15,6 +15,7 @@ #define __DA9150_CORE_H #include <linux/device.h> +#include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/regmap.h> @@ -46,23 +47,39 @@ #define DA9150_IRQ_GPADC 19 #define DA9150_IRQ_WKUP 20 +/* I2C sub-device address */ +#define DA9150_QIF_I2C_ADDR_LSB 0x5 + +struct da9150_fg_pdata { + u32 update_interval; /* msecs */ + u8 warn_soc_lvl; /* % value */ + u8 crit_soc_lvl; /* % value */ +}; + struct da9150_pdata { int irq_base; + struct da9150_fg_pdata *fg_pdata; }; struct da9150 { struct device *dev; struct regmap *regmap; + struct i2c_client *core_qif; + struct regmap_irq_chip_data *regmap_irq_data; int irq; int irq_base; }; -/* Device I/O */ +/* Device I/O - Query Interface for FG and standard register access */ +void da9150_read_qif(struct da9150 *da9150, u8 addr, int count, u8 *buf); +void da9150_write_qif(struct da9150 *da9150, u8 addr, int count, const u8 *buf); + u8 da9150_reg_read(struct da9150 *da9150, u16 reg); void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val); void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val); void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf); void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf); + #endif /* __DA9150_CORE_H */ diff --git a/include/linux/mfd/intel_bxtwc.h b/include/linux/mfd/intel_bxtwc.h new file mode 100644 index 000000000000..1a0ee9d6efe9 --- /dev/null +++ b/include/linux/mfd/intel_bxtwc.h @@ -0,0 +1,69 @@ +/* + * intel_bxtwc.h - Header file for Intel Broxton Whiskey Cove PMIC + * + * Copyright (C) 2015 Intel Corporation. All rights reserved.
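/*
 * Editorial sketch (not part of the patch): how a consumer might use the
 * new axp20x_read_variable_width() helper above to read a 12-bit ADC
 * value that the PMIC splits across two consecutive registers. The
 * register address 0x78 and the width are illustrative assumptions.
 */
static inline int axp20x_sketch_read_adc12(struct regmap *regmap)
{
	/* high 8 bits at 0x78, low 4 bits at 0x79 -> 12-bit value or -errno */
	return axp20x_read_variable_width(regmap, 0x78, 12);
}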
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/mfd/intel_soc_pmic.h> + +#ifndef __INTEL_BXTWC_H__ +#define __INTEL_BXTWC_H__ + +/* BXT WC devices */ +#define BXTWC_DEVICE1_ADDR 0x4E +#define BXTWC_DEVICE2_ADDR 0x4F +#define BXTWC_DEVICE3_ADDR 0x5E + +/* device1 Registers */ +#define BXTWC_CHIPID 0x4E00 +#define BXTWC_CHIPVER 0x4E01 + +#define BXTWC_SCHGRIRQ0_ADDR 0x5E1A +#define BXTWC_CHGRCTRL0_ADDR 0x5E16 +#define BXTWC_CHGRCTRL1_ADDR 0x5E17 +#define BXTWC_CHGRCTRL2_ADDR 0x5E18 +#define BXTWC_CHGRSTATUS_ADDR 0x5E19 +#define BXTWC_THRMBATZONE_ADDR 0x4F22 + +#define BXTWC_USBPATH_ADDR 0x5E19 +#define BXTWC_USBPHYCTRL_ADDR 0x5E07 +#define BXTWC_USBIDCTRL_ADDR 0x5E05 +#define BXTWC_USBIDEN_MASK 0x01 +#define BXTWC_USBIDSTAT_ADDR 0x00FF +#define BXTWC_USBSRCDETSTATUS_ADDR 0x5E29 + +#define BXTWC_DBGUSBBC1_ADDR 0x5FE0 +#define BXTWC_DBGUSBBC2_ADDR 0x5FE1 +#define BXTWC_DBGUSBBCSTAT_ADDR 0x5FE2 + +#define BXTWC_WAKESRC_ADDR 0x4E22 +#define BXTWC_WAKESRC2_ADDR 0x4EE5 +#define BXTWC_CHRTTADDR_ADDR 0x5E22 +#define BXTWC_CHRTTDATA_ADDR 0x5E23 + +#define BXTWC_STHRMIRQ0_ADDR 0x4F19 +#define WC_MTHRMIRQ1_ADDR 0x4E12 +#define WC_STHRMIRQ1_ADDR 0x4F1A +#define WC_STHRMIRQ2_ADDR 0x4F1B + +#define BXTWC_THRMZN0H_ADDR 0x4F44 +#define BXTWC_THRMZN0L_ADDR 0x4F45 +#define BXTWC_THRMZN1H_ADDR 0x4F46 +#define BXTWC_THRMZN1L_ADDR 0x4F47 +#define BXTWC_THRMZN2H_ADDR 0x4F48 +#define BXTWC_THRMZN2L_ADDR 0x4F49 +#define BXTWC_THRMZN3H_ADDR 0x4F4A +#define BXTWC_THRMZN3L_ADDR 0x4F4B +#define BXTWC_THRMZN4H_ADDR 0x4F4C +#define BXTWC_THRMZN4L_ADDR 0x4F4D + +#endif diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index abcbfcf32d10..cf619dbeace2 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h @@ -25,6 +25,8 @@ struct intel_soc_pmic { int irq; struct regmap *regmap; struct regmap_irq_chip_data *irq_chip_data; + struct regmap_irq_chip_data *irq_chip_data_level2; + struct device *dev; }; #endif /* __INTEL_SOC_PMIC_H__ */ diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index ff843e7ca23d..7eb7cbac0a9a 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -589,6 +589,7 @@ #define FORCE_ASPM_NO_ASPM 0x00 #define PM_CLK_FORCE_CTL 0xFE58 #define FUNC_FORCE_CTL 0xFE59 +#define FUNC_FORCE_UPME_XMT_DBG 0x02 #define PERST_GLITCH_WIDTH 0xFE5C #define CHANGE_LINK_STATE 0xFE5B #define RESET_LOAD_REG 0xFE5E @@ -712,6 +713,7 @@ #define PHY_RCR1 0x02 #define PHY_RCR1_ADP_TIME_4 0x0400 #define PHY_RCR1_VCO_COARSE 0x001F +#define PHY_RCR1_INIT_27S 0x0A1F #define PHY_SSCCR2 0x02 #define PHY_SSCCR2_PLL_NCODE 0x0A00 #define PHY_SSCCR2_TIME0 0x001C @@ -724,6 +726,7 @@ #define PHY_RCR2_FREQSEL_12 0x0040 #define PHY_RCR2_CDR_SC_12P 0x0010 #define PHY_RCR2_CALIB_LATE 0x0002 +#define PHY_RCR2_INIT_27S 0xC152 #define PHY_SSCCR3 0x03 #define PHY_SSCCR3_STEP_IN 0x2740 #define PHY_SSCCR3_CHECK_DELAY 0x0008 @@ -800,12 +803,14 @@ #define PHY_ANA1A_RXT_BIST 0x0500 #define PHY_ANA1A_TXR_BIST 0x0040 #define PHY_ANA1A_REV 0x0006 +#define PHY_FLD0_INIT_27S 0x2546 #define PHY_FLD1 0x1B #define PHY_FLD2 
0x1C #define PHY_FLD3 0x1D #define PHY_FLD3_TIMER_4 0x0800 #define PHY_FLD3_TIMER_6 0x0020 #define PHY_FLD3_RXDELINK 0x0004 +#define PHY_FLD3_INIT_27S 0x0004 #define PHY_ANA1D 0x1D #define PHY_ANA1D_DEBUG_ADDR 0x0004 #define _PHY_FLD0 0x1D @@ -824,6 +829,7 @@ #define PHY_FLD4_BER_COUNT 0x00E0 #define PHY_FLD4_BER_TIMER 0x000A #define PHY_FLD4_BER_CHK_EN 0x0001 +#define PHY_FLD4_INIT_27S 0x5C7F #define PHY_DIG1E 0x1E #define PHY_DIG1E_REV 0x4000 #define PHY_DIG1E_D0_X_D1 0x1000 diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 75115384f3fc..a06098639399 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -132,6 +132,10 @@ struct sec_platform_data { int buck2_init; int buck3_init; int buck4_init; + /* Whether or not to manually set PWRHOLD low during shutdown. */ + bool manual_poweroff; + /* Disable the WRSTBI (buck voltage warm reset) when probing? */ + bool disable_wrstbi; }; /** diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h index 7981a9d77d3f..b288965e8101 100644 --- a/include/linux/mfd/samsung/s2mps11.h +++ b/include/linux/mfd/samsung/s2mps11.h @@ -179,6 +179,7 @@ enum s2mps11_regulators { #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) #define S2MPS11_RAMP_DELAY 25000 /* uV/us */ +#define S2MPS11_CTRL1_PWRHOLD_MASK BIT(4) #define S2MPS11_BUCK2_RAMP_SHIFT 6 #define S2MPS11_BUCK34_RAMP_SHIFT 4 diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h index b1fd675fa36f..239e977ba45d 100644 --- a/include/linux/mfd/samsung/s2mps13.h +++ b/include/linux/mfd/samsung/s2mps13.h @@ -184,5 +184,6 @@ enum s2mps13_regulators { * Let's assume that default value will be set. */ #define S2MPS13_BUCK_RAMP_DELAY 12500 +#define S2MPS13_REG_WRSTBI_MASK BIT(5) #endif /* __LINUX_MFD_S2MPS13_H */ diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h new file mode 100644 index 000000000000..4585d6105d68 --- /dev/null +++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation.
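/*
 * Editorial sketch (not part of the patch): the manual_poweroff flag and
 * S2MPS11_CTRL1_PWRHOLD_MASK above suggest a shutdown path that drops the
 * PWRHOLD bit through regmap so the PMIC can cut power. The CTRL1
 * register address passed in is an illustrative assumption.
 */
static inline int s2mps11_sketch_poweroff(struct regmap *regmap,
					  unsigned int ctrl1_reg)
{
	/* clear PWRHOLD; returns 0 or a negative errno from regmap */
	return regmap_update_bits(regmap, ctrl1_reg,
				  S2MPS11_CTRL1_PWRHOLD_MASK, 0);
}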
+ */ + +#ifndef __LINUX_IMX7_IOMUXC_GPR_H +#define __LINUX_IMX7_IOMUXC_GPR_H + +#define IOMUXC_GPR0 0x00 +#define IOMUXC_GPR1 0x04 +#define IOMUXC_GPR2 0x08 +#define IOMUXC_GPR3 0x0c +#define IOMUXC_GPR4 0x10 +#define IOMUXC_GPR5 0x14 +#define IOMUXC_GPR6 0x18 +#define IOMUXC_GPR7 0x1c +#define IOMUXC_GPR8 0x20 +#define IOMUXC_GPR9 0x24 +#define IOMUXC_GPR10 0x28 +#define IOMUXC_GPR11 0x2c +#define IOMUXC_GPR12 0x30 +#define IOMUXC_GPR13 0x34 +#define IOMUXC_GPR14 0x38 +#define IOMUXC_GPR15 0x3c +#define IOMUXC_GPR16 0x40 +#define IOMUXC_GPR17 0x44 +#define IOMUXC_GPR18 0x48 +#define IOMUXC_GPR19 0x4c +#define IOMUXC_GPR20 0x50 +#define IOMUXC_GPR21 0x54 +#define IOMUXC_GPR22 0x58 + +/* For imx7d iomux gpr register field define */ +#define IMX7D_GPR1_IRQ_MASK (0x1 << 12) +#define IMX7D_GPR1_ENET1_TX_CLK_SEL_MASK (0x1 << 13) +#define IMX7D_GPR1_ENET2_TX_CLK_SEL_MASK (0x1 << 14) +#define IMX7D_GPR1_ENET_TX_CLK_SEL_MASK (0x3 << 13) +#define IMX7D_GPR1_ENET1_CLK_DIR_MASK (0x1 << 17) +#define IMX7D_GPR1_ENET2_CLK_DIR_MASK (0x1 << 18) +#define IMX7D_GPR1_ENET_CLK_DIR_MASK (0x3 << 17) + +#define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4) + +#endif /* __LINUX_IMX7_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h index 386743dd931c..8bc51180800a 100644 --- a/include/linux/mfd/tps6105x.h +++ b/include/linux/mfd/tps6105x.h @@ -10,6 +10,7 @@ #define MFD_TPS6105X_H #include <linux/i2c.h> +#include <linux/regmap.h> #include <linux/regulator/machine.h> /* @@ -82,20 +83,15 @@ struct tps6105x_platform_data { /** * struct tps6105x - state holder for the TPS6105x drivers - * @mutex: mutex to serialize I2C accesses * @i2c_client: corresponding I2C client * @regulator: regulator device if used in voltage mode + * @regmap: used for I2C communication when accessing registers */ struct tps6105x { struct tps6105x_platform_data *pdata; - struct mutex lock; struct i2c_client *client; struct regulator_dev *regulator; + struct regmap *regmap; }; -extern int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value); -extern int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf); -extern int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg, - u8 bitmask, u8 bitvalues); - #endif diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 81f6e427ba6b..543037465973 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -49,6 +49,7 @@ #define LOOP_CTRL_MINOR 237 #define VHOST_NET_MINOR 238 #define UHID_MINOR 239 +#define USERIO_MINOR 240 #define MISC_DYNAMIC_MINOR 255 struct device; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 5a8677bafe04..7501626ab529 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -214,6 +214,8 @@ enum { MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28, MLX4_DEV_CAP_FLAG2_PHV_EN = 1LL << 29, MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30, + MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31, + MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32, }; enum { diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index de45a51b3f04..fe052e234906 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -135,7 +135,10 @@ struct mlx4_rss_context { struct mlx4_qp_path { u8 fl; - u8 vlan_control; + union { + u8 vlan_control; + u8 control; + }; u8 disable_pkey_check; u8 pkey_index; u8 counter_index; @@ -156,9 +159,16 @@ struct mlx4_qp_path { }; enum { /* fl */ - MLX4_FL_CV = 1 << 6, - MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2 + MLX4_FL_CV = 1 << 6, +
MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2, + MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1, + MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0, }; + +enum { /* control */ + MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER = 1 << 7, +}; + enum { /* vlan_control */ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6, MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */ @@ -254,6 +264,8 @@ enum { MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32, MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32, MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32, }; enum { /* param3 */ @@ -436,11 +448,13 @@ enum mlx4_update_qp_attr { MLX4_UPDATE_QP_VSD = 1 << 1, MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2, MLX4_UPDATE_QP_QOS_VPORT = 1 << 3, - MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1 + MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB = 1 << 4, + MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 5) - 1 }; enum mlx4_update_qp_params_flags { - MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0, + MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB = 1 << 0, + MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 1, }; struct mlx4_update_qp_params { diff --git a/include/linux/mm.h b/include/linux/mm.h index 80001de019ba..00bad7793788 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -139,6 +139,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ +#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */ #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ @@ -202,6 +203,9 @@ extern unsigned int kobjsize(const void *objp); /* This mask defines which mm->def_flags a process can inherit its parent */ #define VM_INIT_DEF_MASK VM_NOHUGEPAGE +/* This mask is used to clear all the VMA flags used by mlock */ +#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT)) + /* * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. @@ -426,46 +430,6 @@ static inline void compound_unlock_irqrestore(struct page *page, #endif } -static inline struct page *compound_head_by_tail(struct page *tail) -{ - struct page *head = tail->first_page; - - /* - * page->first_page may be a dangling pointer to an old - * compound page, so recheck that it is still a tail - * page before returning. - */ - smp_rmb(); - if (likely(PageTail(tail))) - return head; - return tail; -} - -/* - * Since either compound page could be dismantled asynchronously in THP - * or we access asynchronously arbitrary positioned struct page, there - * would be tail flag race. To handle this race, we should call - * smp_rmb() before checking tail flag. compound_head_by_tail() did it. - */ -static inline struct page *compound_head(struct page *page) -{ - if (unlikely(PageTail(page))) - return compound_head_by_tail(page); - return page; -} - -/* - * If we access compound page synchronously such as access to - * allocated page, there is no need to handle tail flag race, so we can - * check tail flag directly without any synchronization primitive. 
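/*
 * Editorial sketch (not part of the patch): VM_LOCKONFAULT above marks a
 * VMA whose pages get mlocked lazily, as they are faulted in, and
 * VM_LOCKED_CLEAR_MASK exists so that both mlock-related flags are always
 * dropped together, roughly:
 */
static inline void sketch_clear_mlock_flags(struct vm_area_struct *vma)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;	/* VM_LOCKED | VM_LOCKONFAULT */
}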
- */ -static inline struct page *compound_head_fast(struct page *page) -{ - if (unlikely(PageTail(page))) - return page->first_page; - return page; -} - /* * The atomic page->_mapcount, starts from -1: so that transitions * both from it and to it can be tracked, using atomic_inc_and_test @@ -514,7 +478,7 @@ static inline void get_huge_page_tail(struct page *page) VM_BUG_ON_PAGE(!PageTail(page), page); VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); - if (compound_tail_refcounted(page->first_page)) + if (compound_tail_refcounted(compound_head(page))) atomic_inc(&page->_mapcount); } @@ -537,13 +501,7 @@ static inline struct page *virt_to_head_page(const void *x) { struct page *page = virt_to_page(x); - /* - * We don't need to worry about synchronization of tail flag - * when we call virt_to_head_page() since it is only called for - * already allocated page and this page won't be freed until - * this virt_to_head_page() is finished. So use _fast variant. - */ - return compound_head_fast(page); + return compound_head(page); } /* @@ -564,28 +522,42 @@ int split_free_page(struct page *page); /* * Compound pages have a destructor function. Provide a * prototype for that function and accessor functions. - * These are _only_ valid on the head of a PG_compound page. + * These are _only_ valid on the head of a compound page. */ +typedef void compound_page_dtor(struct page *); + +/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */ +enum compound_dtor_id { + NULL_COMPOUND_DTOR, + COMPOUND_PAGE_DTOR, +#ifdef CONFIG_HUGETLB_PAGE + HUGETLB_PAGE_DTOR, +#endif + NR_COMPOUND_DTORS, +}; +extern compound_page_dtor * const compound_page_dtors[]; static inline void set_compound_page_dtor(struct page *page, - compound_page_dtor *dtor) + enum compound_dtor_id compound_dtor) { - page[1].compound_dtor = dtor; + VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page); + page[1].compound_dtor = compound_dtor; } static inline compound_page_dtor *get_compound_page_dtor(struct page *page) { - return page[1].compound_dtor; + VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page); + return compound_page_dtors[page[1].compound_dtor]; } -static inline int compound_order(struct page *page) +static inline unsigned int compound_order(struct page *page) { if (!PageHead(page)) return 0; return page[1].compound_order; } -static inline void set_compound_order(struct page *page, unsigned long order) +static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; } @@ -1568,8 +1540,7 @@ static inline bool ptlock_init(struct page *page) * with 0. Make sure nobody took it in use in between. * * It can happen if arch try to use slab for page table allocation: - * slab code uses page->slab_cache and page->first_page (for tail - * pages), which share storage with page->ptl. + * slab code uses page->slab_cache, which shares storage with page->ptl.
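/*
 * Editorial sketch (not part of the patch): with destructors now indexed
 * by enum rather than stored as raw function pointers, setup and teardown
 * of a compound page reduce to the following; the sketch_* helpers are
 * illustrative, not kernel API.
 */
static inline void sketch_prep_compound(struct page *page, unsigned int order)
{
	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
}

static inline void sketch_destroy_compound(struct page *page)
{
	compound_page_dtor *dtor = get_compound_page_dtor(page);

	(*dtor)(page);	/* looked up in compound_page_dtors[] */
}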
*/ VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); if (!ptlock_alloc(page)) @@ -1606,8 +1577,10 @@ static inline void pgtable_init(void) static inline bool pgtable_page_ctor(struct page *page) { + if (!ptlock_init(page)) + return false; inc_zone_page_state(page, NR_PAGETABLE); - return ptlock_init(page); + return true; } static inline void pgtable_page_dtor(struct page *page) @@ -1837,7 +1810,8 @@ extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); extern __printf(3, 4) -void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...); +void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, + const char *fmt, ...); extern void setup_per_cpu_pageset(void); @@ -2036,8 +2010,6 @@ void page_cache_async_readahead(struct address_space *mapping, pgoff_t offset, unsigned long size); -unsigned long max_sane_readahead(unsigned long nr); - /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -2137,6 +2109,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma, #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ +#define FOLL_MLOCK 0x1000 /* lock present pages */ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, void *data); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 3d6baa7d4534..f8d1492a114f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -28,8 +28,6 @@ struct mem_cgroup; IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) -typedef void compound_page_dtor(struct page *); - /* * Each physical page in the system has a struct page associated with * it to keep track of whatever it is we are using the page for at the @@ -113,7 +111,13 @@ struct page { }; }; - /* Third double word block */ + /* + * Third double word block + * + * WARNING: bit 0 of the first word encodes PageTail(). That means + * the remaining users of the storage space MUST NOT use the bit to + * avoid collision and false-positive PageTail(). + */ union { struct list_head lru; /* Pageout list, eg. active_list * protected by zone->lru_lock ! @@ -131,18 +135,37 @@ struct page { #endif }; - struct slab *slab_page; /* slab fields */ struct rcu_head rcu_head; /* Used by SLAB * when destroying via RCU */ - /* First tail page of compound page */ + /* Tail pages of compound page */ struct { - compound_page_dtor *compound_dtor; - unsigned long compound_order; + unsigned long compound_head; /* If bit zero is set */ + + /* First tail page only */ +#ifdef CONFIG_64BIT + /* + * On 64 bit system we have enough space in struct page + * to encode compound_dtor and compound_order with + * unsigned int. It can help the compiler generate better or + * smaller code on some architectures. + */ + unsigned int compound_dtor; + unsigned int compound_order; +#else + unsigned short int compound_dtor; + unsigned short int compound_order; +#endif }; #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS - pgtable_t pmd_huge_pte; /* protected by page->ptl */ + struct { + unsigned long __pad; /* do not overlay pmd_huge_pte + * with compound_head to avoid + * possible bit 0 collision.
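/*
 * Editorial sketch (not part of the patch, but mirroring its page-flags
 * helpers): tail pages now carry a pointer to their head page in
 * compound_head, tagged with bit 0 so it can never be mistaken for a
 * valid lru.next pointer. Encoding and decoding are just:
 */
static inline void sketch_set_tail(struct page *tail, struct page *head)
{
	tail->compound_head = (unsigned long)head + 1;	/* tag bit 0 */
}

static inline struct page *sketch_compound_head(struct page *page)
{
	unsigned long head = page->compound_head;

	return (head & 1) ? (struct page *)(head - 1) : page;
}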
+ */ + pgtable_t pmd_huge_pte; /* protected by page->ptl */ + }; #endif }; @@ -163,7 +186,6 @@ struct page { #endif #endif struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ - struct page *first_page; /* Compound tail pages */ }; #ifdef CONFIG_MEMCG @@ -486,6 +508,9 @@ struct mm_struct { /* address of the bounds directory */ void __user *bd_addr; #endif +#ifdef CONFIG_HUGETLB_PAGE + atomic_long_t hugetlb_usage; +#endif }; static inline void mm_init_cpumask(struct mm_struct *mm) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index d94347737292..e23a9e704536 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -37,10 +37,10 @@ enum { MIGRATE_UNMOVABLE, - MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, + MIGRATE_RECLAIMABLE, MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ - MIGRATE_RESERVE = MIGRATE_PCPTYPES, + MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, #ifdef CONFIG_CMA /* * MIGRATE_CMA migration type is designed to mimic the way @@ -334,13 +334,16 @@ struct zone { /* zone watermarks, access with *_wmark_pages(zone) macros */ unsigned long watermark[NR_WMARK]; + unsigned long nr_reserved_highatomic; + /* - * We don't know if the memory that we're going to allocate will be freeable - * or/and it will be released eventually, so to avoid totally wasting several - * GB of ram we must reserve some of the lower zone memory (otherwise we risk - * to run OOM on the lower zones despite there's tons of freeable ram - * on the higher zones). This array is recalculated at runtime if the - * sysctl_lowmem_reserve_ratio sysctl changes. + * We don't know if the memory that we're going to allocate will be + * freeable or/and it will be released eventually, so to avoid totally + * wasting several GB of ram we must reserve some of the lower zone + * memory (otherwise we risk running OOM on the lower zones despite + * there being tons of freeable ram on the higher zones). This array is + * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl + * changes. */ long lowmem_reserve[MAX_NR_ZONES]; @@ -429,12 +432,6 @@ struct zone { const char *name; - /* - * Number of MIGRATE_RESERVE page block. To maintain for just - * optimization. Protected by zone->lock. - */ - int nr_migrate_reserve_block; - #ifdef CONFIG_MEMORY_ISOLATION /* * Number of isolated pageblock. It is used to solve incorrect @@ -589,75 +586,8 @@ static inline bool zone_is_empty(struct zone *zone) * [1] : No fallback (__GFP_THISNODE) */ #define MAX_ZONELISTS 2 - - -/* - * We cache key information from each zonelist for smaller cache - * footprint when scanning for free pages in get_page_from_freelist(). - * - * 1) The BITMAP fullzones tracks which zones in a zonelist have come - * up short of free memory since the last time (last_fullzone_zap) - * we zero'd fullzones. - * 2) The array z_to_n[] maps each zone in the zonelist to its node - * id, so that we can efficiently evaluate whether that node is - * set in the current tasks mems_allowed. - * - * Both fullzones and z_to_n[] are one-to-one with the zonelist, - * indexed by a zones offset in the zonelist zones[] array. - * - * The get_page_from_freelist() routine does two scans. During the - * first scan, we skip zones whose corresponding bit in 'fullzones' - * is set or whose corresponding node in current->mems_allowed (which - * comes from cpusets) is not set. During the second scan, we bypass - * this zonelist_cache, to ensure we look methodically at each zone.
- * - * Once per second, we zero out (zap) fullzones, forcing us to - * reconsider nodes that might have regained more free memory. - * The field last_full_zap is the time we last zapped fullzones. - * - * This mechanism reduces the amount of time we waste repeatedly - * reexaming zones for free memory when they just came up low on - * memory momentarilly ago. - * - * The zonelist_cache struct members logically belong in struct - * zonelist. However, the mempolicy zonelists constructed for - * MPOL_BIND are intentionally variable length (and usually much - * shorter). A general purpose mechanism for handling structs with - * multiple variable length members is more mechanism than we want - * here. We resort to some special case hackery instead. - * - * The MPOL_BIND zonelists don't need this zonelist_cache (in good - * part because they are shorter), so we put the fixed length stuff - * at the front of the zonelist struct, ending in a variable length - * zones[], as is needed by MPOL_BIND. - * - * Then we put the optional zonelist cache on the end of the zonelist - * struct. This optional stuff is found by a 'zlcache_ptr' pointer in - * the fixed length portion at the front of the struct. This pointer - * both enables us to find the zonelist cache, and in the case of - * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL) - * to know that the zonelist cache is not there. - * - * The end result is that struct zonelists come in two flavors: - * 1) The full, fixed length version, shown below, and - * 2) The custom zonelists for MPOL_BIND. - * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache. - * - * Even though there may be multiple CPU cores on a node modifying - * fullzones or last_full_zap in the same zonelist_cache at the same - * time, we don't lock it. This is just hint data - if it is wrong now - * and then, the allocator will still function, perhaps a bit slower. - */ - - -struct zonelist_cache { - unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */ - DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */ - unsigned long last_full_zap; /* when last zap'd (jiffies) */ -}; #else #define MAX_ZONELISTS 1 -struct zonelist_cache; #endif /* @@ -675,9 +605,6 @@ struct zoneref { * allocation, the other zones are fallback zones, in decreasing * priority. * - * If zlcache_ptr is not NULL, then it is just the address of zlcache, - * as explained above. If zlcache_ptr is NULL, there is no zlcache. - * * * To speed the reading of the zonelist, the zonerefs contain the zone index * of the entry being read. Helper functions to access information given * a struct zoneref are @@ -687,11 +614,7 @@ struct zoneref { * zonelist_node_idx() - Return the index of the node for an entry */ struct zonelist { - struct zonelist_cache *zlcache_ptr; // NULL or &zlcache struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; -#ifdef CONFIG_NUMA - struct zonelist_cache zlcache; // optional ... 
-#endif }; #ifndef CONFIG_DISCONTIGMEM @@ -817,14 +740,13 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx, int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, - unsigned long mark, int classzone_idx, int alloc_flags); + unsigned long mark, int classzone_idx); enum memmap_context { MEMMAP_EARLY, MEMMAP_HOTPLUG, }; extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, - unsigned long size, - enum memmap_context context); + unsigned long size); extern void lruvec_init(struct lruvec *lruvec); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 6975cbf1435b..64f36e09a790 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -219,6 +219,14 @@ struct serio_device_id { __u8 proto; }; +struct hda_device_id { + __u32 vendor_id; + __u32 rev_id; + __u8 api_version; + const char *name; + unsigned long driver_data; +}; + /* * Struct used for matching a device */ diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index c12f2147c350..52666d90ca94 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -386,6 +386,7 @@ extern int param_get_ullong(char *buffer, const struct kernel_param *kp); extern const struct kernel_param_ops param_ops_charp; extern int param_set_charp(const char *val, const struct kernel_param *kp); extern int param_get_charp(char *buffer, const struct kernel_param *kp); +extern void param_free_charp(void *arg); #define param_check_charp(name, p) __param_check(name, p, char *) /* We used to allow int as well as bool. We're taking that away! */ diff --git a/include/linux/msi.h b/include/linux/msi.h index 0b4460374020..f71a25e5fd25 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -163,6 +163,8 @@ struct msi_controller { int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, struct msi_desc *desc); + int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, + int nvec, int type); void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); }; diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 272f42952f34..5a9d1d4c2487 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -504,16 +504,16 @@ struct nand_ecc_ctrl { int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page); int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required); + const uint8_t *buf, int oob_required, int page); int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page); int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offs, uint32_t len, uint8_t *buf, int page); int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset, uint32_t data_len, - const uint8_t *data_buf, int oob_required); + const uint8_t *data_buf, int oob_required, int page); int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required); + const uint8_t *buf, int oob_required, int page); int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, int page); int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, @@ -544,7 +544,7 @@ struct nand_buffers { * flash device * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the 
* flash device. - * @dn: [BOARDSPECIFIC] device node describing this instance + * @flash_node: [BOARDSPECIFIC] device node describing this instance * @read_byte: [REPLACEABLE] read one byte from the chip * @read_word: [REPLACEABLE] read one word from the chip * @write_byte: [REPLACEABLE] write a single byte to the chip on the @@ -556,10 +556,6 @@ struct nand_buffers { * @block_markbad: [REPLACEABLE] mark a block bad * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling * ALE/CLE/nCE. Also used to write command and address - * @init_size: [BOARDSPECIFIC] hardwarespecific function for setting - * mtd->oobsize, mtd->writesize and so on. - * @id_data contains the 8 bytes values of NAND_CMD_READID. - * Return with the bus width. * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing * device ready/busy line. If set to NULL no access to * ready/busy is available and the ready/busy information @@ -647,7 +643,7 @@ struct nand_chip { void __iomem *IO_ADDR_R; void __iomem *IO_ADDR_W; - struct device_node *dn; + struct device_node *flash_node; uint8_t (*read_byte)(struct mtd_info *mtd); u16 (*read_word)(struct mtd_info *mtd); @@ -658,8 +654,6 @@ struct nand_chip { int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip); int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); - int (*init_size)(struct mtd_info *mtd, struct nand_chip *this, - u8 *id_data); int (*dev_ready)(struct mtd_info *mtd); void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, int page_addr); @@ -1030,4 +1024,9 @@ struct nand_sdr_timings { /* get timing characteristics from ONFI timing mode. */ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); + +int nand_check_erased_ecc_chunk(void *data, int datalen, + void *ecc, int ecclen, + void *extraoob, int extraooblen, + int threshold); #endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index e5409524bb0a..c8723b62c4cd 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -10,6 +10,23 @@ #ifndef __LINUX_MTD_SPI_NOR_H #define __LINUX_MTD_SPI_NOR_H +#include <linux/bitops.h> +#include <linux/mtd/cfi.h> + +/* + * Manufacturer IDs + * + * The first byte returned from the flash after sending opcode SPINOR_OP_RDID. + * Sometimes these are the same as CFI IDs, but sometimes they aren't. + */ +#define SNOR_MFR_ATMEL CFI_MFR_ATMEL +#define SNOR_MFR_INTEL CFI_MFR_INTEL +#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ +#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX +#define SNOR_MFR_SPANSION CFI_MFR_AMD +#define SNOR_MFR_SST CFI_MFR_SST +#define SNOR_MFR_WINBOND 0xef + /* * Note on opcode nomenclature: some opcodes have a format like * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number @@ -61,24 +78,24 @@ #define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ /* Status Register bits. 
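/*
 * Editorial sketch (not part of the patch): intended use of the
 * nand_check_erased_ecc_chunk() helper added above. When the ECC engine
 * reports an uncorrectable error, a driver can ask whether the chunk is
 * really an erased page with a few bitflips; the buffers and the
 * threshold here are illustrative.
 */
static inline int sketch_ecc_failure(u8 *data, int datalen, u8 *ecc, int ecclen)
{
	/* returns a bitflip count if the chunk reads as erased, else -EBADMSG */
	return nand_check_erased_ecc_chunk(data, datalen, ecc, ecclen,
					   NULL, 0, 4 /* assumed threshold */);
}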
*/ -#define SR_WIP 1 /* Write in progress */ -#define SR_WEL 2 /* Write enable latch */ +#define SR_WIP BIT(0) /* Write in progress */ +#define SR_WEL BIT(1) /* Write enable latch */ /* meaning of other SR_* bits may differ between vendors */ -#define SR_BP0 4 /* Block protect 0 */ -#define SR_BP1 8 /* Block protect 1 */ -#define SR_BP2 0x10 /* Block protect 2 */ -#define SR_SRWD 0x80 /* SR write protect */ +#define SR_BP0 BIT(2) /* Block protect 0 */ +#define SR_BP1 BIT(3) /* Block protect 1 */ +#define SR_BP2 BIT(4) /* Block protect 2 */ +#define SR_SRWD BIT(7) /* SR write protect */ -#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */ +#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */ /* Enhanced Volatile Configuration Register bits */ -#define EVCR_QUAD_EN_MICRON 0x80 /* Micron Quad I/O */ +#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ /* Flag Status Register bits */ -#define FSR_READY 0x80 +#define FSR_READY BIT(7) /* Configuration Register bits. */ -#define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */ +#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ enum read_mode { SPI_NOR_NORMAL = 0, @@ -87,33 +104,6 @@ enum read_mode { SPI_NOR_QUAD, }; -/** - * struct spi_nor_xfer_cfg - Structure for defining a Serial Flash transfer - * @wren: command for "Write Enable", or 0x00 for not required - * @cmd: command for operation - * @cmd_pins: number of pins to send @cmd (1, 2, 4) - * @addr: address for operation - * @addr_pins: number of pins to send @addr (1, 2, 4) - * @addr_width: number of address bytes - * (3,4, or 0 for address not required) - * @mode: mode data - * @mode_pins: number of pins to send @mode (1, 2, 4) - * @mode_cycles: number of mode cycles (0 for mode not required) - * @dummy_cycles: number of dummy cycles (0 for dummy not required) - */ -struct spi_nor_xfer_cfg { - u8 wren; - u8 cmd; - u8 cmd_pins; - u32 addr; - u8 addr_pins; - u8 addr_width; - u8 mode; - u8 mode_pins; - u8 mode_cycles; - u8 dummy_cycles; -}; - #define SPI_NOR_MAX_CMD_SIZE 8 enum spi_nor_ops { SPI_NOR_OPS_READ = 0, @@ -127,11 +117,14 @@ enum spi_nor_option_flags { SNOR_F_USE_FSR = BIT(0), }; +struct mtd_info; + /** * struct spi_nor - Structure for defining the SPI NOR layer * @mtd: point to a mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: point to a spi device, or a spi nor controller device. + * @flash_node: point to a device node describing this flash instance.
* @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -141,28 +134,28 @@ enum spi_nor_option_flags { * @flash_read: the mode of the read * @sst_write_second: used by the SST write operation * @flags: flag options for the current SPI-NOR (SNOR_F_*) - * @cfg: used by the read_xfer/write_xfer * @cmd_buf: used by the write_reg * @prepare: [OPTIONAL] do some preparations for the * read/write/erase/lock/unlock operations * @unprepare: [OPTIONAL] do some post work after the * read/write/erase/lock/unlock operations - * @read_xfer: [OPTIONAL] the read fundamental primitive - * @write_xfer: [OPTIONAL] the writefundamental primitive * @read_reg: [DRIVER-SPECIFIC] read out the register * @write_reg: [DRIVER-SPECIFIC] write data to the register * @read: [DRIVER-SPECIFIC] read data from the SPI NOR * @write: [DRIVER-SPECIFIC] write data to the SPI NOR * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR * at the offset @offs - * @lock: [FLASH-SPECIFIC] lock a region of the SPI NOR - * @unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR + * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR + * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR + * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is + * completely locked * @priv: the private data */ struct spi_nor { - struct mtd_info *mtd; + struct mtd_info mtd; struct mutex lock; struct device *dev; + struct device_node *flash_node; u32 page_size; u8 addr_width; u8 erase_opcode; @@ -172,18 +165,12 @@ struct spi_nor { enum read_mode flash_read; bool sst_write_second; u32 flags; - struct spi_nor_xfer_cfg cfg; u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); - int (*read_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg, - u8 *buf, size_t len); - int (*write_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg, - u8 *buf, size_t len); int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); - int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len, - int write_enable); + int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); int (*read)(struct spi_nor *nor, loff_t from, size_t len, size_t *retlen, u_char *read_buf); @@ -193,6 +180,7 @@ struct spi_nor { int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); void *priv; }; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 00121f298269..e7e78537aea2 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -130,6 +130,7 @@ enum nfs_opnum4 { OP_READ_PLUS = 68, OP_SEEK = 69, OP_WRITE_SAME = 70, + OP_CLONE = 71, OP_ILLEGAL = 10044, }; @@ -421,6 +422,7 @@ enum lock_type4 { #define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0) #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) +#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) /* MDS threshold bitmap bits */ @@ -501,6 +503,7 @@ enum { NFSPROC4_CLNT_ALLOCATE, NFSPROC4_CLNT_DEALLOCATE, NFSPROC4_CLNT_LAYOUTSTATS, + NFSPROC4_CLNT_CLONE, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 570a7df2775b..2469ab0bb3a1 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -147,6 +147,7 @@ struct 
nfs_server { unsigned int acdirmax; unsigned int namelen; unsigned int options; /* extra options enabled by mount */ + unsigned int clone_blksize; /* granularity of a CLONE operation */ #define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ #define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */ @@ -243,5 +244,6 @@ struct nfs_server { #define NFS_CAP_ALLOCATE (1U << 20) #define NFS_CAP_DEALLOCATE (1U << 21) #define NFS_CAP_LAYOUTSTATS (1U << 22) +#define NFS_CAP_CLONE (1U << 23) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 52faf7e96c65..570d630f98ae 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -141,6 +141,7 @@ struct nfs_fsinfo { __u32 lease_time; /* in seconds */ __u32 layouttype; /* supported pnfs layout driver */ __u32 blksize; /* preferred pnfs io block size */ + __u32 clone_blksize; /* granularity of a CLONE operation */ }; struct nfs_fsstat { @@ -359,6 +360,25 @@ struct nfs42_layoutstat_data { struct nfs42_layoutstat_res res; }; +struct nfs42_clone_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh *src_fh; + struct nfs_fh *dst_fh; + nfs4_stateid src_stateid; + nfs4_stateid dst_stateid; + __u64 src_offset; + __u64 dst_offset; + __u64 count; + const u32 *dst_bitmask; +}; + +struct nfs42_clone_res { + struct nfs4_sequence_res seq_res; + unsigned int rpc_status; + struct nfs_fattr *dst_fattr; + const struct nfs_server *server; +}; + struct stateowner_id { __u64 create_time; __u32 uniquifier; @@ -528,7 +548,7 @@ struct nfs4_delegreturnargs { struct nfs4_delegreturnres { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; - const struct nfs_server *server; + struct nfs_server *server; }; /* @@ -601,7 +621,7 @@ struct nfs_removeargs { struct nfs_removeres { struct nfs4_sequence_res seq_res; - const struct nfs_server *server; + struct nfs_server *server; struct nfs_fattr *dir_attr; struct nfs4_change_info cinfo; }; @@ -619,7 +639,7 @@ struct nfs_renameargs { struct nfs_renameres { struct nfs4_sequence_res seq_res; - const struct nfs_server *server; + struct nfs_server *server; struct nfs4_change_info old_cinfo; struct nfs_fattr *old_fattr; struct nfs4_change_info new_cinfo; @@ -685,7 +705,6 @@ struct nfs_setaclargs { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; size_t acl_len; - unsigned int acl_pgbase; struct page ** acl_pages; }; @@ -697,7 +716,6 @@ struct nfs_getaclargs { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; size_t acl_len; - unsigned int acl_pgbase; struct page ** acl_pages; }; diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 78488e099ce7..7ec5b86735f3 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -73,6 +73,7 @@ extern int watchdog_user_enabled; extern int watchdog_thresh; extern unsigned long *watchdog_cpumask_bits; extern int sysctl_softlockup_all_cpu_backtrace; +extern int sysctl_hardlockup_all_cpu_backtrace; struct ctl_table; extern int proc_watchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index 98ba7525929e..36112cdd665a 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h @@ -34,7 +34,7 @@ struct of_dma_filter_info { dma_filter_fn filter_fn; }; -#ifdef CONFIG_OF +#ifdef CONFIG_DMA_OF extern int of_dma_controller_register(struct device_node *np, struct dma_chan *(*of_dma_xlate) (struct of_phandle_args *, struct of_dma *), diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 65d969246a4d..039f2eec49ce 100644 --- 
a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -51,6 +51,7 @@ extern struct irq_domain *of_msi_get_domain(struct device *dev, enum irq_domain_bus_token token); extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid); +extern void of_msi_configure(struct device *dev, struct device_node *np); #else static inline int of_irq_count(struct device_node *dev) { @@ -80,31 +81,27 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev { return NULL; } +static inline void of_msi_configure(struct device *dev, struct device_node *np) +{ +} #endif -#if defined(CONFIG_OF) +#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC) /* * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC * implements it differently. However, the prototype is the same for all, * so declare it here regardless of the CONFIG_OF_IRQ setting. */ extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); -extern struct device_node *of_irq_find_parent(struct device_node *child); -extern void of_msi_configure(struct device *dev, struct device_node *np); u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); -#else /* !CONFIG_OF */ +#else /* !CONFIG_OF && !CONFIG_SPARC */ static inline unsigned int irq_of_parse_and_map(struct device_node *dev, int index) { return 0; } -static inline void *of_irq_find_parent(struct device_node *child) -{ - return NULL; -} - static inline u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in) { diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 29fd3fe1c035..38c0533a3359 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -17,6 +17,7 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); int of_pci_parse_bus_range(struct device_node *node, struct resource *res); int of_get_pci_domain_nr(struct device_node *node); void of_pci_dma_configure(struct pci_dev *pci_dev); +void of_pci_check_probe_only(void); #else static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) { @@ -53,6 +54,8 @@ of_get_pci_domain_nr(struct device_node *node) } static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { } + +static inline void of_pci_check_probe_only(void) { } #endif #if defined(CONFIG_OF_ADDRESS) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 416509e26d6d..bb53c7b86315 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -86,12 +86,7 @@ enum pageflags { PG_private, /* If pagecache, has fs-private data */ PG_private_2, /* If pagecache, has fs aux data */ PG_writeback, /* Page is under writeback */ -#ifdef CONFIG_PAGEFLAGS_EXTENDED PG_head, /* A head page */ - PG_tail, /* A tail page */ -#else - PG_compound, /* A compound page */ -#endif PG_swapcache, /* Swap page: swp_entry_t in private */ PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_reclaim, /* To be reclaimed asap */ @@ -256,7 +251,7 @@ PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim) * Must use a macro here due to header dependency issues. page_zone() is not * available at this point. */ -#define PageHighMem(__p) is_highmem(page_zone(__p)) +#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p)) #else PAGEFLAG_FALSE(HighMem) #endif @@ -398,85 +393,46 @@ static inline void set_page_writeback_keepwrite(struct page *page) test_set_page_writeback_keepwrite(page); } -#ifdef CONFIG_PAGEFLAGS_EXTENDED -/* - * System with lots of page flags available. 
This allows separate - * flags for PageHead() and PageTail() checks of compound pages so that bit - * tests can be used in performance sensitive paths. PageCompound is - * generally not used in hot code paths except arch/powerpc/mm/init_64.c - * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages - * and avoid handling those in real mode. - */ __PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head) -__PAGEFLAG(Tail, tail) -static inline int PageCompound(struct page *page) -{ - return page->flags & ((1L << PG_head) | (1L << PG_tail)); - -} -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline void ClearPageCompound(struct page *page) +static inline int PageTail(struct page *page) { - BUG_ON(!PageHead(page)); - ClearPageHead(page); + return READ_ONCE(page->compound_head) & 1; } -#endif - -#define PG_head_mask ((1L << PG_head)) -#else -/* - * Reduce page flag use as much as possible by overlapping - * compound page flags with the flags used for page cache pages. Possible - * because PageCompound is always set for compound pages and not for - * pages on the LRU and/or pagecache. - */ -TESTPAGEFLAG(Compound, compound) -__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound) - -/* - * PG_reclaim is used in combination with PG_compound to mark the - * head and tail of a compound page. This saves one page flag - * but makes it impossible to use compound pages for the page cache. - * The PG_reclaim bit would have to be used for reclaim or readahead - * if compound pages enter the page cache. - * - * PG_compound & PG_reclaim => Tail page - * PG_compound & ~PG_reclaim => Head page - */ -#define PG_head_mask ((1L << PG_compound)) -#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) - -static inline int PageHead(struct page *page) +static inline void set_compound_head(struct page *page, struct page *head) { - return ((page->flags & PG_head_tail_mask) == PG_head_mask); + WRITE_ONCE(page->compound_head, (unsigned long)head + 1); } -static inline int PageTail(struct page *page) +static inline void clear_compound_head(struct page *page) { - return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); + WRITE_ONCE(page->compound_head, 0); } -static inline void __SetPageTail(struct page *page) +static inline struct page *compound_head(struct page *page) { - page->flags |= PG_head_tail_mask; + unsigned long head = READ_ONCE(page->compound_head); + + if (unlikely(head & 1)) + return (struct page *) (head - 1); + return page; } -static inline void __ClearPageTail(struct page *page) +static inline int PageCompound(struct page *page) { - page->flags &= ~PG_head_tail_mask; -} + return PageHead(page) || PageTail(page); +} #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void ClearPageCompound(struct page *page) { - BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound)); - clear_bit(PG_compound, &page->flags); + BUG_ON(!PageHead(page)); + ClearPageHead(page); } #endif -#endif /* !PAGEFLAGS_EXTENDED */ +#define PG_head_mask ((1L << PG_head)) #ifdef CONFIG_HUGETLB_PAGE int PageHuge(struct page *page); diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 17fa4f8de3a6..7e62920a3a94 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -36,9 +36,9 @@ static inline unsigned long page_counter_read(struct page_counter *counter) void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); -int page_counter_try_charge(struct 
page_counter *counter, - unsigned long nr_pages, - struct page_counter **fail); +bool page_counter_try_charge(struct page_counter *counter, + unsigned long nr_pages, + struct page_counter **fail); void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); int page_counter_limit(struct page_counter *counter, unsigned long limit); int page_counter_memparse(const char *buf, const char *max, diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 2baeee12f48e..e942558b3585 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -44,7 +44,7 @@ enum pageblock_bits { #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE /* Huge page sizes are variable */ -extern int pageblock_order; +extern unsigned int pageblock_order; #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a6c78e00ea96..26eabf5ec718 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -69,6 +69,13 @@ static inline gfp_t mapping_gfp_mask(struct address_space * mapping) return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; } +/* Restricts the given gfp_mask to what the mapping allows. */ +static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, + gfp_t gfp_mask) +{ + return mapping_gfp_mask(mapping) & gfp_mask; +} + /* * This is non-atomic. Only to be used before the mapping is activated. * Probably needs a barrier... diff --git a/include/linux/pci.h b/include/linux/pci.h index e90eb22de628..e828e7b4afec 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -820,6 +820,7 @@ void pci_bus_add_device(struct pci_dev *dev); void pci_read_bridge_bases(struct pci_bus *child); struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); +struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev); u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin); int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); @@ -1192,6 +1193,17 @@ void pci_unregister_driver(struct pci_driver *dev); module_driver(__pci_driver, pci_register_driver, \ pci_unregister_driver) +/** + * builtin_pci_driver() - Helper macro for registering a PCI driver + * @__pci_driver: pci_driver struct + * + * Helper macro for PCI drivers which do not do anything special in their + * init code. This eliminates a lot of boilerplate. Each driver may only + * use this macro once, and calling it replaces device_initcall(...) 
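/*
 * Editorial sketch (not part of the patch): a minimal user of the new
 * builtin_pci_driver() macro above; the driver name and the omitted
 * probe callback and id table are placeholders.
 */
static struct pci_driver sketch_pci_driver = {
	.name = "sketch-pci",
	/* .id_table and .probe omitted for brevity */
};
builtin_pci_driver(sketch_pci_driver);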
+ */ +#define builtin_pci_driver(__pci_driver) \ + builtin_driver(__pci_driver, pci_register_driver) + struct pci_driver *pci_dev_driver(const struct pci_dev *dev); int pci_add_dynid(struct pci_driver *drv, unsigned int vendor, unsigned int device, diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h index 91b16adab0cd..3c8825b67298 100644 --- a/include/linux/platform_data/atmel.h +++ b/include/linux/platform_data/atmel.h @@ -9,15 +9,7 @@ #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> -#include <linux/device.h> -#include <linux/i2c.h> -#include <linux/leds.h> -#include <linux/spi/spi.h> -#include <linux/usb/atmel_usba_udc.h> -#include <linux/atmel-mci.h> -#include <sound/atmel-ac97c.h> #include <linux/serial.h> -#include <linux/platform_data/macb.h> /* Compact Flash */ struct at91_cf_data { diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 87ac14c584f2..03b6095d3b18 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -37,6 +37,7 @@ struct dw_dma_slave { * @nr_channels: Number of channels supported by hardware (max 8) * @is_private: The device channels should be marked as private and not for * by the general purpose DMA channel allocator. + * @is_memcpy: The device channels do support memory-to-memory transfers. * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. * @block_size: Maximum block size supported by the controller @@ -47,6 +48,7 @@ struct dw_dma_slave { struct dw_dma_platform_data { unsigned int nr_channels; bool is_private; + bool is_memcpy; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ unsigned char chan_allocation_order; diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index bdb2710e2aab..e2878baeb90e 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -41,51 +41,6 @@ #ifndef EDMA_H_ #define EDMA_H_ -/* PaRAM slots are laid out like this */ -struct edmacc_param { - u32 opt; - u32 src; - u32 a_b_cnt; - u32 dst; - u32 src_dst_bidx; - u32 link_bcntrld; - u32 src_dst_cidx; - u32 ccnt; -} __packed; - -/* fields in edmacc_param.opt */ -#define SAM BIT(0) -#define DAM BIT(1) -#define SYNCDIM BIT(2) -#define STATIC BIT(3) -#define EDMA_FWID (0x07 << 8) -#define TCCMODE BIT(11) -#define EDMA_TCC(t) ((t) << 12) -#define TCINTEN BIT(20) -#define ITCINTEN BIT(21) -#define TCCHEN BIT(22) -#define ITCCHEN BIT(23) - -/*ch_status paramater of callback function possible values*/ -#define EDMA_DMA_COMPLETE 1 -#define EDMA_DMA_CC_ERROR 2 -#define EDMA_DMA_TC1_ERROR 3 -#define EDMA_DMA_TC2_ERROR 4 - -enum address_mode { - INCR = 0, - FIFO = 1 -}; - -enum fifo_width { - W8BIT = 0, - W16BIT = 1, - W32BIT = 2, - W64BIT = 3, - W128BIT = 4, - W256BIT = 5 -}; - enum dma_event_q { EVENTQ_0 = 0, EVENTQ_1 = 1, @@ -94,64 +49,10 @@ enum dma_event_q { EVENTQ_DEFAULT = -1 }; -enum sync_dimension { - ASYNC = 0, - ABSYNC = 1 -}; - #define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) #define EDMA_CTLR(i) ((i) >> 16) #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) -#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ -#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ -#define EDMA_CONT_PARAMS_ANY 1001 -#define EDMA_CONT_PARAMS_FIXED_EXACT 1002 -#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 - -#define EDMA_MAX_CC 2 - -/* alloc/free DMA channels 
and their dedicated parameter RAM slots */ -int edma_alloc_channel(int channel, - void (*callback)(unsigned channel, u16 ch_status, void *data), - void *data, enum dma_event_q); -void edma_free_channel(unsigned channel); - -/* alloc/free parameter RAM slots */ -int edma_alloc_slot(unsigned ctlr, int slot); -void edma_free_slot(unsigned slot); - -/* alloc/free a set of contiguous parameter RAM slots */ -int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count); -int edma_free_cont_slots(unsigned slot, int count); - -/* calls that operate on part of a parameter RAM slot */ -void edma_set_src(unsigned slot, dma_addr_t src_port, - enum address_mode mode, enum fifo_width); -void edma_set_dest(unsigned slot, dma_addr_t dest_port, - enum address_mode mode, enum fifo_width); -dma_addr_t edma_get_position(unsigned slot, bool dst); -void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); -void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); -void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, - u16 bcnt_rld, enum sync_dimension sync_mode); -void edma_link(unsigned from, unsigned to); -void edma_unlink(unsigned from); - -/* calls that operate on an entire parameter RAM slot */ -void edma_write_slot(unsigned slot, const struct edmacc_param *params); -void edma_read_slot(unsigned slot, struct edmacc_param *params); - -/* channel control operations */ -int edma_start(unsigned channel); -void edma_stop(unsigned channel); -void edma_clean_channel(unsigned channel); -void edma_clear_event(unsigned channel); -void edma_pause(unsigned channel); -void edma_resume(unsigned channel); - -void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no); - struct edma_rsv_info { const s16 (*rsv_chans)[2]; @@ -170,10 +71,11 @@ struct edma_soc_info { /* Resource reservation for other cores */ struct edma_rsv_info *rsv; + /* List of channels allocated for memcpy, terminated with -1 */ + s16 *memcpy_channels; + s8 (*queue_priority_mapping)[2]; const s16 (*xbar_chans)[2]; }; -int edma_trigger_channel(unsigned); - #endif diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h index ac4ea2e641c7..394d15597dc7 100644 --- a/include/linux/platform_data/mtd-nand-pxa3xx.h +++ b/include/linux/platform_data/mtd-nand-pxa3xx.h @@ -4,30 +4,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> -struct pxa3xx_nand_timing { - unsigned int tCH; /* Enable signal hold time */ - unsigned int tCS; /* Enable signal setup time */ - unsigned int tWH; /* ND_nWE high duration */ - unsigned int tWP; /* ND_nWE pulse time */ - unsigned int tRH; /* ND_nRE high duration */ - unsigned int tRP; /* ND_nRE pulse width */ - unsigned int tR; /* ND_nWE high to ND_nRE low for read */ - unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */ - unsigned int tAR; /* ND_ALE low to ND_nRE low delay */ -}; - -struct pxa3xx_nand_flash { - char *name; - uint32_t chip_id; - unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */ - unsigned int page_size; /* Page size in bytes (PAGE_SZ) */ - unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */ - unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */ - unsigned int num_blocks; /* Number of physical blocks in Flash */ - - struct pxa3xx_nand_timing *timing; /* NAND Flash timing */ -}; - /* * Current pxa3xx_nand controller has two chip select which * both be workable. 
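With the exported edma_* channel and slot calls removed above, clients are expected to go through dmaengine, and only platform code still touches edma_soc_info. A hedged sketch of how a board file might populate the new @memcpy_channels list (the variable names and channel numbers are invented; only the -1 terminator is mandated by the kernel-doc above):

#include <linux/platform_data/edma.h>

/* channels the eDMA dmaengine driver may dedicate to memory-to-memory
 * transfers, terminated with -1 as required */
static s16 example_memcpy_channels[] = { 20, 21, -1 };

static struct edma_soc_info example_edma_info = {
	.memcpy_channels = example_memcpy_channels,
};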
@@ -63,9 +39,6 @@ struct pxa3xx_nand_platform_data { const struct mtd_partition *parts[NUM_CHIP_SELECT]; unsigned int nr_parts[NUM_CHIP_SELECT]; - - const struct pxa3xx_nand_flash * flash; - size_t num_flash; }; extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); diff --git a/include/linux/pmem.h b/include/linux/pmem.h index 85f810b33917..acfea8ce4a07 100644 --- a/include/linux/pmem.h +++ b/include/linux/pmem.h @@ -65,11 +65,6 @@ static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t si memcpy(dst, (void __force const *) src, size); } -static inline void memunmap_pmem(struct device *dev, void __pmem *addr) -{ - devm_memunmap(dev, (void __force *) addr); -} - static inline bool arch_has_pmem_api(void) { return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API); @@ -93,7 +88,7 @@ static inline bool arch_has_wmb_pmem(void) * These defaults seek to offer decent performance and minimize the * window between i/o completion and writes being durable on media. * However, it is undefined / architecture specific whether - * default_memremap_pmem + default_memcpy_to_pmem is sufficient for + * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for * making data durable relative to i/o completion. */ static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src, @@ -117,25 +112,6 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size) } /** - * memremap_pmem - map physical persistent memory for pmem api - * @offset: physical address of persistent memory - * @size: size of the mapping - * - * Establish a mapping of the architecture specific memory type expected - * by memcpy_to_pmem() and wmb_pmem(). For example, it may be - * the case that an uncacheable or writethrough mapping is sufficient, - * or a writeback mapping provided memcpy_to_pmem() and - * wmb_pmem() arrange for the data to be written through the - * cache to persistent media. - */ -static inline void __pmem *memremap_pmem(struct device *dev, - resource_size_t offset, unsigned long size) -{ - return (void __pmem *) devm_memremap(dev, offset, size, - ARCH_MEMREMAP_PMEM); -} - -/** * memcpy_to_pmem - copy data to persistent memory * @dst: destination buffer for the copy * @src: source buffer for the copy diff --git a/include/linux/power/bq27x00_battery.h b/include/linux/power/bq27x00_battery.h deleted file mode 100644 index a857f719bf40..000000000000 --- a/include/linux/power/bq27x00_battery.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __LINUX_BQ27X00_BATTERY_H__ -#define __LINUX_BQ27X00_BATTERY_H__ - -/** - * struct bq27000_plaform_data - Platform data for bq27000 devices - * @name: Name of the battery. If NULL the driver will fallback to "bq27000". - * @read: HDQ read callback. - * This function should provide access to the HDQ bus the battery is - * connected to. - * The first parameter is a pointer to the battery device, the second the - * register to be read. The return value should either be the content of - * the passed register or an error value. - */ -struct bq27000_platform_data { - const char *name; - int (*read)(struct device *dev, unsigned int); -}; - -#endif diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h new file mode 100644 index 000000000000..45f6a7b5b3cb --- /dev/null +++ b/include/linux/power/bq27xxx_battery.h @@ -0,0 +1,31 @@ +#ifndef __LINUX_BQ27X00_BATTERY_H__ +#define __LINUX_BQ27X00_BATTERY_H__ + +/** + * struct bq27xxx_plaform_data - Platform data for bq27xxx devices + * @name: Name of the battery. 
+ * @chip: Chip class number of this device. + * @read: HDQ read callback. + * This function should provide access to the HDQ bus the battery is + * connected to. + * The first parameter is a pointer to the battery device, the second the + * register to be read. The return value should either be the content of + * the passed register or an error value. + */ +enum bq27xxx_chip { + BQ27000 = 1, /* bq27000, bq27200 */ + BQ27010, /* bq27010, bq27210 */ + BQ27500, /* bq27500, bq27510, bq27520 */ + BQ27530, /* bq27530, bq27531 */ + BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ + BQ27545, /* bq27545 */ + BQ27421, /* bq27421, bq27425, bq27441, bq27621 */ +}; + +struct bq27xxx_platform_data { + const char *name; + enum bq27xxx_chip chip; + int (*read)(struct device *dev, unsigned int); +}; + +#endif diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index eadf28cb2fc9..c4fa907c8f14 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h @@ -65,7 +65,7 @@ struct charger_cable { const char *extcon_name; const char *name; - /* The charger-manager use Exton framework*/ + /* The charger-manager use Extcon framework */ struct extcon_specific_cable_nb extcon_dev; struct work_struct wq; struct notifier_block nb; @@ -94,7 +94,7 @@ struct charger_cable { * the charger will be maintained with disabled state. * @cables: * the array of charger cables to enable/disable charger - * and set current limit according to constratint data of + * and set current limit according to constraint data of * struct charger_cable if only charger cable included * in the array of charger cables is attached/detached. * @num_cables: the number of charger cables. @@ -148,7 +148,7 @@ struct charger_regulator { * @polling_interval_ms: interval in millisecond at which * charger manager will monitor battery health * @battery_present: - * Specify where information for existance of battery can be obtained + * Specify where information for existence of battery can be obtained * @psy_charger_stat: the names of power-supply for chargers * @num_charger_regulator: the number of entries in charger_regulators * @charger_regulators: array of charger regulators @@ -156,7 +156,7 @@ struct charger_regulator { * @thermal_zone : the name of thermal zone for battery * @temp_min : Minimum battery temperature for charging. * @temp_max : Maximum battery temperature for charging. - * @temp_diff : Temperature diffential to restart charging. + * @temp_diff : Temperature difference to restart charging. 
* @measure_battery_temp: * true: measure battery temperature * false: measure ambient temperature diff --git a/include/linux/psci.h b/include/linux/psci.h index a682fcc91c33..12c4865457ad 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -21,6 +21,8 @@ #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 bool psci_tos_resident_on(int cpu); +bool psci_power_state_loses_context(u32 state); +bool psci_power_state_is_valid(u32 state); struct psci_operations { int (*cpu_suspend)(u32 state, unsigned long entry_point); diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 8e7a25b068b0..831479f8df8f 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -75,20 +75,8 @@ struct pstore_info { #define PSTORE_FLAGS_FRAGILE 1 -#ifdef CONFIG_PSTORE extern int pstore_register(struct pstore_info *); +extern void pstore_unregister(struct pstore_info *); extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); -#else -static inline int -pstore_register(struct pstore_info *psi) -{ - return -ENODEV; -} -static inline bool -pstore_cannot_block_path(enum kmsg_dump_reason reason) -{ - return false; -} -#endif #endif /*_LINUX_PSTORE_H*/ diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index 92273776bce6..c2f2574ff61c 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -198,6 +198,7 @@ enum pxa_ssp_type { LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ LPSS_BYT_SSP, LPSS_SPT_SSP, + LPSS_BXT_SSP, }; struct ssp_device { diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 6e7d5ec65838..9e12000914b3 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -23,6 +23,8 @@ struct qcom_scm_hdcp_req { u32 val; }; +extern bool qcom_scm_is_available(void); + extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 830c4992088d..a5aa7ae671f4 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -101,13 +101,21 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent }) /** - * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of - * given type safe against removal of rb_node entry + * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of + * given type allowing the backing memory of @pos to be invalidated * * @pos: the 'type *' to use as a loop cursor. * @n: another 'type *' to use as temporary storage * @root: 'rb_root *' of the rbtree. * @field: the name of the rb_node field within 'type'. + * + * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as + * list_for_each_entry_safe() and allows the iteration to continue independent + * of changes to @pos by the body of the loop. + * + * Note, however, that it cannot handle other modifications that re-order the + * rbtree it is iterating over. This includes calling rb_erase() on @pos, as + * rb_erase() may rebalance the tree, causing us to miss some nodes. 
*/ #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 45932228cbf5..9c2903e58adb 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -245,6 +245,7 @@ enum regulator_type { * @linear_min_sel: Minimal selector for starting linear mapping * @fixed_uV: Fixed voltage of rails. * @ramp_delay: Time to settle down after voltage change (unit: uV/us) + * @min_dropout_uV: The minimum dropout voltage this regulator can handle * @linear_ranges: A constant table of possible voltage ranges. * @n_linear_ranges: Number of entries in the @linear_ranges table. * @volt_table: Voltage mapping table (if table based mapping) @@ -292,6 +293,7 @@ struct regulator_desc { unsigned int linear_min_sel; int fixed_uV; unsigned int ramp_delay; + int min_dropout_uV; const struct regulator_linear_range *linear_ranges; int n_linear_ranges; diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index e2c13cd863bd..4acc552e9279 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -154,8 +154,8 @@ ring_buffer_swap_cpu(struct ring_buffer *buffer_a, } #endif -int ring_buffer_empty(struct ring_buffer *buffer); -int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); +bool ring_buffer_empty(struct ring_buffer *buffer); +bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_record_disable(struct ring_buffer *buffer); void ring_buffer_record_enable(struct ring_buffer *buffer); diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h index 3f594dce5716..fe3dc64e5aeb 100644 --- a/include/linux/rotary_encoder.h +++ b/include/linux/rotary_encoder.h @@ -8,9 +8,10 @@ struct rotary_encoder_platform_data { unsigned int gpio_b; unsigned int inverted_a; unsigned int inverted_b; + unsigned int steps_per_period; bool relative_axis; bool rollover; - bool half_period; + bool wakeup_source; }; #endif /* __ROTARY_ENCODER_H__ */ diff --git a/include/linux/sched.h b/include/linux/sched.h index c115d617739d..edad7a43edea 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -384,6 +384,7 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); extern unsigned int softlockup_panic; +extern unsigned int hardlockup_panic; void lockup_detector_init(void); #else static inline void touch_softlockup_watchdog(void) @@ -483,9 +484,11 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_DUMP_ELF_HEADERS 6 #define MMF_DUMP_HUGETLB_PRIVATE 7 #define MMF_DUMP_HUGETLB_SHARED 8 +#define MMF_DUMP_DAX_PRIVATE 9 +#define MMF_DUMP_DAX_SHARED 10 #define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS -#define MMF_DUMP_FILTER_BITS 7 +#define MMF_DUMP_FILTER_BITS 9 #define MMF_DUMP_FILTER_MASK \ (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) #define MMF_DUMP_FILTER_DEFAULT \ @@ -771,18 +774,6 @@ struct signal_struct { unsigned audit_tty_log_passwd; struct tty_audit_buf *tty_audit_buf; #endif -#ifdef CONFIG_CGROUPS - /* - * group_rwsem prevents new tasks from entering the threadgroup and - * member tasks from exiting,a more specifically, setting of - * PF_EXITING. fork and exit paths are protected with this rwsem - * using threadgroup_change_begin/end(). 
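The clarified rbtree_postorder_for_each_entry_safe() kernel-doc a few hunks above is easiest to see in its canonical use, tearing a whole tree down: post-order visits children before parents, and the iterator caches the next node before the body runs, so each entry can be freed in place as long as nothing calls rb_erase() and rebalances mid-walk. A sketch with an invented payload type:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct example_entry {		/* hypothetical tree payload */
	struct rb_node node;
	int key;
};

static void example_destroy_tree(struct rb_root *root)
{
	struct example_entry *pos, *n;

	rbtree_postorder_for_each_entry_safe(pos, n, root, node) {
		/* no rb_erase() here: freeing in post-order is enough */
		kfree(pos);
	}
	*root = RB_ROOT;
}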
Users which require - * threadgroup to remain stable should use threadgroup_[un]lock() - * which also takes care of exec path. Currently, cgroup is the - * only user. - */ - struct rw_semaphore group_rwsem; -#endif oom_flags_t oom_flags; short oom_score_adj; /* OOM kill score adjustment */ @@ -1472,7 +1463,9 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; - +#ifdef CONFIG_MEMCG + unsigned memcg_may_oom:1; +#endif #ifdef CONFIG_MEMCG_KMEM unsigned memcg_kmem_skip_account:1; #endif @@ -1579,9 +1572,7 @@ struct task_struct { unsigned long sas_ss_sp; size_t sas_ss_size; - int (*notifier)(void *priv); - void *notifier_data; - sigset_t *notifier_mask; + struct callback_head *task_works; struct audit_context *audit_context; @@ -1803,12 +1794,12 @@ struct task_struct { unsigned long trace_recursion; #endif /* CONFIG_TRACING */ #ifdef CONFIG_MEMCG - struct memcg_oom_info { - struct mem_cgroup *memcg; - gfp_t gfp_mask; - int order; - unsigned int may_oom:1; - } memcg_oom; + struct mem_cgroup *memcg_in_oom; + gfp_t memcg_oom_gfp_mask; + int memcg_oom_order; + + /* number of pages to reclaim on returning to userland */ + unsigned int memcg_nr_pages_over_high; #endif #ifdef CONFIG_UPROBES struct uprobe_task *utask; @@ -2473,21 +2464,29 @@ extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); -static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) +static inline int kernel_dequeue_signal(siginfo_t *info) { - unsigned long flags; + struct task_struct *tsk = current; + siginfo_t __info; int ret; - spin_lock_irqsave(&tsk->sighand->siglock, flags); - ret = dequeue_signal(tsk, mask, info); - spin_unlock_irqrestore(&tsk->sighand->siglock, flags); + spin_lock_irq(&tsk->sighand->siglock); + ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info); + spin_unlock_irq(&tsk->sighand->siglock); return ret; } -extern void block_all_signals(int (*notifier)(void *priv), void *priv, - sigset_t *mask); -extern void unblock_all_signals(void); +static inline void kernel_signal_stop(void) +{ + spin_lock_irq(&current->sighand->siglock); + if (current->jobctl & JOBCTL_STOP_DEQUEUED) + __set_current_state(TASK_STOPPED); + spin_unlock_irq(&current->sighand->siglock); + + schedule(); +} + extern void release_task(struct task_struct * p); extern int send_sig_info(int, struct siginfo *, struct task_struct *); extern int force_sigsegv(int, struct task_struct *); diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h new file mode 100644 index 000000000000..80af3cd35ae4 --- /dev/null +++ b/include/linux/scpi_protocol.h @@ -0,0 +1,78 @@ +/* + * SCPI Message Protocol driver header + * + * Copyright (C) 2014 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
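The kernel_dequeue_signal() and kernel_signal_stop() helpers added in the sched.h hunk above replace the removed block_all_signals()/unblock_all_signals() notifier machinery for kernel threads that service their own signals. A hedged sketch of the consumer pattern (the surrounding thread and the choice of SIGSTOP handling are hypothetical):

#include <linux/sched.h>
#include <linux/signal.h>

/* Invented kthread fragment: drain one pending signal, park on SIGSTOP. */
static void example_check_signals(void)
{
	if (!signal_pending(current))
		return;

	switch (kernel_dequeue_signal(NULL)) {
	case SIGSTOP:
		kernel_signal_stop();	/* TASK_STOPPED until continued */
		break;
	default:
		break;			/* ignore anything else */
	}
}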
+ */ +#include <linux/types.h> + +struct scpi_opp { + u32 freq; + u32 m_volt; +} __packed; + +struct scpi_dvfs_info { + unsigned int count; + unsigned int latency; /* in nanoseconds */ + struct scpi_opp *opps; +}; + +enum scpi_sensor_class { + TEMPERATURE, + VOLTAGE, + CURRENT, + POWER, +}; + +struct scpi_sensor_info { + u16 sensor_id; + u8 class; + u8 trigger_type; + char name[20]; +} __packed; + +/** + * struct scpi_ops - represents the various operations provided + * by SCP through SCPI message protocol + * @get_version: returns the major and minor revision on the SCPI + * message protocol + * @clk_get_range: gets clock range limit(min - max in Hz) + * @clk_get_val: gets clock value(in Hz) + * @clk_set_val: sets the clock value, setting to 0 will disable the + * clock (if supported) + * @dvfs_get_idx: gets the Operating Point of the given power domain. + * OPP is an index to the list returned by @dvfs_get_info + * @dvfs_set_idx: sets the Operating Point of the given power domain. + * OPP is an index to the list returned by @dvfs_get_info + * @dvfs_get_info: returns the DVFS capabilities of the given power + * domain. It includes the OPP list and the latency information + */ +struct scpi_ops { + u32 (*get_version)(void); + int (*clk_get_range)(u16, unsigned long *, unsigned long *); + unsigned long (*clk_get_val)(u16); + int (*clk_set_val)(u16, unsigned long); + int (*dvfs_get_idx)(u8); + int (*dvfs_set_idx)(u8, u8); + struct scpi_dvfs_info *(*dvfs_get_info)(u8); + int (*sensor_get_capability)(u16 *sensors); + int (*sensor_get_info)(u16 sensor_id, struct scpi_sensor_info *); + int (*sensor_get_value)(u16, u32 *); +}; + +#if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL) +struct scpi_ops *get_scpi_ops(void); +#else +static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } +#endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 24f4dfd94c51..4355129fff91 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1224,7 +1224,7 @@ static inline int skb_cloned(const struct sk_buff *skb) static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) { - might_sleep_if(pri & __GFP_WAIT); + might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_cloned(skb)) return pskb_expand_head(skb, 0, 0, pri); @@ -1308,7 +1308,7 @@ static inline int skb_shared(const struct sk_buff *skb) */ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) { - might_sleep_if(pri & __GFP_WAIT); + might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, pri); @@ -1344,7 +1344,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) static inline struct sk_buff *skb_unshare(struct sk_buff *skb, gfp_t pri) { - might_sleep_if(pri & __GFP_WAIT); + might_sleep_if(gfpflags_allow_blocking(pri)); if (skb_cloned(skb)) { struct sk_buff *nskb = skb_copy(skb, pri); diff --git a/include/linux/slab.h b/include/linux/slab.h index 7e37d448ed91..7c82e3b307a3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -111,7 +111,7 @@ struct mem_cgroup; * struct kmem_cache related prototypes */ void __init kmem_cache_init(void); -int slab_is_available(void); +bool slab_is_available(void); struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h new file mode 100644 index 000000000000..337ce414e898 --- /dev/null +++ b/include/linux/soc/brcmstb/brcmstb.h @@ -0,0 +1,10 @@ +#ifndef __BRCMSTB_SOC_H +#define 
__BRCMSTB_SOC_H + +/* + * Bus Interface Unit control register setup, must happen early during boot, + * before SMP is brought up, called by machine entry point. + */ +void brcmstb_biuctrl_init(void); + +#endif /* __BRCMSTB_SOC_H */ diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h index d7e50aa6a4ac..d0cb6d189a0a 100644 --- a/include/linux/soc/qcom/smd.h +++ b/include/linux/soc/qcom/smd.h @@ -9,6 +9,14 @@ struct qcom_smd_channel; struct qcom_smd_lookup; /** + * struct qcom_smd_id - struct used for matching a smd device + * @name: name of the channel + */ +struct qcom_smd_id { + char name[20]; +}; + +/** * struct qcom_smd_device - smd device struct * @dev: the device struct * @channel: handle to the smd channel for this device @@ -21,6 +29,7 @@ struct qcom_smd_device { /** * struct qcom_smd_driver - smd driver struct * @driver: underlying device driver + * @smd_match_table: static channel match table * @probe: invoked when the smd channel is found * @remove: invoked when the smd channel is closed * @callback: invoked when an inbound message is received on the channel, @@ -29,6 +38,8 @@ struct qcom_smd_device { */ struct qcom_smd_driver { struct device_driver driver; + const struct qcom_smd_id *smd_match_table; + int (*probe)(struct qcom_smd_device *dev); void (*remove)(struct qcom_smd_device *dev); int (*callback)(struct qcom_smd_device *, const void *, size_t); diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h index bc9630d3aced..785e196ee2ca 100644 --- a/include/linux/soc/qcom/smem.h +++ b/include/linux/soc/qcom/smem.h @@ -4,7 +4,7 @@ #define QCOM_SMEM_HOST_ANY -1 int qcom_smem_alloc(unsigned host, unsigned item, size_t size); -int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size); +void *qcom_smem_get(unsigned host, unsigned item, size_t *size); int qcom_smem_get_free_space(unsigned host); diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 6d36dacec4ba..9ec4c147abbc 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -23,7 +23,6 @@ struct dma_chan; /* device.platform_data for SSP controller devices */ struct pxa2xx_spi_master { - u32 clock_enable; u16 num_chipselect; u8 enable_dma; diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 6b00f18f5e6b..cce80e6dc7d1 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -51,6 +51,8 @@ extern struct bus_type spi_bus_type; * @bytes_tx: number of bytes sent to device * @bytes_rx: number of bytes received from device * + * @transfer_bytes_histo: + * transfer bytes histogram */ struct spi_statistics { spinlock_t lock; /* lock for the whole structure */ @@ -68,6 +70,8 @@ struct spi_statistics { unsigned long long bytes_rx; unsigned long long bytes_tx; +#define SPI_STATISTICS_HISTO_SIZE 17 + unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; }; void spi_statistics_add_transfer_stats(struct spi_statistics *stats, @@ -250,7 +254,7 @@ static inline struct spi_driver *to_spi_driver(struct device_driver *drv) { return drv ? 
container_of(drv, struct spi_driver, driver) : NULL; } -extern int spi_register_driver(struct spi_driver *sdrv); +extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv); /** * spi_unregister_driver - reverse effect of spi_register_driver @@ -263,6 +267,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) driver_unregister(&sdrv->driver); } +/* use a define to avoid include chaining to get THIS_MODULE */ +#define spi_register_driver(driver) \ + __spi_register_driver(THIS_MODULE, driver) + /** * module_spi_driver() - Helper macro for registering a SPI driver * @__spi_driver: spi_driver struct @@ -843,8 +851,10 @@ extern int spi_bus_unlock(struct spi_master *master); * @len: data buffer size * Context: can sleep * - * This writes the buffer and returns zero or a negative error code. + * This function writes the buffer @buf. * Callable only from contexts that can sleep. + * + * Return: zero on success, else a negative error code. */ static inline int spi_write(struct spi_device *spi, const void *buf, size_t len) @@ -867,8 +877,10 @@ spi_write(struct spi_device *spi, const void *buf, size_t len) * @len: data buffer size * Context: can sleep * - * This reads the buffer and returns zero or a negative error code. + * This function reads the buffer @buf. * Callable only from contexts that can sleep. + * + * Return: zero on success, else a negative error code. */ static inline int spi_read(struct spi_device *spi, void *buf, size_t len) @@ -895,7 +907,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len) * * For more specific semantics see spi_sync(). * - * It returns zero on success, else a negative error code. + * Return: zero on success, else a negative error code. */ static inline int spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, @@ -919,9 +931,10 @@ extern int spi_write_then_read(struct spi_device *spi, * @cmd: command to be written before data is read back * Context: can sleep * - * This returns the (unsigned) eight bit number returned by the - * device, or else a negative error code. Callable only from - * contexts that can sleep. + * Callable only from contexts that can sleep. + * + * Return: the (unsigned) eight bit number returned by the + * device, or else a negative error code. */ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) { @@ -940,12 +953,13 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd) * @cmd: command to be written before data is read back * Context: can sleep * - * This returns the (unsigned) sixteen bit number returned by the - * device, or else a negative error code. Callable only from - * contexts that can sleep. - * * The number is returned in wire-order, which is at least sometimes * big-endian. + * + * Callable only from contexts that can sleep. + * + * Return: the (unsigned) sixteen bit number returned by the + * device, or else a negative error code. */ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) { @@ -964,13 +978,13 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd) * @cmd: command to be written before data is read back * Context: can sleep * - * This returns the (unsigned) sixteen bit number returned by the device in cpu - * endianness, or else a negative error code. Callable only from contexts that - * can sleep. - * * This function is similar to spi_w8r16, with the exception that it will * convert the read 16 bit data word from big-endian to native endianness. * + * Callable only from contexts that can sleep. 
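The regrouped kernel-doc here standardizes on Return: sections; spi_sync_transfer(), for instance, still simply wraps the caller's transfer array in a message and returns zero or a negative errno. A sketch of a typical two-part transaction (the helper and its register layout are invented, and real callers must pass DMA-safe buffers rather than stack memory):

#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* Invented helper: clock out a one-byte command, clock in @len reply bytes.
 * @cmd and @rx must be DMA-safe (e.g. kmalloc'd) in real code. */
static int example_cmd_read(struct spi_device *spi, u8 *cmd, u8 *rx, size_t len)
{
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd, .len = 1 },
		{ .rx_buf = rx, .len = len },
	};

	/* zero on success, else a negative error code */
	return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}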
+ * + * Return: the (unsigned) sixteen bit number returned by the device in cpu + * endianness, or else a negative error code. */ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd) diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index 85578d4be034..154788ed218c 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h @@ -4,7 +4,7 @@ #include <linux/workqueue.h> struct spi_bitbang { - spinlock_t lock; + struct mutex lock; u8 busy; u8 use_dma; u8 flags; /* extra spi->mode support */ diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index 8df43c9f11dc..4397a4824c81 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -38,6 +38,11 @@ void xprt_free_bc_request(struct rpc_rqst *req); int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); +/* Socket backchannel transport methods */ +int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs); +void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs); +void xprt_free_bc_rqst(struct rpc_rqst *req); + /* * Determine if a shared backchannel is in use */ diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 7ccc961f33e9..f869807a0d0e 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -105,11 +105,9 @@ struct svc_rdma_chunk_sge { }; struct svc_rdma_fastreg_mr { struct ib_mr *mr; - void *kva; - struct ib_fast_reg_page_list *page_list; - int page_list_len; + struct scatterlist *sg; + int sg_nents; unsigned long access_flags; - unsigned long map_len; enum dma_data_direction direction; struct list_head frmr_list; }; @@ -228,9 +226,13 @@ extern void svc_rdma_put_frmr(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *); extern void svc_sq_reap(struct svcxprt_rdma *); extern void svc_rq_reap(struct svcxprt_rdma *); -extern struct svc_xprt_class svc_rdma_class; extern void svc_rdma_prep_reply_hdr(struct svc_rqst *); +extern struct svc_xprt_class svc_rdma_class; +#ifdef CONFIG_SUNRPC_BACKCHANNEL +extern struct svc_xprt_class svc_rdma_bc_class; +#endif + /* svc_rdma.c */ extern int svc_rdma_init(void); extern void svc_rdma_cleanup(void); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 0fb9acbb4780..69ef5b3ab038 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -54,6 +54,8 @@ enum rpc_display_format_t { struct rpc_task; struct rpc_xprt; struct seq_file; +struct svc_serv; +struct net; /* * This describes a complete RPC request @@ -136,6 +138,12 @@ struct rpc_xprt_ops { int (*enable_swap)(struct rpc_xprt *xprt); void (*disable_swap)(struct rpc_xprt *xprt); void (*inject_disconnect)(struct rpc_xprt *xprt); + int (*bc_setup)(struct rpc_xprt *xprt, + unsigned int min_reqs); + int (*bc_up)(struct svc_serv *serv, struct net *net); + void (*bc_free_rqst)(struct rpc_rqst *rqst); + void (*bc_destroy)(struct rpc_xprt *xprt, + unsigned int max_reqs); }; /* @@ -153,6 +161,7 @@ enum xprt_transports { XPRT_TRANSPORT_TCP = IPPROTO_TCP, XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, XPRT_TRANSPORT_RDMA = 256, + XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC, XPRT_TRANSPORT_LOCAL = 257, }; diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 357e44c1a46b..0ece4ba06f06 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -44,6 +44,8 @@ 
struct sock_xprt { */ unsigned long sock_state; struct delayed_work connect_worker; + struct work_struct recv_worker; + struct mutex recv_mutex; struct sockaddr_storage srcaddr; unsigned short srcport; diff --git a/include/linux/sunxi-rsb.h b/include/linux/sunxi-rsb.h new file mode 100644 index 000000000000..7e75bb0346d0 --- /dev/null +++ b/include/linux/sunxi-rsb.h @@ -0,0 +1,105 @@ +/* + * Allwinner Reduced Serial Bus Driver + * + * Copyright (c) 2015 Chen-Yu Tsai + * + * Author: Chen-Yu Tsai <wens@csie.org> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ +#ifndef _SUNXI_RSB_H +#define _SUNXI_RSB_H + +#include <linux/device.h> +#include <linux/regmap.h> +#include <linux/types.h> + +struct sunxi_rsb; + +/** + * struct sunxi_rsb_device - Basic representation of an RSB device + * @dev: Driver model representation of the device. + * @ctrl: RSB controller managing the bus hosting this device. + * @rtaddr: This device's runtime address + * @hwaddr: This device's hardware address + */ +struct sunxi_rsb_device { + struct device dev; + struct sunxi_rsb *rsb; + int irq; + u8 rtaddr; + u16 hwaddr; +}; + +static inline struct sunxi_rsb_device *to_sunxi_rsb_device(struct device *d) +{ + return container_of(d, struct sunxi_rsb_device, dev); +} + +static inline void *sunxi_rsb_device_get_drvdata(const struct sunxi_rsb_device *rdev) +{ + return dev_get_drvdata(&rdev->dev); +} + +static inline void sunxi_rsb_device_set_drvdata(struct sunxi_rsb_device *rdev, + void *data) +{ + dev_set_drvdata(&rdev->dev, data); +} + +/** + * struct sunxi_rsb_driver - RSB slave device driver + * @driver: RSB device drivers should initialize name and owner field of + * this structure. + * @probe: binds this driver to a RSB device. + * @remove: unbinds this driver from the RSB device. + */ +struct sunxi_rsb_driver { + struct device_driver driver; + int (*probe)(struct sunxi_rsb_device *rdev); + int (*remove)(struct sunxi_rsb_device *rdev); +}; + +static inline struct sunxi_rsb_driver *to_sunxi_rsb_driver(struct device_driver *d) +{ + return container_of(d, struct sunxi_rsb_driver, driver); +} + +int sunxi_rsb_driver_register(struct sunxi_rsb_driver *rdrv); + +/** + * sunxi_rsb_driver_unregister() - unregister an RSB client driver + * @rdrv: the driver to unregister + */ +static inline void sunxi_rsb_driver_unregister(struct sunxi_rsb_driver *rdrv) +{ + if (rdrv) + driver_unregister(&rdrv->driver); +} + +#define module_sunxi_rsb_driver(__sunxi_rsb_driver) \ + module_driver(__sunxi_rsb_driver, sunxi_rsb_driver_register, \ + sunxi_rsb_driver_unregister) + +struct regmap *__devm_regmap_init_sunxi_rsb(struct sunxi_rsb_device *rdev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); + +/** + * devm_regmap_init_sunxi_rsb(): Initialise managed register map + * + * @rdev: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. The regmap will be automatically freed by the + * device management code. 
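Everything a sunxi-rsb client driver needs is already visible in this new header: probe receives a sunxi_rsb_device, the regmap wrapper defined just below hides the bus accessors, and module_sunxi_rsb_driver() removes the init boilerplate. A hedged skeleton (the driver name and register widths are invented):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/sunxi-rsb.h>

static const struct regmap_config example_rsb_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int example_rsb_probe(struct sunxi_rsb_device *rdev)
{
	struct regmap *regmap;

	/* managed regmap via the wrapper macro defined just below */
	regmap = devm_regmap_init_sunxi_rsb(rdev, &example_rsb_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	sunxi_rsb_device_set_drvdata(rdev, regmap);
	return 0;
}

static struct sunxi_rsb_driver example_rsb_driver = {
	.driver = {
		.name = "example-rsb-client",
	},
	.probe = example_rsb_probe,
};
module_sunxi_rsb_driver(example_rsb_driver);

MODULE_LICENSE("GPL");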
+ */ +#define devm_regmap_init_sunxi_rsb(rdev, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_sunxi_rsb, #config, \ + rdev, config) + +#endif /* _SUNXI_RSB_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index a460e2ef2843..a156b82dd14c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -887,4 +887,6 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename, asmlinkage long sys_membarrier(int cmd, int flags); +asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); + #endif diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 9f65758311a4..ea090eaf468c 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -268,6 +268,9 @@ int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name); void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name); +int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name); void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); @@ -451,6 +454,14 @@ static inline void sysfs_remove_link_from_group(struct kobject *kobj, { } +static inline int __compat_only_sysfs_link_entry_to_kobj( + struct kobject *kobj, + struct kobject *target_kobj, + const char *target_name) +{ + return 0; +} + static inline void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) { diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 8350c538b486..706e63eea080 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -30,6 +30,8 @@ #define TPM_ANY_NUM 0xFFFF struct tpm_chip; +struct trusted_key_payload; +struct trusted_key_options; struct tpm_class_ops { const u8 req_complete_mask; @@ -46,11 +48,22 @@ struct tpm_class_ops { #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) +extern int tpm_is_tpm2(u32 chip_num); extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); extern int tpm_send(u32 chip_num, void *cmd, size_t buflen); extern int tpm_get_random(u32 chip_num, u8 *data, size_t max); +extern int tpm_seal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options); +extern int tpm_unseal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options); #else +static inline int tpm_is_tpm2(u32 chip_num) +{ + return -ENODEV; +} static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { return -ENODEV; } @@ -63,5 +76,18 @@ static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) { static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) { return -ENODEV; } + +static inline int tpm_seal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options) +{ + return -ENODEV; +} +static inline int tpm_unseal_trusted(u32 chip_num, + struct trusted_key_payload *payload, + struct trusted_key_options *options) +{ + return -ENODEV; +} #endif #endif diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index ed27917cabc9..429fdfc3baf5 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -168,13 +168,12 @@ struct ring_buffer_event * trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, int type, unsigned long len, unsigned long flags, int pc); -void trace_current_buffer_unlock_commit(struct 
ring_buffer *buffer, - struct ring_buffer_event *event, - unsigned long flags, int pc); -void trace_buffer_unlock_commit(struct ring_buffer *buffer, +void trace_buffer_unlock_commit(struct trace_array *tr, + struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc); -void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer, +void trace_buffer_unlock_commit_regs(struct trace_array *tr, + struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc, struct pt_regs *regs); @@ -329,6 +328,7 @@ enum { EVENT_FILE_FL_SOFT_DISABLED_BIT, EVENT_FILE_FL_TRIGGER_MODE_BIT, EVENT_FILE_FL_TRIGGER_COND_BIT, + EVENT_FILE_FL_PID_FILTER_BIT, }; /* @@ -342,6 +342,7 @@ enum { * tracepoint may be enabled) * TRIGGER_MODE - When set, invoke the triggers associated with the event * TRIGGER_COND - When set, one or more triggers has an associated filter + * PID_FILTER - When set, the event is filtered based on pid */ enum { EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT), @@ -352,6 +353,7 @@ enum { EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT), EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT), EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), + EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT), }; struct trace_event_file { @@ -430,6 +432,8 @@ extern enum event_trigger_type event_triggers_call(struct trace_event_file *file extern void event_triggers_post_call(struct trace_event_file *file, enum event_trigger_type tt); +bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); + /** * trace_trigger_soft_disabled - do triggers and test if soft disabled * @file: The file pointer of the event to test @@ -449,6 +453,8 @@ trace_trigger_soft_disabled(struct trace_event_file *file) event_triggers_call(file, NULL); if (eflags & EVENT_FILE_FL_SOFT_DISABLED) return true; + if (eflags & EVENT_FILE_FL_PID_FILTER) + return trace_event_ignore_this_pid(file); } return false; } @@ -508,7 +514,7 @@ event_trigger_unlock_commit(struct trace_event_file *file, enum event_trigger_type tt = ETT_NONE; if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit(buffer, event, irq_flags, pc); + trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc); if (tt) event_triggers_post_call(file, tt); @@ -540,7 +546,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file, enum event_trigger_type tt = ETT_NONE; if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) - trace_buffer_unlock_commit_regs(buffer, event, + trace_buffer_unlock_commit_regs(file->tr, buffer, event, irq_flags, pc, regs); if (tt) diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 84d497297c5f..26c152122a42 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -50,6 +50,7 @@ #include <linux/ptrace.h> #include <linux/security.h> #include <linux/task_work.h> +#include <linux/memcontrol.h> struct linux_binprm; /* @@ -188,6 +189,8 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) smp_mb__after_atomic(); if (unlikely(current->task_works)) task_work_run(); + + mem_cgroup_handle_over_high(); } #endif /* <linux/tracehook.h> */ diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index a5f7f3ecafa3..696a339c592c 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -26,6 +26,7 @@ struct notifier_block; struct tracepoint_func { void *func; void *data; + int prio; }; 
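The new @prio member keeps a tracepoint's probe array sorted; the next hunk adds TRACEPOINT_DEFAULT_PRIO and the *_prio registration variants on top of it. A hedged sketch (the probe body is invented, its arguments must match the tracepoint's own prototype, and the ordering note reflects this series' insertion logic, where higher @prio values are placed ahead of lower ones):

#include <linux/tracepoint.h>

/* Invented probe: run ahead of probes registered at the default
 * priority of 10 (TRACEPOINT_DEFAULT_PRIO, added below). */
static void example_probe(void *data)
{
	/* the tracepoint's argument list would follow @data here */
}

static int example_attach(struct tracepoint *tp)
{
	return tracepoint_probe_register_prio(tp, (void *)example_probe, NULL,
					      TRACEPOINT_DEFAULT_PRIO + 1);
}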
struct tracepoint { @@ -42,9 +43,14 @@ struct trace_enum_map { unsigned long enum_value; }; +#define TRACEPOINT_DEFAULT_PRIO 10 + extern int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data); extern int +tracepoint_probe_register_prio(struct tracepoint *tp, void *probe, void *data, + int prio); +extern int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data); extern void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), @@ -111,7 +117,18 @@ extern void syscall_unregfunc(void); #define TP_ARGS(args...) args #define TP_CONDITION(args...) args -#ifdef CONFIG_TRACEPOINTS +/* + * Individual subsystems may have a separate configuration to + * enable their tracepoints. By default, this file will create + * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem + * wants to be able to disable its tracepoints from being created + * it can define NOTRACE before including the tracepoint headers. + */ +#if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE) +#define TRACEPOINTS_ENABLED +#endif + +#ifdef TRACEPOINTS_ENABLED /* * it_func[0] is never NULL because there is at least one element in the array @@ -167,10 +184,11 @@ extern void syscall_unregfunc(void); * structure. Force alignment to the same alignment as the section start. * * When lockdep is enabled, we make sure to always do the RCU portions of - * the tracepoint code, regardless of whether tracing is on or we match the - * condition. This lets us find RCU issues triggered with tracepoints even - * when this tracepoint is off. This code has no purpose other than poking - * RCU a bit. + * the tracepoint code, regardless of whether tracing is on. However, + * don't check if the condition is false, due to interaction with idle + * instrumentation. This lets us find RCU issues triggered with tracepoints + * even when this tracepoint is off. This code has no purpose other than + * poking RCU a bit. */ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ @@ -196,6 +214,13 @@ extern void syscall_unregfunc(void); (void *)probe, data); \ } \ static inline int \ + register_trace_prio_##name(void (*probe)(data_proto), void *data,\ + int prio) \ + { \ + return tracepoint_probe_register_prio(&__tracepoint_##name, \ + (void *)probe, data, prio); \ + } \ + static inline int \ unregister_trace_##name(void (*probe)(data_proto), void *data) \ { \ return tracepoint_probe_unregister(&__tracepoint_##name,\ @@ -234,7 +259,7 @@ extern void syscall_unregfunc(void); #define EXPORT_TRACEPOINT_SYMBOL(name) \ EXPORT_SYMBOL(__tracepoint_##name) -#else /* !CONFIG_TRACEPOINTS */ +#else /* !TRACEPOINTS_ENABLED */ #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ static inline void trace_##name(proto) \ { } \ @@ -266,7 +291,7 @@ extern void syscall_unregfunc(void); #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) #define EXPORT_TRACEPOINT_SYMBOL(name) -#endif /* CONFIG_TRACEPOINTS */ +#endif /* TRACEPOINTS_ENABLED */ #ifdef CONFIG_TRACING /** diff --git a/include/linux/types.h b/include/linux/types.h index c314989d9158..70d8500bddf1 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -205,11 +205,25 @@ struct ustat { * struct callback_head - callback structure for use with RCU and task_work * @next: next update requests in a list * @func: actual update function to call after the grace period. + * + * The struct is aligned to size of pointer. 
On most architectures it happens + naturally due to ABI requirements, but some architectures (like CRIS) have + weird ABI and we need to ask it explicitly. + * + * The alignment is required to guarantee that bits 0 and 1 of @next will be + * clear under normal conditions -- as long as we use call_rcu(), + * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. + * + * This guarantee is important for a few reasons: + * - future call_rcu_lazy() will make use of lower bits in the pointer; + * - the structure shares storage space in struct page with @compound_head, + * which encodes PageTail() in bit 0. The guarantee is needed to avoid + * false-positive PageTail(). */ struct callback_head { struct callback_head *next; void (*func)(struct callback_head *head); -}; +} __attribute__((aligned(sizeof(void *)))); #define rcu_head callback_head typedef void (*rcu_callback_t)(struct rcu_head *head); diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index d6f2c2c5b043..558129af828a 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -75,36 +75,6 @@ static inline unsigned long __copy_from_user_nocache(void *to, #endif /* ARCH_HAS_NOCACHE_UACCESS */ -/** - * probe_kernel_address(): safely attempt to read from a location - * @addr: address to read from - its type is type typeof(retval)* - * @retval: read into this variable - * - * Safely read from address @addr into variable @revtal. If a kernel fault - * happens, handle that and return -EFAULT. - * We ensure that the __get_user() is executed in atomic context so that - * do_page_fault() doesn't attempt to take mmap_sem. This makes - * probe_kernel_address() suitable for use within regions where the caller - * already holds mmap_sem, or other locks which nest inside mmap_sem. - * This must be a macro because __get_user() needs to know the types of the - * args. - * - * We don't include enough header files to be able to do the set_fs(). We - * require that the probe_kernel_address() caller will do that. - */ -#define probe_kernel_address(addr, retval) \ - ({ \ - long ret; \ - mm_segment_t old_fs = get_fs(); \ - \ - set_fs(KERNEL_DS); \ - pagefault_disable(); \ - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ - pagefault_enable(); \ - set_fs(old_fs); \ - ret; \ - }) - /* * probe_kernel_read(): safely attempt to read from a location * @dst: pointer to the buffer that shall take the data @@ -131,4 +101,14 @@ extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); +/** + * probe_kernel_address(): safely attempt to read from a location + * @addr: address to read from + * @retval: read into this variable + * + * Returns 0 on success, or -EFAULT. + */ +#define probe_kernel_address(addr, retval) \ + probe_kernel_read(&retval, addr, sizeof(retval)) + #endif /* __LINUX_UACCESS_H__ */ diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index b483abd34493..69e1d4a1f1b3 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -1,10 +1,31 @@ /* + * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs + * * Copyright (c) 2010 Red Hat Inc. 
* Author : Dave Airlie <airlied@redhat.com> * - * Licensed under GPLv2 + * Copyright (c) 2015 Lukas Wunner <lukas@wunner.de> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS + * IN THE SOFTWARE. * - * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs */ #ifndef _LINUX_VGA_SWITCHEROO_H_ @@ -14,28 +35,85 @@ struct pci_dev; +/** + * enum vga_switcheroo_state - client power state + * @VGA_SWITCHEROO_OFF: off + * @VGA_SWITCHEROO_ON: on + * @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo. + * Only used in vga_switcheroo_get_client_state() which in turn is only + * called from hda_intel.c + * + * Client power state. + */ enum vga_switcheroo_state { VGA_SWITCHEROO_OFF, VGA_SWITCHEROO_ON, /* below are referred only from vga_switcheroo_get_client_state() */ - VGA_SWITCHEROO_INIT, VGA_SWITCHEROO_NOT_FOUND, }; +/** + * enum vga_switcheroo_client_id - client identifier + * @VGA_SWITCHEROO_UNKNOWN_ID: initial identifier assigned to vga clients. + * Determining the id requires the handler, so GPUs are given their + * true id in a delayed fashion in vga_switcheroo_enable() + * @VGA_SWITCHEROO_IGD: integrated graphics device + * @VGA_SWITCHEROO_DIS: discrete graphics device + * @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported + * + * Client identifier. Audio clients use the same identifier & 0x100. + */ enum vga_switcheroo_client_id { + VGA_SWITCHEROO_UNKNOWN_ID = -1, VGA_SWITCHEROO_IGD, VGA_SWITCHEROO_DIS, VGA_SWITCHEROO_MAX_CLIENTS, }; +/** + * struct vga_switcheroo_handler - handler callbacks + * @init: initialize handler. + * Optional. This gets called when vga_switcheroo is enabled, i.e. when + * two vga clients have registered. It allows the handler to perform + * some delayed initialization that depends on the existence of the + * vga clients. Currently only the radeon and amdgpu drivers use this. + * The return value is ignored + * @switchto: switch outputs to given client. + * Mandatory. For muxless machines this should be a no-op. Returning 0 + * denotes success, anything else failure (in which case the switch is + * aborted) + * @power_state: cut or reinstate power of given client. + * Optional. The return value is ignored + * @get_client_id: determine if given pci device is integrated or discrete GPU. + * Mandatory + * + * Handler callbacks. The multiplexer itself. The @switchto and @get_client_id + * methods are mandatory, all others may be set to NULL. 
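Read together with the enum blocks above, a muxless handler reduces to the two mandatory callbacks; everything else in the struct (shown just below) may stay NULL. A hedged sketch (the vendor test is a crude stand-in for real platform logic):

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static int example_switchto(enum vga_switcheroo_client_id id)
{
	return 0;	/* muxless: no outputs to reroute */
}

static enum vga_switcheroo_client_id
example_get_client_id(struct pci_dev *pdev)
{
	/* crude illustration: treat the Intel GPU as the integrated one */
	return pdev->vendor == PCI_VENDOR_ID_INTEL ?
		VGA_SWITCHEROO_IGD : VGA_SWITCHEROO_DIS;
}

static const struct vga_switcheroo_handler example_handler = {
	.switchto = example_switchto,
	.get_client_id = example_get_client_id,
};

/* vga_switcheroo_register_handler(&example_handler) now also accepts a
 * const handler, per the prototype change in the hunks below. */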
+ */ struct vga_switcheroo_handler { + int (*init)(void); int (*switchto)(enum vga_switcheroo_client_id id); int (*power_state)(enum vga_switcheroo_client_id id, enum vga_switcheroo_state state); - int (*init)(void); - int (*get_client_id)(struct pci_dev *pdev); + enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev); }; +/** + * struct vga_switcheroo_client_ops - client callbacks + * @set_gpu_state: do the equivalent of suspend/resume for the card. + * Mandatory. This should not cut power to the discrete GPU, + * which is the job of the handler + * @reprobe: poll outputs. + * Optional. This gets called after waking the GPU and switching + * the outputs to it + * @can_switch: check if the device is in a position to switch now. + * Mandatory. The client should return false if a user space process + * has one of its device files open + * + * Client callbacks. A client can be either a GPU or an audio device on a GPU. + * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be + * set to NULL. For audio clients, the @reprobe member is bogus. + */ struct vga_switcheroo_client_ops { void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); void (*reprobe)(struct pci_dev *dev); @@ -49,17 +127,17 @@ int vga_switcheroo_register_client(struct pci_dev *dev, bool driver_power_control); int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, - int id, bool active); + enum vga_switcheroo_client_id id); void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info); -int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler); +int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler); void vga_switcheroo_unregister_handler(void); int vga_switcheroo_process_delayed_switch(void); -int vga_switcheroo_get_client_state(struct pci_dev *dev); +enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); @@ -72,13 +150,13 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} static inline int vga_switcheroo_register_client(struct pci_dev *dev, const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; } static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} -static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; } +static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; } static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, - int id, bool active) { return 0; } + enum vga_switcheroo_client_id id) { return 0; } static inline void vga_switcheroo_unregister_handler(void) {} static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } -static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } +static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 9246d32dc973..e623d392db0c 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -14,12 +14,12 @@ #endif #ifdef 
CONFIG_HIGHMEM -#define HIGHMEM_ZONE(xx) , xx##_HIGH +#define HIGHMEM_ZONE(xx) xx##_HIGH, #else #define HIGHMEM_ZONE(xx) #endif -#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE +#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, FOR_ALL_ZONES(PGALLOC), diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 82e7db7f7100..5dbc8b0ee567 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -161,30 +161,8 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, } #ifdef CONFIG_NUMA -/* - * Determine the per node value of a stat item. This function - * is called frequently in a NUMA machine, so try to be as - * frugal as possible. - */ -static inline unsigned long node_page_state(int node, - enum zone_stat_item item) -{ - struct zone *zones = NODE_DATA(node)->node_zones; - - return -#ifdef CONFIG_ZONE_DMA - zone_page_state(&zones[ZONE_DMA], item) + -#endif -#ifdef CONFIG_ZONE_DMA32 - zone_page_state(&zones[ZONE_DMA32], item) + -#endif -#ifdef CONFIG_HIGHMEM - zone_page_state(&zones[ZONE_HIGHMEM], item) + -#endif - zone_page_state(&zones[ZONE_NORMAL], item) + - zone_page_state(&zones[ZONE_MOVABLE], item); -} +extern unsigned long node_page_state(int node, enum zone_stat_item item); extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp); #else @@ -269,7 +247,6 @@ static inline void __dec_zone_page_state(struct page *page, #define set_pgdat_percpu_threshold(pgdat, callback) { } -static inline void refresh_cpu_vm_stats(int cpu) { } static inline void refresh_zone_stat_thresholds(void) { } static inline void cpu_vm_stats_fold(int cpu) { } diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index d74a0e907b9e..027b1f43f12d 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -24,8 +24,8 @@ struct watchdog_device; * @stop: The routine for stopping the watchdog device. * @ping: The routine that sends a keepalive ping to the watchdog device. * @status: The routine that shows the status of the watchdog device. - * @set_timeout:The routine for setting the watchdog devices timeout value. - * @get_timeleft:The routine that get's the time that's left before a reset. + * @set_timeout:The routine for setting the watchdog device's timeout value (in seconds). + * @get_timeleft:The routine that gets the time left before a reset (in seconds). * @ref: The ref operation for dyn. allocated watchdog_device structs * @unref: The unref operation for dyn. allocated watchdog_device structs * @ioctl: The routines that handles extra ioctl calls. @@ -33,7 +33,7 @@ struct watchdog_device; * The watchdog_ops structure contains a list of low-level operations * that control a watchdog device. It also contains the module that owns * these operations. The start and stop function are mandatory, all other - * functions are optonal. + * functions are optional. */ struct watchdog_ops { struct module *owner; @@ -59,9 +59,9 @@ struct watchdog_ops { * @info: Pointer to a watchdog_info structure. * @ops: Pointer to the list of watchdog operations. * @bootstatus: Status of the watchdog device at boot. - * @timeout: The watchdog devices timeout value. - * @min_timeout:The watchdog devices minimum timeout value. - * @max_timeout:The watchdog devices maximum timeout value. + * @timeout: The watchdog device's timeout value (in seconds). + * @min_timeout:The watchdog device's minimum timeout value (in seconds).
+ * @max_timeout:The watchdog device's maximum timeout value (in seconds). * @driver-data:Pointer to the drivers private data. * @lock: Lock for watchdog core internal use only. * @status: Field that contains the devices internal status bits. @@ -119,8 +119,15 @@ static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool noway /* Use the following function to check if a timeout value is invalid */ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) { - return ((wdd->max_timeout != 0) && - (t < wdd->min_timeout || t > wdd->max_timeout)); + /* + * The timeout is invalid if + * - the requested value is smaller than the configured minimum timeout, + * or + * - a maximum timeout is configured, and the requested value is larger + * than the maximum timeout. + */ + return t < wdd->min_timeout || + (wdd->max_timeout && t > wdd->max_timeout); } /* Use the following functions to manipulate watchdog driver specific data */ diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 42f8ec992452..2e97b7707dff 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -38,10 +38,10 @@ enum zpool_mapmode { bool zpool_has_pool(char *type); -struct zpool *zpool_create_pool(char *type, char *name, +struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, const struct zpool_ops *ops); -char *zpool_get_type(struct zpool *pool); +const char *zpool_get_type(struct zpool *pool); void zpool_destroy_pool(struct zpool *pool); @@ -83,7 +83,9 @@ struct zpool_driver { atomic_t refcount; struct list_head list; - void *(*create)(char *name, gfp_t gfp, const struct zpool_ops *ops, + void *(*create)(const char *name, + gfp_t gfp, + const struct zpool_ops *ops, struct zpool *zpool); void (*destroy)(void *pool); diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 6398dfae53f1..34eb16098a33 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -41,7 +41,7 @@ struct zs_pool_stats { struct zs_pool; -struct zs_pool *zs_create_pool(char *name, gfp_t flags); +struct zs_pool *zs_create_pool(const char *name, gfp_t flags); void zs_destroy_pool(struct zs_pool *pool); unsigned long zs_malloc(struct zs_pool *pool, size_t size); diff --git a/include/linux/zutil.h b/include/linux/zutil.h index 6adfa9a6ffe9..663689521759 100644 --- a/include/linux/zutil.h +++ b/include/linux/zutil.h @@ -68,10 +68,10 @@ typedef uLong (*check_func) (uLong check, const Byte *buf, An Adler-32 checksum is almost as reliable as a CRC32 but can be computed much faster. Usage example: - uLong adler = adler32(0L, NULL, 0); + uLong adler = zlib_adler32(0L, NULL, 0); while (read_buffer(buffer, length) != EOF) { - adler = adler32(adler, buffer, length); + adler = zlib_adler32(adler, buffer, length); } if (adler != original_adler) error();
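
The uaccess.h hunk above replaces the hand-rolled probe_kernel_address() macro with a thin wrapper around probe_kernel_read(), which already disables page faults around the copy. A minimal sketch of a caller; the show_insn() helper, its output format, and the u32 instruction width are illustrative assumptions, not part of the patch:

#include <linux/printk.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static void show_insn(unsigned long addr)
{
	u32 insn;

	/* A bogus address yields -EFAULT here instead of a kernel oops. */
	if (probe_kernel_address((u32 *)addr, insn))
		pr_cont(" (bad address %#lx)", addr);
	else
		pr_cont(" %08x", insn);
}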
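The vga_switcheroo.h hunks constify the handler passed to vga_switcheroo_register_handler() and make get_client_id() return enum vga_switcheroo_client_id instead of int. A sketch of a muxless handler under the new signatures; the foo_* names and the vendor test are hypothetical, loosely modeled on the in-tree GPU handlers:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static enum vga_switcheroo_client_id foo_get_client_id(struct pci_dev *pdev)
{
	/* Treat Intel GPUs as integrated, everything else as discrete. */
	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
		return VGA_SWITCHEROO_IGD;
	return VGA_SWITCHEROO_DIS;
}

static int foo_switchto(enum vga_switcheroo_client_id id)
{
	return 0;	/* muxless: outputs are hardwired, nothing to switch */
}

/* The const qualifier lets the handler live in rodata. */
static const struct vga_switcheroo_handler foo_handler = {
	.switchto = foo_switchto,	/* mandatory */
	.get_client_id = foo_get_client_id,	/* mandatory */
};

static int __init foo_init(void)
{
	return vga_switcheroo_register_handler(&foo_handler);
}
module_init(foo_init);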
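The reworked watchdog_timeout_invalid() above now rejects values below min_timeout even when no maximum is configured (max_timeout == 0), instead of skipping validation entirely in that case. A sketch of the resulting behavior, assuming a hypothetical foo driver with a one-second minimum and no hardware upper bound:

#include <linux/errno.h>
#include <linux/watchdog.h>

static struct watchdog_device foo_wdd = {
	.min_timeout = 1,	/* seconds */
	.max_timeout = 0,	/* no hardware maximum */
};

static int foo_set_timeout(struct watchdog_device *wdd, unsigned int t)
{
	/* t == 0 now fails (below the minimum); any t >= 1 is accepted. */
	if (watchdog_timeout_invalid(wdd, t))
		return -EINVAL;
	wdd->timeout = t;
	return 0;
}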