Diffstat (limited to 'include')
30 files changed, 1011 insertions, 197 deletions
diff --git a/include/linux/async.h b/include/linux/async.h index a2e3f18b2ad6..6b0226bdaadc 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -16,9 +16,8 @@ #include <linux/list.h> typedef u64 async_cookie_t; -typedef void (async_func_ptr) (void *data, async_cookie_t cookie); +typedef void (*async_func_t) (void *data, async_cookie_t cookie); struct async_domain { - struct list_head node; struct list_head pending; unsigned registered:1; }; @@ -27,8 +26,7 @@ struct async_domain { * domain participates in global async_synchronize_full */ #define ASYNC_DOMAIN(_name) \ - struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \ - .pending = LIST_HEAD_INIT(_name.pending), \ + struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ .registered = 1 } /* @@ -36,12 +34,11 @@ struct async_domain { * complete, this domain does not participate in async_synchronize_full */ #define ASYNC_DOMAIN_EXCLUSIVE(_name) \ - struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \ - .pending = LIST_HEAD_INIT(_name.pending), \ + struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ .registered = 0 } -extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data); -extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data, +extern async_cookie_t async_schedule(async_func_t func, void *data); +extern async_cookie_t async_schedule_domain(async_func_t func, void *data, struct async_domain *domain); void async_unregister_domain(struct async_domain *domain); extern void async_synchronize_full(void); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 470073bf93d0..d86e215ca2b8 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -19,6 +19,7 @@ #include <linux/idr.h> #include <linux/workqueue.h> #include <linux/xattr.h> +#include <linux/fs.h> #ifdef CONFIG_CGROUPS @@ -30,10 +31,6 @@ struct css_id; extern int cgroup_init_early(void); extern int cgroup_init(void); -extern void cgroup_lock(void); -extern int cgroup_lock_is_held(void); -extern bool cgroup_lock_live_group(struct cgroup *cgrp); -extern void cgroup_unlock(void); extern void cgroup_fork(struct task_struct *p); extern void cgroup_post_fork(struct task_struct *p); extern void cgroup_exit(struct task_struct *p, int run_callbacks); @@ -44,14 +41,25 @@ extern void cgroup_unload_subsys(struct cgroup_subsys *ss); extern const struct file_operations proc_cgroup_operations; -/* Define the enumeration of all builtin cgroup subsystems */ +/* + * Define the enumeration of all cgroup subsystems. + * + * We define ids for builtin subsystems and then modular ones. + */ #define SUBSYS(_x) _x ## _subsys_id, -#define IS_SUBSYS_ENABLED(option) IS_ENABLED(option) enum cgroup_subsys_id { +#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option) +#include <linux/cgroup_subsys.h> +#undef IS_SUBSYS_ENABLED + CGROUP_BUILTIN_SUBSYS_COUNT, + + __CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1, + +#define IS_SUBSYS_ENABLED(option) IS_MODULE(option) #include <linux/cgroup_subsys.h> +#undef IS_SUBSYS_ENABLED CGROUP_SUBSYS_COUNT, }; -#undef IS_SUBSYS_ENABLED #undef SUBSYS /* Per-subsystem/per-cgroup state maintained by the system. */ @@ -148,6 +156,13 @@ enum { * specified at mount time and thus is implemented here. 
*/ CGRP_CPUSET_CLONE_CHILDREN, + /* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */ + CGRP_SANE_BEHAVIOR, +}; + +struct cgroup_name { + struct rcu_head rcu_head; + char name[]; }; struct cgroup { @@ -172,11 +187,23 @@ struct cgroup { struct cgroup *parent; /* my parent */ struct dentry *dentry; /* cgroup fs entry, RCU protected */ + /* + * This is a copy of dentry->d_name, and it's needed because + * we can't use dentry->d_name in cgroup_path(). + * + * You must acquire rcu_read_lock() to access cgrp->name, and + * the only place that can change it is rename(), which is + * protected by parent dir's i_mutex. + * + * Normally you should use cgroup_name() wrapper rather than + * access it directly. + */ + struct cgroup_name __rcu *name; + /* Private pointers for each registered subsystem */ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; struct cgroupfs_root *root; - struct cgroup *top_cgroup; /* * List of cg_cgroup_links pointing at css_sets with @@ -213,6 +240,96 @@ struct cgroup { struct simple_xattrs xattrs; }; +#define MAX_CGROUP_ROOT_NAMELEN 64 + +/* cgroupfs_root->flags */ +enum { + /* + * Unfortunately, cgroup core and various controllers are riddled + * with idiosyncrasies and pointless options. The following flag, + * when set, will force sane behavior - some options are forced on, + * others are disallowed, and some controllers will change their + * hierarchical or other behaviors. + * + * The set of behaviors affected by this flag are still being + * determined and developed and the mount option for this flag is + * prefixed with __DEVEL__. The prefix will be dropped once we + * reach the point where all behaviors are compatible with the + * planned unified hierarchy, which will automatically turn on this + * flag. + * + * The followings are the behaviors currently affected this flag. + * + * - Mount options "noprefix" and "clone_children" are disallowed. + * Also, cgroupfs file cgroup.clone_children is not created. + * + * - When mounting an existing superblock, mount options should + * match. + * + * - Remount is disallowed. + * + * - memcg: use_hierarchy is on by default and the cgroup file for + * the flag is not created. + * + * The followings are planned changes. + * + * - release_agent will be disallowed once replacement notification + * mechanism is implemented. + */ + CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), + + CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ + CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ +}; + +/* + * A cgroupfs_root represents the root of a cgroup hierarchy, and may be + * associated with a superblock to form an active hierarchy. This is + * internal to cgroup core. Don't access directly from controllers. + */ +struct cgroupfs_root { + struct super_block *sb; + + /* + * The bitmask of subsystems intended to be attached to this + * hierarchy + */ + unsigned long subsys_mask; + + /* Unique id for this hierarchy. 
*/ + int hierarchy_id; + + /* The bitmask of subsystems currently attached to this hierarchy */ + unsigned long actual_subsys_mask; + + /* A list running through the attached subsystems */ + struct list_head subsys_list; + + /* The root cgroup for this hierarchy */ + struct cgroup top_cgroup; + + /* Tracks how many cgroups are currently defined in hierarchy.*/ + int number_of_cgroups; + + /* A list running through the active hierarchies */ + struct list_head root_list; + + /* All cgroups on this root, cgroup_mutex protected */ + struct list_head allcg_list; + + /* Hierarchy-specific flags */ + unsigned long flags; + + /* IDs for cgroups in this hierarchy */ + struct ida cgroup_ida; + + /* The path to use for release notifications. */ + char release_agent_path[PATH_MAX]; + + /* The name for this hierarchy - may be empty */ + char name[MAX_CGROUP_ROOT_NAMELEN]; +}; + /* * A css_set is a structure holding pointers to a set of * cgroup_subsys_state objects. This saves space in the task struct @@ -278,6 +395,7 @@ struct cgroup_map_cb { /* cftype->flags */ #define CFTYPE_ONLY_ON_ROOT (1U << 0) /* only create on root cg */ #define CFTYPE_NOT_ON_ROOT (1U << 1) /* don't create on root cg */ +#define CFTYPE_INSANE (1U << 2) /* don't create if sane_behavior */ #define MAX_CFTYPE_NAME 64 @@ -304,9 +422,6 @@ struct cftype { /* CFTYPE_* flags */ unsigned int flags; - /* file xattrs */ - struct simple_xattrs xattrs; - int (*open)(struct inode *inode, struct file *file); ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft, struct file *file, @@ -404,18 +519,31 @@ struct cgroup_scanner { void *data; }; +/* + * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This + * function can be called as long as @cgrp is accessible. + */ +static inline bool cgroup_sane_behavior(const struct cgroup *cgrp) +{ + return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR; +} + +/* Caller should hold rcu_read_lock() */ +static inline const char *cgroup_name(const struct cgroup *cgrp) +{ + return rcu_dereference(cgrp->name)->name; +} + int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); int cgroup_is_removed(const struct cgroup *cgrp); +bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen); int cgroup_task_count(const struct cgroup *cgrp); -/* Return true if cgrp is a descendant of the task's cgroup */ -int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task); - /* * Control Group taskset, used to pass around set of tasks to cgroup_subsys * methods. @@ -523,10 +651,16 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state( * rcu_dereference_check() conditions, such as locks used during the * cgroup_subsys::attach() methods. 
*/ +#ifdef CONFIG_PROVE_RCU +extern struct mutex cgroup_mutex; +#define task_subsys_state_check(task, subsys_id, __c) \ + rcu_dereference_check((task)->cgroups->subsys[(subsys_id)], \ + lockdep_is_held(&(task)->alloc_lock) || \ + lockdep_is_held(&cgroup_mutex) || (__c)) +#else #define task_subsys_state_check(task, subsys_id, __c) \ - rcu_dereference_check(task->cgroups->subsys[subsys_id], \ - lockdep_is_held(&task->alloc_lock) || \ - cgroup_lock_is_held() || (__c)) + rcu_dereference((task)->cgroups->subsys[(subsys_id)]) +#endif static inline struct cgroup_subsys_state * task_subsys_state(struct task_struct *task, int subsys_id) @@ -661,8 +795,8 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp, struct cgroup_iter *it); void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); int cgroup_scan_tasks(struct cgroup_scanner *scan); -int cgroup_attach_task(struct cgroup *, struct task_struct *); int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); +int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); /* * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h index 9c7f5807824b..dd7adff76e81 100644 --- a/include/linux/clk-private.h +++ b/include/linux/clk-private.h @@ -152,7 +152,7 @@ struct clk { }, \ .reg = _reg, \ .shift = _shift, \ - .width = _width, \ + .mask = BIT(_width) - 1, \ .flags = _mux_flags, \ .lock = _lock, \ }; \ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 7f197d7addb0..11860985fecb 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -45,6 +45,14 @@ struct clk_hw; * undo any work done in the @prepare callback. Called with * prepare_lock held. * + * @is_prepared: Queries the hardware to determine if the clock is prepared. + * This function is allowed to sleep. Optional, if this op is not + * set then the prepare count will be used. + * + * @unprepare_unused: Unprepare the clock atomically. Only called from + * clk_disable_unused for prepare clocks with special needs. + * Called with prepare mutex held. This function may sleep. + * * @enable: Enable the clock atomically. This must not return until the * clock is generating a valid clock signal, usable by consumer * devices. Called with enable_lock held. This function must not @@ -108,6 +116,8 @@ struct clk_hw; struct clk_ops { int (*prepare)(struct clk_hw *hw); void (*unprepare)(struct clk_hw *hw); + int (*is_prepared)(struct clk_hw *hw); + void (*unprepare_unused)(struct clk_hw *hw); int (*enable)(struct clk_hw *hw); void (*disable)(struct clk_hw *hw); int (*is_enabled)(struct clk_hw *hw); @@ -239,9 +249,14 @@ struct clk_div_table { * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is * the raw value read from the register, with the value of zero considered - * invalid + * invalid, unless CLK_DIVIDER_ALLOW_ZERO is set. * CLK_DIVIDER_POWER_OF_TWO - clock divisor is 2 raised to the value read from * the hardware register + * CLK_DIVIDER_ALLOW_ZERO - Allow zero divisors. For dividers which have + * CLK_DIVIDER_ONE_BASED set, it is possible to end up with a zero divisor. + * Some hardware implementations gracefully handle this case and allow a + * zero divisor by not modifying their input clock + * (divide by one / bypass). 
*/ struct clk_divider { struct clk_hw hw; @@ -255,6 +270,7 @@ struct clk_divider { #define CLK_DIVIDER_ONE_BASED BIT(0) #define CLK_DIVIDER_POWER_OF_TWO BIT(1) +#define CLK_DIVIDER_ALLOW_ZERO BIT(2) extern const struct clk_ops clk_divider_ops; struct clk *clk_register_divider(struct device *dev, const char *name, @@ -274,7 +290,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name, * @reg: register controlling multiplexer * @shift: shift to multiplexer bit field * @width: width of mutliplexer bit field - * @num_clks: number of parent clocks + * @flags: hardware-specific flags * @lock: register lock * * Clock with multiple selectable parents. Implements .get_parent, .set_parent @@ -287,8 +303,9 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name, struct clk_mux { struct clk_hw hw; void __iomem *reg; + u32 *table; + u32 mask; u8 shift; - u8 width; u8 flags; spinlock_t *lock; }; @@ -297,11 +314,19 @@ struct clk_mux { #define CLK_MUX_INDEX_BIT BIT(1) extern const struct clk_ops clk_mux_ops; + struct clk *clk_register_mux(struct device *dev, const char *name, const char **parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_mux_flags, spinlock_t *lock); +struct clk *clk_register_mux_table(struct device *dev, const char *name, + const char **parent_names, u8 num_parents, unsigned long flags, + void __iomem *reg, u8 shift, u32 mask, + u8 clk_mux_flags, u32 *table, spinlock_t *lock); + +void of_fixed_factor_clk_setup(struct device_node *node); + /** * struct clk_fixed_factor - fixed multiplier and divider clock * @@ -325,6 +350,37 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div); +/*** + * struct clk_composite - aggregate clock of mux, divider and gate clocks + * + * @hw: handle between common and hardware-specific interfaces + * @mux_hw: handle between composite and hardware-specific mux clock + * @rate_hw: handle between composite and hardware-specific rate clock + * @gate_hw: handle between composite and hardware-specific gate clock + * @mux_ops: clock ops for mux + * @rate_ops: clock ops for rate + * @gate_ops: clock ops for gate + */ +struct clk_composite { + struct clk_hw hw; + struct clk_ops ops; + + struct clk_hw *mux_hw; + struct clk_hw *rate_hw; + struct clk_hw *gate_hw; + + const struct clk_ops *mux_ops; + const struct clk_ops *rate_ops; + const struct clk_ops *gate_ops; +}; + +struct clk *clk_register_composite(struct device *dev, const char *name, + const char **parent_names, int num_parents, + struct clk_hw *mux_hw, const struct clk_ops *mux_ops, + struct clk_hw *rate_hw, const struct clk_ops *rate_ops, + struct clk_hw *gate_hw, const struct clk_ops *gate_ops, + unsigned long flags); + /** * clk_register - allocate a new clock, register it and return an opaque cookie * @dev: device that is registering this clock @@ -351,6 +407,7 @@ unsigned int __clk_get_enable_count(struct clk *clk); unsigned int __clk_get_prepare_count(struct clk *clk); unsigned long __clk_get_rate(struct clk *clk); unsigned long __clk_get_flags(struct clk *clk); +bool __clk_is_prepared(struct clk *clk); bool __clk_is_enabled(struct clk *clk); struct clk *__clk_lookup(const char *name); diff --git a/include/linux/clk.h b/include/linux/clk.h index b3ac22d0fc1f..9a6d04524b1a 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -28,16 +28,16 @@ struct clk; * PRE_RATE_CHANGE - called immediately before 
the clk rate is changed, * to indicate that the rate change will proceed. Drivers must * immediately terminate any operations that will be affected by the - * rate change. Callbacks may either return NOTIFY_DONE or - * NOTIFY_STOP. + * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK, + * NOTIFY_STOP or NOTIFY_BAD. * * ABORT_RATE_CHANGE: called if the rate change failed for some reason * after PRE_RATE_CHANGE. In this case, all registered notifiers on * the clk will be called with ABORT_RATE_CHANGE. Callbacks must - * always return NOTIFY_DONE. + * always return NOTIFY_DONE or NOTIFY_OK. * * POST_RATE_CHANGE - called after the clk rate change has successfully - * completed. Callbacks must always return NOTIFY_DONE. + * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK. * */ #define PRE_RATE_CHANGE BIT(0) diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h new file mode 100644 index 000000000000..e074fdd5a236 --- /dev/null +++ b/include/linux/clk/sunxi.h @@ -0,0 +1,22 @@ +/* + * Copyright 2012 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __LINUX_CLK_SUNXI_H_ +#define __LINUX_CLK_SUNXI_H_ + +void __init sunxi_init_clocks(void); + +#endif diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 032560295fcb..d08e4d2a9b92 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -591,6 +591,21 @@ static inline int cpulist_scnprintf(char *buf, int len, } /** + * cpumask_parse - extract a cpumask from from a string + * @buf: the buffer to extract from + * @dstp: the cpumask to set. + * + * Returns -errno, or 0 for success. + */ +static inline int cpumask_parse(const char *buf, struct cpumask *dstp) +{ + char *nl = strchr(buf, '\n'); + int len = nl ? nl - buf : strlen(buf); + + return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); +} + +/** * cpulist_parse - extract a cpumask from a user string of ranges * @buf: the buffer to extract from * @dstp: the cpumask to set. 
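The new cpumask_parse() helper above trims its input at the first newline and hands the rest to bitmap_parse(), so a hex mask written through a sysfs-style interface can be parsed directly (cpulist_parse(), by contrast, takes range syntax such as "0-3,8"). The following is an illustrative sketch only, not part of this patch; the example_* handler name and the printed message are hypothetical, and it merely shows the intended calling pattern.

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/gfp.h>

/* Hypothetical sysfs store handler; the attribute wiring itself is not shown. */
static ssize_t example_cpumask_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        cpumask_var_t new_mask;
        char out[128];
        int ret;

        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
                return -ENOMEM;

        /* cpumask_parse() stops at the first '\n', so raw sysfs input is fine */
        ret = cpumask_parse(buf, new_mask);
        if (ret) {
                free_cpumask_var(new_mask);
                return ret;
        }

        /* Echo the result back using the existing list formatter */
        cpulist_scnprintf(out, sizeof(out), new_mask);
        dev_info(dev, "parsed cpumask: %s\n", out);

        free_cpumask_var(new_mask);
        return count;
}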
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 8c8a60d29407..ccd1de8ad822 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -11,7 +11,6 @@ #include <linux/sched.h> #include <linux/cpumask.h> #include <linux/nodemask.h> -#include <linux/cgroup.h> #include <linux/mm.h> #ifdef CONFIG_CPUSETS diff --git a/include/linux/device.h b/include/linux/device.h index 88615ccaf23a..711793b145ff 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -297,6 +297,8 @@ void subsys_interface_unregister(struct subsys_interface *sif); int subsys_system_register(struct bus_type *subsys, const struct attribute_group **groups); +int subsys_virtual_register(struct bus_type *subsys, + const struct attribute_group **groups); /** * struct class - device classes diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 52da2a250795..f83e17a40e8b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -261,8 +261,10 @@ struct ftrace_probe_ops { void (*func)(unsigned long ip, unsigned long parent_ip, void **data); - int (*callback)(unsigned long ip, void **data); - void (*free)(void **data); + int (*init)(struct ftrace_probe_ops *ops, + unsigned long ip, void **data); + void (*free)(struct ftrace_probe_ops *ops, + unsigned long ip, void **data); int (*print)(struct seq_file *m, unsigned long ip, struct ftrace_probe_ops *ops, diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 13a54d0bdfa8..34e00fb49bec 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -8,6 +8,7 @@ #include <linux/perf_event.h> struct trace_array; +struct trace_buffer; struct tracer; struct dentry; @@ -38,6 +39,12 @@ const char *ftrace_print_symbols_seq_u64(struct trace_seq *p, const char *ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int len); +struct trace_iterator; +struct trace_event; + +int ftrace_raw_output_prep(struct trace_iterator *iter, + struct trace_event *event); + /* * The trace entry - the most basic unit of tracing. 
This is what * is printed in the end as a single line in the trace output, such as: @@ -61,6 +68,7 @@ struct trace_entry { struct trace_iterator { struct trace_array *tr; struct tracer *trace; + struct trace_buffer *trace_buffer; void *private; int cpu_file; struct mutex mutex; @@ -95,8 +103,6 @@ enum trace_iter_flags { }; -struct trace_event; - typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, int flags, struct trace_event *event); @@ -128,6 +134,13 @@ enum print_line_t { void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc); +struct ftrace_event_file; + +struct ring_buffer_event * +trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer, + struct ftrace_event_file *ftrace_file, + int type, unsigned long len, + unsigned long flags, int pc); struct ring_buffer_event * trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, int type, unsigned long len, @@ -182,53 +195,49 @@ extern int ftrace_event_reg(struct ftrace_event_call *event, enum trace_reg type, void *data); enum { - TRACE_EVENT_FL_ENABLED_BIT, TRACE_EVENT_FL_FILTERED_BIT, - TRACE_EVENT_FL_RECORDED_CMD_BIT, TRACE_EVENT_FL_CAP_ANY_BIT, TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, + TRACE_EVENT_FL_WAS_ENABLED_BIT, }; +/* + * Event flags: + * FILTERED - The event has a filter attached + * CAP_ANY - Any user can enable for perf + * NO_SET_FILTER - Set when filter has error and is to be ignored + * IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file + * WAS_ENABLED - Set and stays set when an event was ever enabled + * (used for module unloading, if a module event is enabled, + * it is best to clear the buffers that used it). + */ enum { - TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT), TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), - TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT), TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), + TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), }; struct ftrace_event_call { struct list_head list; struct ftrace_event_class *class; char *name; - struct dentry *dir; struct trace_event event; const char *print_fmt; struct event_filter *filter; + struct list_head *files; void *mod; void *data; - /* - * 32 bit flags: - * bit 1: enabled - * bit 2: filter_active - * bit 3: enabled cmd record - * bit 4: allow trace by non root (cap any) - * bit 5: failed to apply filter - * bit 6: ftrace internal event (do not enable) - * - * Changes to flags must hold the event_mutex. - * - * Note: Reads of flags do not hold the event_mutex since - * they occur in critical sections. But the way flags - * is currently used, these changes do no affect the code - * except that when a change is made, it may have a slight - * delay in propagating the changes to other CPUs due to - * caching and such. 
+ * bit 0: filter_active + * bit 1: allow trace by non root (cap any) + * bit 2: failed to apply filter + * bit 3: ftrace internal event (do not enable) + * bit 4: Event was enabled by module */ - unsigned int flags; + int flags; /* static flags of different events */ #ifdef CONFIG_PERF_EVENTS int perf_refcount; @@ -236,6 +245,56 @@ struct ftrace_event_call { #endif }; +struct trace_array; +struct ftrace_subsystem_dir; + +enum { + FTRACE_EVENT_FL_ENABLED_BIT, + FTRACE_EVENT_FL_RECORDED_CMD_BIT, + FTRACE_EVENT_FL_SOFT_MODE_BIT, + FTRACE_EVENT_FL_SOFT_DISABLED_BIT, +}; + +/* + * Ftrace event file flags: + * ENABLED - The event is enabled + * RECORDED_CMD - The comms should be recorded at sched_switch + * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED + * SOFT_DISABLED - When set, do not trace the event (even though its + * tracepoint may be enabled) + */ +enum { + FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT), + FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT), + FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT), + FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT), +}; + +struct ftrace_event_file { + struct list_head list; + struct ftrace_event_call *event_call; + struct dentry *dir; + struct trace_array *tr; + struct ftrace_subsystem_dir *system; + + /* + * 32 bit flags: + * bit 0: enabled + * bit 1: enabled cmd record + * bit 2: enable/disable with the soft disable bit + * bit 3: soft disabled + * + * Note: The bits must be set atomically to prevent races + * from other writers. Reads of flags do not need to be in + * sync as they occur in critical sections. But the way flags + * is currently used, these changes do not affect the code + * except that when a change is made, it may have a slight + * delay in propagating the changes to other CPUs due to + * caching and such. Which is mostly OK ;-) + */ + unsigned long flags; +}; + #define __TRACE_EVENT_FLAGS(name, value) \ static int __init trace_init_flags_##name(void) \ { \ @@ -274,7 +333,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, extern int trace_add_event_call(struct ftrace_event_call *call); extern void trace_remove_event_call(struct ftrace_event_call *call); -#define is_signed_type(type) (((type)(-1)) < (type)0) +#define is_signed_type(type) (((type)(-1)) < (type)1) int trace_set_clr_event(const char *system, const char *event, int set); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 79fdd80a42d4..2dac79c39199 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -486,6 +486,8 @@ enum ftrace_dump_mode { void tracing_on(void); void tracing_off(void); int tracing_is_on(void); +void tracing_snapshot(void); +void tracing_snapshot_alloc(void); extern void tracing_start(void); extern void tracing_stop(void); @@ -515,10 +517,32 @@ do { \ * * This is intended as a debugging tool for the developer only. * Please refrain from leaving trace_printks scattered around in - * your code. + * your code. (Extra memory is used for special buffers that are + * allocated when trace_printk() is used) + * + * A little optization trick is done here. If there's only one + * argument, there's no need to scan the string for printf formats. + * The trace_puts() will suffice. But how can we take advantage of + * using trace_puts() when trace_printk() has only one argument? + * By stringifying the args and checking the size we can tell + * whether or not there are args. 
__stringify((__VA_ARGS__)) will + * turn into "()\0" with a size of 3 when there are no args, anything + * else will be bigger. All we need to do is define a string to this, + * and then take its size and compare to 3. If it's bigger, use + * do_trace_printk() otherwise, optimize it to trace_puts(). Then just + * let gcc optimize the rest. */ -#define trace_printk(fmt, args...) \ +#define trace_printk(fmt, ...) \ +do { \ + char _______STR[] = __stringify((__VA_ARGS__)); \ + if (sizeof(_______STR) > 3) \ + do_trace_printk(fmt, ##__VA_ARGS__); \ + else \ + trace_puts(fmt); \ +} while (0) + +#define do_trace_printk(fmt, args...) \ do { \ static const char *trace_printk_fmt \ __attribute__((section("__trace_printk_fmt"))) = \ @@ -538,7 +562,45 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...); extern __printf(2, 3) int __trace_printk(unsigned long ip, const char *fmt, ...); -extern void trace_dump_stack(void); +/** + * trace_puts - write a string into the ftrace buffer + * @str: the string to record + * + * Note: __trace_bputs is an internal function for trace_puts and + * the @ip is passed in via the trace_puts macro. + * + * This is similar to trace_printk() but is made for those really fast + * paths that a developer wants the least amount of "Heisenbug" affects, + * where the processing of the print format is still too much. + * + * This function allows a kernel developer to debug fast path sections + * that printk is not appropriate for. By scattering in various + * printk like tracing in the code, a developer can quickly see + * where problems are occurring. + * + * This is intended as a debugging tool for the developer only. + * Please refrain from leaving trace_puts scattered around in + * your code. (Extra memory is used for special buffers that are + * allocated when trace_puts() is used) + * + * Returns: 0 if nothing was written, positive # if string was. + * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) + */ + +extern int __trace_bputs(unsigned long ip, const char *str); +extern int __trace_puts(unsigned long ip, const char *str, int size); +#define trace_puts(str) ({ \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(str) ? str : NULL; \ + \ + if (__builtin_constant_p(str)) \ + __trace_bputs(_THIS_IP_, trace_printk_fmt); \ + else \ + __trace_puts(_THIS_IP_, str, strlen(str)); \ +}) + +extern void trace_dump_stack(int skip); /* * The double __builtin_constant_p is because gcc will give us an error @@ -573,6 +635,8 @@ static inline void trace_dump_stack(void) { } static inline void tracing_on(void) { } static inline void tracing_off(void) { } static inline int tracing_is_on(void) { return 0; } +static inline void tracing_snapshot(void) { } +static inline void tracing_snapshot_alloc(void) { } static inline __printf(1, 2) int trace_printk(const char *fmt, ...) 
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h index 9db0bda446a0..84f449475c25 100644 --- a/include/linux/mfd/abx500/ab8500.h +++ b/include/linux/mfd/abx500/ab8500.h @@ -364,8 +364,7 @@ struct ab8500 { const int *irq_reg_offset; }; -struct regulator_reg_init; -struct regulator_init_data; +struct ab8500_regulator_platform_data; struct ab8500_gpio_platform_data; struct ab8500_codec_platform_data; struct ab8500_sysctrl_platform_data; @@ -375,19 +374,13 @@ struct ab8500_sysctrl_platform_data; * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used * @pm_power_off: Should machine pm power off hook be registered or not * @init: board-specific initialization after detection of ab8500 - * @num_regulator_reg_init: number of regulator init registers - * @regulator_reg_init: regulator init registers - * @num_regulator: number of regulators * @regulator: machine-specific constraints for regulators */ struct ab8500_platform_data { int irq_base; bool pm_power_off; void (*init) (struct ab8500 *); - int num_regulator_reg_init; - struct ab8500_regulator_reg_init *regulator_reg_init; - int num_regulator; - struct regulator_init_data *regulator; + struct ab8500_regulator_platform_data *regulator; struct abx500_gpio_platform_data *gpio; struct ab8500_codec_platform_data *codec; struct ab8500_sysctrl_platform_data *sysctrl; diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 3bbda22721ea..ecddc5173c7c 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -109,19 +109,6 @@ struct palmas_reg_init { */ int mode_sleep; - /* tstep is the timestep loaded to the TSTEP register - * - * For SMPS - * - * 0: Jump (no slope control) - * 1: 10mV/us - * 2: 5mV/us - * 3: 2.5mV/us - * - * For LDO unused - */ - int tstep; - /* voltage_sel is the bitfield loaded onto the SMPSX_VOLTAGE * register. Set this is the default voltage set in OTP needs * to be overridden. @@ -154,6 +141,12 @@ enum palmas_regulators { PALMAS_REG_LDO9, PALMAS_REG_LDOLN, PALMAS_REG_LDOUSB, + /* External regulators */ + PALMAS_REG_REGEN1, + PALMAS_REG_REGEN2, + PALMAS_REG_REGEN3, + PALMAS_REG_SYSEN1, + PALMAS_REG_SYSEN2, /* Total number of regulators */ PALMAS_NUM_REGS, }; @@ -171,6 +164,9 @@ struct palmas_pmic_platform_data { /* use LDO6 for vibrator control */ int ldo6_vibrator; + + /* Enable tracking mode of LDO8 */ + bool enable_ldo8_tracking; }; struct palmas_usb_platform_data { @@ -331,6 +327,8 @@ struct palmas_pmic { int smps457; int range[PALMAS_REG_SMPS10]; + unsigned int ramp_delay[PALMAS_REG_SMPS10]; + unsigned int current_reg_mode[PALMAS_REG_SMPS10]; }; struct palmas_resource { diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h new file mode 100644 index 000000000000..92dabcaf6499 --- /dev/null +++ b/include/linux/platform_data/si5351.h @@ -0,0 +1,114 @@ +/* + * Si5351A/B/C programmable clock generator platform_data. 
+ */ + +#ifndef __LINUX_PLATFORM_DATA_SI5351_H__ +#define __LINUX_PLATFORM_DATA_SI5351_H__ + +struct clk; + +/** + * enum si5351_variant - SiLabs Si5351 chip variant + * @SI5351_VARIANT_A: Si5351A (8 output clocks, XTAL input) + * @SI5351_VARIANT_A3: Si5351A MSOP10 (3 output clocks, XTAL input) + * @SI5351_VARIANT_B: Si5351B (8 output clocks, XTAL/VXCO input) + * @SI5351_VARIANT_C: Si5351C (8 output clocks, XTAL/CLKIN input) + */ +enum si5351_variant { + SI5351_VARIANT_A = 1, + SI5351_VARIANT_A3 = 2, + SI5351_VARIANT_B = 3, + SI5351_VARIANT_C = 4, +}; + +/** + * enum si5351_pll_src - Si5351 pll clock source + * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config + * @SI5351_PLL_SRC_XTAL: pll source clock is XTAL input + * @SI5351_PLL_SRC_CLKIN: pll source clock is CLKIN input (Si5351C only) + */ +enum si5351_pll_src { + SI5351_PLL_SRC_DEFAULT = 0, + SI5351_PLL_SRC_XTAL = 1, + SI5351_PLL_SRC_CLKIN = 2, +}; + +/** + * enum si5351_multisynth_src - Si5351 multisynth clock source + * @SI5351_MULTISYNTH_SRC_DEFAULT: default, do not change eeprom config + * @SI5351_MULTISYNTH_SRC_VCO0: multisynth source clock is VCO0 + * @SI5351_MULTISYNTH_SRC_VCO1: multisynth source clock is VCO1/VXCO + */ +enum si5351_multisynth_src { + SI5351_MULTISYNTH_SRC_DEFAULT = 0, + SI5351_MULTISYNTH_SRC_VCO0 = 1, + SI5351_MULTISYNTH_SRC_VCO1 = 2, +}; + +/** + * enum si5351_clkout_src - Si5351 clock output clock source + * @SI5351_CLKOUT_SRC_DEFAULT: default, do not change eeprom config + * @SI5351_CLKOUT_SRC_MSYNTH_N: clkout N source clock is multisynth N + * @SI5351_CLKOUT_SRC_MSYNTH_0_4: clkout N source clock is multisynth 0 (N<4) + * or 4 (N>=4) + * @SI5351_CLKOUT_SRC_XTAL: clkout N source clock is XTAL + * @SI5351_CLKOUT_SRC_CLKIN: clkout N source clock is CLKIN (Si5351C only) + */ +enum si5351_clkout_src { + SI5351_CLKOUT_SRC_DEFAULT = 0, + SI5351_CLKOUT_SRC_MSYNTH_N = 1, + SI5351_CLKOUT_SRC_MSYNTH_0_4 = 2, + SI5351_CLKOUT_SRC_XTAL = 3, + SI5351_CLKOUT_SRC_CLKIN = 4, +}; + +/** + * enum si5351_drive_strength - Si5351 clock output drive strength + * @SI5351_DRIVE_DEFAULT: default, do not change eeprom config + * @SI5351_DRIVE_2MA: 2mA clock output drive strength + * @SI5351_DRIVE_4MA: 4mA clock output drive strength + * @SI5351_DRIVE_6MA: 6mA clock output drive strength + * @SI5351_DRIVE_8MA: 8mA clock output drive strength + */ +enum si5351_drive_strength { + SI5351_DRIVE_DEFAULT = 0, + SI5351_DRIVE_2MA = 2, + SI5351_DRIVE_4MA = 4, + SI5351_DRIVE_6MA = 6, + SI5351_DRIVE_8MA = 8, +}; + +/** + * struct si5351_clkout_config - Si5351 clock output configuration + * @clkout: clkout number + * @multisynth_src: multisynth source clock + * @clkout_src: clkout source clock + * @pll_master: if true, clkout can also change pll rate + * @drive: output drive strength + * @rate: initial clkout rate, or default if 0 + */ +struct si5351_clkout_config { + enum si5351_multisynth_src multisynth_src; + enum si5351_clkout_src clkout_src; + enum si5351_drive_strength drive; + bool pll_master; + unsigned long rate; +}; + +/** + * struct si5351_platform_data - Platform data for the Si5351 clock driver + * @variant: Si5351 chip variant + * @clk_xtal: xtal input clock + * @clk_clkin: clkin input clock + * @pll_src: array of pll source clock setting + * @clkout: array of clkout configuration + */ +struct si5351_platform_data { + enum si5351_variant variant; + struct clk *clk_xtal; + struct clk *clk_clkin; + enum si5351_pll_src pll_src[2]; + struct si5351_clkout_config clkout[8]; +}; + +#endif diff --git 
a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index ceba18d23a5a..8447f634c7f5 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -11,6 +11,8 @@ #ifndef __S3C64XX_PLAT_SPI_H #define __S3C64XX_PLAT_SPI_H +#include <linux/dmaengine.h> + struct platform_device; /** @@ -38,6 +40,7 @@ struct s3c64xx_spi_info { int src_clk_nr; int num_cs; int (*cfg_gpio)(void); + dma_filter_fn filter; }; /** diff --git a/include/linux/regmap.h b/include/linux/regmap.h index bf77dfdabef9..02d84e24b7c2 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -389,6 +389,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg, bool *change); int regmap_get_val_bytes(struct regmap *map); int regmap_async_complete(struct regmap *map); +bool regmap_can_raw_write(struct regmap *map); int regcache_sync(struct regmap *map); int regcache_sync_region(struct regmap *map, unsigned int min, diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h index 7bd73bbdfd1b..7c5ff0c55773 100644 --- a/include/linux/regulator/ab8500.h +++ b/include/linux/regulator/ab8500.h @@ -5,11 +5,14 @@ * * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson + * Daniel Willerud <daniel.willerud@stericsson.com> for ST-Ericsson */ #ifndef __LINUX_MFD_AB8500_REGULATOR_H #define __LINUX_MFD_AB8500_REGULATOR_H +#include <linux/platform_device.h> + /* AB8500 regulators */ enum ab8500_regulator_id { AB8500_LDO_AUX1, @@ -17,7 +20,6 @@ enum ab8500_regulator_id { AB8500_LDO_AUX3, AB8500_LDO_INTCORE, AB8500_LDO_TVOUT, - AB8500_LDO_USB, AB8500_LDO_AUDIO, AB8500_LDO_ANAMIC1, AB8500_LDO_ANAMIC2, @@ -26,7 +28,28 @@ enum ab8500_regulator_id { AB8500_NUM_REGULATORS, }; -/* AB9450 regulators */ +/* AB8505 regulators */ +enum ab8505_regulator_id { + AB8505_LDO_AUX1, + AB8505_LDO_AUX2, + AB8505_LDO_AUX3, + AB8505_LDO_AUX4, + AB8505_LDO_AUX5, + AB8505_LDO_AUX6, + AB8505_LDO_INTCORE, + AB8505_LDO_ADC, + AB8505_LDO_USB, + AB8505_LDO_AUDIO, + AB8505_LDO_ANAMIC1, + AB8505_LDO_ANAMIC2, + AB8505_LDO_AUX8, + AB8505_LDO_ANA, + AB8505_SYSCLKREQ_2, + AB8505_SYSCLKREQ_4, + AB8505_NUM_REGULATORS, +}; + +/* AB9540 regulators */ enum ab9540_regulator_id { AB9540_LDO_AUX1, AB9540_LDO_AUX2, @@ -45,16 +68,39 @@ enum ab9540_regulator_id { AB9540_NUM_REGULATORS, }; -/* AB8500 and AB9540 register initialization */ +/* AB8540 regulators */ +enum ab8540_regulator_id { + AB8540_LDO_AUX1, + AB8540_LDO_AUX2, + AB8540_LDO_AUX3, + AB8540_LDO_AUX4, + AB8540_LDO_AUX5, + AB8540_LDO_AUX6, + AB8540_LDO_INTCORE, + AB8540_LDO_TVOUT, + AB8540_LDO_AUDIO, + AB8540_LDO_ANAMIC1, + AB8540_LDO_ANAMIC2, + AB8540_LDO_DMIC, + AB8540_LDO_ANA, + AB8540_LDO_SDIO, + AB8540_SYSCLKREQ_2, + AB8540_SYSCLKREQ_4, + AB8540_NUM_REGULATORS, +}; + +/* AB8500, AB8505, and AB9540 register initialization */ struct ab8500_regulator_reg_init { int id; + u8 mask; u8 value; }; -#define INIT_REGULATOR_REGISTER(_id, _value) \ - { \ - .id = _id, \ - .value = _value, \ +#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \ + { \ + .id = _id, \ + .mask = _mask, \ + .value = _value, \ } /* AB8500 registers */ @@ -86,10 +132,58 @@ enum ab8500_regulator_reg { AB8500_REGUCTRL2SPARE, AB8500_REGUCTRLDISCH, AB8500_REGUCTRLDISCH2, - AB8500_VSMPS1SEL1, AB8500_NUM_REGULATOR_REGISTERS, }; +/* AB8505 registers */ +enum ab8505_regulator_reg { + AB8505_REGUREQUESTCTRL1, + AB8505_REGUREQUESTCTRL2, + AB8505_REGUREQUESTCTRL3, + 
AB8505_REGUREQUESTCTRL4, + AB8505_REGUSYSCLKREQ1HPVALID1, + AB8505_REGUSYSCLKREQ1HPVALID2, + AB8505_REGUHWHPREQ1VALID1, + AB8505_REGUHWHPREQ1VALID2, + AB8505_REGUHWHPREQ2VALID1, + AB8505_REGUHWHPREQ2VALID2, + AB8505_REGUSWHPREQVALID1, + AB8505_REGUSWHPREQVALID2, + AB8505_REGUSYSCLKREQVALID1, + AB8505_REGUSYSCLKREQVALID2, + AB8505_REGUVAUX4REQVALID, + AB8505_REGUMISC1, + AB8505_VAUDIOSUPPLY, + AB8505_REGUCTRL1VAMIC, + AB8505_VSMPSAREGU, + AB8505_VSMPSBREGU, + AB8505_VSAFEREGU, /* NOTE! PRCMU register */ + AB8505_VPLLVANAREGU, + AB8505_EXTSUPPLYREGU, + AB8505_VAUX12REGU, + AB8505_VRF1VAUX3REGU, + AB8505_VSMPSASEL1, + AB8505_VSMPSASEL2, + AB8505_VSMPSASEL3, + AB8505_VSMPSBSEL1, + AB8505_VSMPSBSEL2, + AB8505_VSMPSBSEL3, + AB8505_VSAFESEL1, /* NOTE! PRCMU register */ + AB8505_VSAFESEL2, /* NOTE! PRCMU register */ + AB8505_VSAFESEL3, /* NOTE! PRCMU register */ + AB8505_VAUX1SEL, + AB8505_VAUX2SEL, + AB8505_VRF1VAUX3SEL, + AB8505_VAUX4REQCTRL, + AB8505_VAUX4REGU, + AB8505_VAUX4SEL, + AB8505_REGUCTRLDISCH, + AB8505_REGUCTRLDISCH2, + AB8505_REGUCTRLDISCH3, + AB8505_CTRLVAUX5, + AB8505_CTRLVAUX6, + AB8505_NUM_REGULATOR_REGISTERS, +}; /* AB9540 registers */ enum ab9540_regulator_reg { @@ -139,4 +233,111 @@ enum ab9540_regulator_reg { AB9540_NUM_REGULATOR_REGISTERS, }; +/* AB8540 registers */ +enum ab8540_regulator_reg { + AB8540_REGUREQUESTCTRL1, + AB8540_REGUREQUESTCTRL2, + AB8540_REGUREQUESTCTRL3, + AB8540_REGUREQUESTCTRL4, + AB8540_REGUSYSCLKREQ1HPVALID1, + AB8540_REGUSYSCLKREQ1HPVALID2, + AB8540_REGUHWHPREQ1VALID1, + AB8540_REGUHWHPREQ1VALID2, + AB8540_REGUHWHPREQ2VALID1, + AB8540_REGUHWHPREQ2VALID2, + AB8540_REGUSWHPREQVALID1, + AB8540_REGUSWHPREQVALID2, + AB8540_REGUSYSCLKREQVALID1, + AB8540_REGUSYSCLKREQVALID2, + AB8540_REGUVAUX4REQVALID, + AB8540_REGUVAUX5REQVALID, + AB8540_REGUVAUX6REQVALID, + AB8540_REGUVCLKBREQVALID, + AB8540_REGUVRF1REQVALID, + AB8540_REGUMISC1, + AB8540_VAUDIOSUPPLY, + AB8540_REGUCTRL1VAMIC, + AB8540_VHSIC, + AB8540_VSDIO, + AB8540_VSMPS1REGU, + AB8540_VSMPS2REGU, + AB8540_VSMPS3REGU, + AB8540_VPLLVANAREGU, + AB8540_EXTSUPPLYREGU, + AB8540_VAUX12REGU, + AB8540_VRF1VAUX3REGU, + AB8540_VSMPS1SEL1, + AB8540_VSMPS1SEL2, + AB8540_VSMPS1SEL3, + AB8540_VSMPS2SEL1, + AB8540_VSMPS2SEL2, + AB8540_VSMPS2SEL3, + AB8540_VSMPS3SEL1, + AB8540_VSMPS3SEL2, + AB8540_VAUX1SEL, + AB8540_VAUX2SEL, + AB8540_VRF1VAUX3SEL, + AB8540_REGUCTRL2SPARE, + AB8540_VAUX4REQCTRL, + AB8540_VAUX4REGU, + AB8540_VAUX4SEL, + AB8540_VAUX5REQCTRL, + AB8540_VAUX5REGU, + AB8540_VAUX5SEL, + AB8540_VAUX6REQCTRL, + AB8540_VAUX6REGU, + AB8540_VAUX6SEL, + AB8540_VCLKBREQCTRL, + AB8540_VCLKBREGU, + AB8540_VCLKBSEL, + AB8540_VRF1REQCTRL, + AB8540_REGUCTRLDISCH, + AB8540_REGUCTRLDISCH2, + AB8540_REGUCTRLDISCH3, + AB8540_REGUCTRLDISCH4, + AB8540_VSIMSYSCLKCTRL, + AB8540_VANAVPLLSEL, + AB8540_NUM_REGULATOR_REGISTERS, +}; + +/* AB8500 external regulators */ +struct ab8500_ext_regulator_cfg { + bool hwreq; /* requires hw mode or high power mode */ +}; + +enum ab8500_ext_regulator_id { + AB8500_EXT_SUPPLY1, + AB8500_EXT_SUPPLY2, + AB8500_EXT_SUPPLY3, + AB8500_NUM_EXT_REGULATORS, +}; + +/* AB8500 regulator platform data */ +struct ab8500_regulator_platform_data { + int num_reg_init; + struct ab8500_regulator_reg_init *reg_init; + int num_regulator; + struct regulator_init_data *regulator; + int num_ext_regulator; + struct regulator_init_data *ext_regulator; +}; + +#ifdef CONFIG_REGULATOR_AB8500_DEBUG +int ab8500_regulator_debug_init(struct platform_device *pdev); +int ab8500_regulator_debug_exit(struct platform_device *pdev); 
+#else +static inline int ab8500_regulator_debug_init(struct platform_device *pdev) +{ + return 0; +} +static inline int ab8500_regulator_debug_exit(struct platform_device *pdev) +{ + return 0; +} +#endif + +/* AB8500 external regulator functions. */ +int ab8500_ext_regulator_init(struct platform_device *pdev); +void ab8500_ext_regulator_exit(struct platform_device *pdev); + #endif diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 7bc732ce6e50..145022a83085 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -141,18 +141,18 @@ void regulator_put(struct regulator *regulator); void devm_regulator_put(struct regulator *regulator); /* regulator output control and status */ -int regulator_enable(struct regulator *regulator); +int __must_check regulator_enable(struct regulator *regulator); int regulator_disable(struct regulator *regulator); int regulator_force_disable(struct regulator *regulator); int regulator_is_enabled(struct regulator *regulator); int regulator_disable_deferred(struct regulator *regulator, int ms); -int regulator_bulk_get(struct device *dev, int num_consumers, - struct regulator_bulk_data *consumers); -int devm_regulator_bulk_get(struct device *dev, int num_consumers, - struct regulator_bulk_data *consumers); -int regulator_bulk_enable(int num_consumers, - struct regulator_bulk_data *consumers); +int __must_check regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers); +int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers, + struct regulator_bulk_data *consumers); +int __must_check regulator_bulk_enable(int num_consumers, + struct regulator_bulk_data *consumers); int regulator_bulk_disable(int num_consumers, struct regulator_bulk_data *consumers); int regulator_bulk_force_disable(int num_consumers, diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 7df93f52db08..6700cc94bdd1 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -22,6 +22,7 @@ struct regmap; struct regulator_dev; struct regulator_init_data; +struct regulator_enable_gpio; enum regulator_status { REGULATOR_STATUS_OFF, @@ -199,6 +200,8 @@ enum regulator_type { * output when using regulator_set_voltage_sel_regmap * @enable_reg: Register for control when using regmap enable/disable ops * @enable_mask: Mask for control when using regmap enable/disable ops + * @enable_is_inverted: A flag to indicate set enable_mask bits to disable + * when using regulator_enable_regmap and friends APIs. 
* @bypass_reg: Register for control when using regmap set_bypass * @bypass_mask: Mask for control when using regmap set_bypass * @@ -228,6 +231,7 @@ struct regulator_desc { unsigned int apply_bit; unsigned int enable_reg; unsigned int enable_mask; + bool enable_is_inverted; unsigned int bypass_reg; unsigned int bypass_mask; @@ -302,8 +306,7 @@ struct regulator_dev { struct dentry *debugfs; - int ena_gpio; - unsigned int ena_gpio_invert:1; + struct regulator_enable_gpio *ena_pin; unsigned int ena_gpio_state:1; }; @@ -329,6 +332,8 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev, int min_uV, int max_uV); int regulator_map_voltage_iterate(struct regulator_dev *rdev, int min_uV, int max_uV); +int regulator_map_voltage_ascend(struct regulator_dev *rdev, + int min_uV, int max_uV); int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev); int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel); int regulator_is_enabled_regmap(struct regulator_dev *rdev); diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h index 45e42855ad05..4dbb63a1d4ab 100644 --- a/include/linux/regulator/max8952.h +++ b/include/linux/regulator/max8952.h @@ -122,13 +122,13 @@ struct max8952_platform_data { int gpio_vid1; int gpio_en; - u8 default_mode; - u8 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */ + u32 default_mode; + u32 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */ - u8 sync_freq; - u8 ramp_speed; + u32 sync_freq; + u32 ramp_speed; - struct regulator_init_data reg_data; + struct regulator_init_data *reg_data; }; diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index c23099413ad6..96a509b6be04 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -13,7 +13,7 @@ * info about what this counter is. 
*/ -#include <linux/cgroup.h> +#include <linux/spinlock.h> #include <linux/errno.h> /* diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 1342e69542f3..d69cf637a15a 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -4,6 +4,7 @@ #include <linux/kmemcheck.h> #include <linux/mm.h> #include <linux/seq_file.h> +#include <linux/poll.h> struct ring_buffer; struct ring_buffer_iter; @@ -96,6 +97,11 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k __ring_buffer_alloc((size), (flags), &__key); \ }) +void ring_buffer_wait(struct ring_buffer *buffer, int cpu); +int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, + struct file *filp, poll_table *poll_table); + + #define RING_BUFFER_ALL_CPUS -1 void ring_buffer_free(struct ring_buffer *buffer); diff --git a/include/linux/sched.h b/include/linux/sched.h index 2d02c76a01be..bcbc30397f23 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1793,7 +1793,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */ #define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */ -#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */ +#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ diff --git a/include/linux/spi/spi-tegra.h b/include/linux/spi/spi-tegra.h deleted file mode 100644 index 786932c62edb..000000000000 --- a/include/linux/spi/spi-tegra.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * spi-tegra.h: SPI interface for Nvidia Tegra20 SLINK controller. - * - * Copyright (C) 2011 NVIDIA Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef _LINUX_SPI_TEGRA_H -#define _LINUX_SPI_TEGRA_H - -struct tegra_spi_platform_data { - int dma_req_sel; - unsigned int spi_max_frequency; -}; - -/* - * Controller data from device to pass some info like - * hw based chip select can be used or not and if yes - * then CS hold and setup time. - */ -struct tegra_spi_device_controller_data { - bool is_hw_based_cs; - int cs_setup_clk_count; - int cs_hold_clk_count; -}; - -#endif /* _LINUX_SPI_TEGRA_H */ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 38c2b925923d..733eb5ee31c5 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -228,6 +228,11 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * every chipselect is connected to a slave. 
 * @dma_alignment: SPI controller constraint on DMA buffers alignment.
 * @mode_bits: flags understood by this controller driver
+ * @bits_per_word_mask: A mask indicating which values of bits_per_word are
+ *	supported by the driver. Bit n indicates that a bits_per_word n+1 is
+ *	supported. If set, the SPI core will reject any transfer with an
+ *	unsupported bits_per_word. If not set, this value is simply ignored,
+ *	and it's up to the individual driver to perform any validation.
 * @flags: other constraints relevant to this driver
 * @bus_lock_spinlock: spinlock for SPI bus locking
 * @bus_lock_mutex: mutex for SPI bus locking
@@ -301,6 +306,9 @@ struct spi_master {
 	/* spi_device.mode flags understood by this controller driver */
 	u16			mode_bits;
+	/* bitmask of supported bits_per_word for transfers */
+	u32			bits_per_word_mask;
+
 	/* other constraints relevant to this driver */
 	u16			flags;
 #define SPI_MASTER_HALF_DUPLEX	BIT(0)	/* can't do full duplex */
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
index d563f37e1a1d..1d7ca2739272 100644
--- a/include/linux/trace_clock.h
+++ b/include/linux/trace_clock.h
@@ -16,6 +16,7 @@
 extern u64 notrace trace_clock_local(void);
 extern u64 notrace trace_clock(void);
+extern u64 notrace trace_clock_jiffies(void);
 extern u64 notrace trace_clock_global(void);
 extern u64 notrace trace_clock_counter(void);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 8afab27cdbc2..717975639378 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -11,6 +11,7 @@
 #include <linux/lockdep.h>
 #include <linux/threads.h>
 #include <linux/atomic.h>
+#include <linux/cpumask.h>
 struct workqueue_struct;
@@ -68,7 +69,7 @@ enum {
 	WORK_STRUCT_COLOR_BITS,
 	/* data contains off-queue information when !WORK_STRUCT_PWQ */
-	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_FLAG_BITS,
+	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,
 	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),
@@ -115,6 +116,20 @@ struct delayed_work {
 	int cpu;
 };
+/*
+ * A struct for workqueue attributes. This can be used to change
+ * attributes of an unbound workqueue.
+ *
+ * Unlike other fields, ->no_numa isn't a property of a worker_pool. It
+ * only modifies how apply_workqueue_attrs() selects pools and thus doesn't
+ * participate in pool hash calculations or equality comparisons.
+ */
+struct workqueue_attrs {
+	int		nice;		/* nice level */
+	cpumask_var_t	cpumask;	/* allowed CPUs */
+	bool		no_numa;	/* disable NUMA affinity */
+};
+
 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
 {
 	return container_of(work, struct delayed_work, work);
@@ -283,9 +298,10 @@ enum {
 	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
+	WQ_SYSFS		= 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
-	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
-	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */
+	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
+	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
@@ -388,7 +404,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
 #define alloc_ordered_workqueue(fmt, flags, args...)		\
-	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
+	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
 #define create_workqueue(name)					\
 	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
@@ -399,30 +415,23 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 extern void destroy_workqueue(struct workqueue_struct *wq);
+struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
+void free_workqueue_attrs(struct workqueue_attrs *attrs);
+int apply_workqueue_attrs(struct workqueue_struct *wq,
+			  const struct workqueue_attrs *attrs);
+
 extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
 			struct work_struct *work);
-extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *work, unsigned long delay);
-extern bool queue_delayed_work(struct workqueue_struct *wq,
-			struct delayed_work *work, unsigned long delay);
 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay);
-extern bool mod_delayed_work(struct workqueue_struct *wq,
-			struct delayed_work *dwork, unsigned long delay);
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
-extern bool schedule_work_on(int cpu, struct work_struct *work);
-extern bool schedule_work(struct work_struct *work);
-extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
-				     unsigned long delay);
-extern bool schedule_delayed_work(struct delayed_work *work,
-				  unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
-extern int keventd_up(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
@@ -435,9 +444,121 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
 				     int max_active);
-extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
+extern bool current_is_workqueue_rescuer(void);
+extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
 extern unsigned int work_busy(struct work_struct *work);
+/**
+ * queue_work - queue work on a workqueue
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns %false if @work was already on a queue, %true otherwise.
+ *
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
+ */
+static inline bool queue_work(struct workqueue_struct *wq,
+			      struct work_struct *work)
+{
+	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
+}
+
+/**
+ * queue_delayed_work - queue work on a workqueue after delay
+ * @wq: workqueue to use
+ * @dwork: delayable work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
+ */
+static inline bool queue_delayed_work(struct workqueue_struct *wq,
+				      struct delayed_work *dwork,
+				      unsigned long delay)
+{
+	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+
+/**
+ * mod_delayed_work - modify delay of or queue a delayed work
+ * @wq: workqueue to use
+ * @dwork: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * mod_delayed_work_on() on local CPU.
+ */
+static inline bool mod_delayed_work(struct workqueue_struct *wq,
+				    struct delayed_work *dwork,
+				    unsigned long delay)
+{
+	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+
+/**
+ * schedule_work_on - put work task on a specific cpu
+ * @cpu: cpu to put the work task on
+ * @work: job to be done
+ *
+ * This puts a job on a specific cpu
+ */
+static inline bool schedule_work_on(int cpu, struct work_struct *work)
+{
+	return queue_work_on(cpu, system_wq, work);
+}
+
+/**
+ * schedule_work - put work task in global workqueue
+ * @work: job to be done
+ *
+ * Returns %false if @work was already on the kernel-global workqueue and
+ * %true otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
+ */
+static inline bool schedule_work(struct work_struct *work)
+{
+	return queue_work(system_wq, work);
+}
+
+/**
+ * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
+ * @cpu: cpu to use
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue on the specified CPU.
+ */
+static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
+					    unsigned long delay)
+{
+	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
+}
+
+/**
+ * schedule_delayed_work - put work task in global workqueue after delay
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ */
+static inline bool schedule_delayed_work(struct delayed_work *dwork,
+					 unsigned long delay)
+{
+	return queue_delayed_work(system_wq, dwork, delay);
+}
+
+/**
+ * keventd_up - is workqueue initialized yet?
+ */
+static inline bool keventd_up(void)
+{
+	return system_wq != NULL;
+}
+
 /*
  * Like above, but uses del_timer() instead of del_timer_sync().  This means,
  * if it returns 0 the timer function may be running and the queueing is in
@@ -466,12 +587,12 @@ static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwo
 }
 #ifndef CONFIG_SMP
-static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
 	return fn(arg);
 }
 #else
-long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
+long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
 #endif /* CONFIG_SMP */
 #ifdef CONFIG_FREEZER
@@ -480,4 +601,11 @@ extern bool freeze_workqueues_busy(void);
 extern void thaw_workqueues(void);
 #endif /* CONFIG_FREEZER */
+#ifdef CONFIG_SYSFS
+int workqueue_sysfs_register(struct workqueue_struct *wq);
+#else /* CONFIG_SYSFS */
+static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
+{ return 0; }
+#endif /* CONFIG_SYSFS */
+
 #endif
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
index 41a7dbd570e2..a43a2f67bd8e 100644
--- a/include/trace/events/regmap.h
+++ b/include/trace/events/regmap.h
@@ -175,6 +175,54 @@ DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
 );
+DECLARE_EVENT_CLASS(regmap_async,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__string( name, dev_name(dev) )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+	),
+
+	TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(regmap_block, regmap_async_write_start,
+
+	TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+	TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_io_complete,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_start,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_done,
+
+	TP_PROTO(struct device *dev),
+
+	TP_ARGS(dev)
+
+);
+
 #endif /* _TRACE_REGMAP_H */
 /* This part must be outside protection */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 40dc5e8fe340..19edd7facaa1 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -227,29 +227,18 @@ static notrace enum print_line_t \
 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
 			 struct trace_event *trace_event) \
 { \
-	struct ftrace_event_call *event; \
 	struct trace_seq *s = &iter->seq; \
+	struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
 	struct ftrace_raw_##call *field; \
-	struct trace_entry *entry; \
-	struct trace_seq *p = &iter->tmp_seq; \
 	int ret; \
 	\
-	event = container_of(trace_event, struct ftrace_event_call, \
-			     event); \
-	\
-	entry = iter->ent; \
-	\
-	if (entry->type != event->event.type) { \
-		WARN_ON_ONCE(1); \
-		return TRACE_TYPE_UNHANDLED; \
-	} \
-	\
-	field = (typeof(field))entry; \
+	field = (typeof(field))iter->ent; \
 	\
-	trace_seq_init(p); \
-	ret = trace_seq_printf(s, "%s: ", event->name); \
+	ret = ftrace_raw_output_prep(iter, trace_event); \
 	if (ret) \
-		ret = trace_seq_printf(s, print); \
+		return ret; \
+	\
+	ret = trace_seq_printf(s, print); \
 	if (!ret) \
 		return TRACE_TYPE_PARTIAL_LINE; \
 	\
@@ -335,7 +324,7 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int notrace \
+static int notrace __init \
 ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
 { \
 	struct ftrace_raw_##call field; \
@@ -414,7 +403,8 @@ static inline notrace int ftrace_get_offsets_##call( \
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
- *	struct ftrace_event_call *event_call = __data;
+ *	struct ftrace_event_file *ftrace_file = __data;
+ *	struct ftrace_event_call *event_call = ftrace_file->event_call;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
@@ -423,12 +413,16 @@ static inline notrace int ftrace_get_offsets_##call( \
 *	int __data_size;
 *	int pc;
 *
+ *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ *		     &ftrace_file->flags))
+ *		return;
+ *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
- *	event = trace_current_buffer_lock_reserve(&buffer,
+ *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
@@ -440,7 +434,7 @@ static inline notrace int ftrace_get_offsets_##call( \
 *	__array macros.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
- *		trace_current_buffer_unlock_commit(buffer,
+ *		trace_nowake_buffer_unlock_commit(buffer,
 *				event, irq_flags, pc);
 * }
 *
@@ -518,7 +512,8 @@ static inline notrace int ftrace_get_offsets_##call( \
 static notrace void \
 ftrace_raw_event_##call(void *__data, proto) \
 { \
-	struct ftrace_event_call *event_call = __data; \
+	struct ftrace_event_file *ftrace_file = __data; \
+	struct ftrace_event_call *event_call = ftrace_file->event_call; \
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ring_buffer_event *event; \
 	struct ftrace_raw_##call *entry; \
@@ -527,12 +522,16 @@ ftrace_raw_event_##call(void *__data, proto) \
 	int __data_size; \
 	int pc; \
 	\
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
+		     &ftrace_file->flags)) \
+		return; \
+	\
 	local_save_flags(irq_flags); \
 	pc = preempt_count(); \
 	\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	\
-	event = trace_current_buffer_lock_reserve(&buffer, \
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, \
 				 event_call->event.type, \
 				 sizeof(*entry) + __data_size, \
 				 irq_flags, pc); \
@@ -581,7 +580,7 @@ static inline void ftrace_test_probe_##call(void) \
 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 _TRACE_PERF_PROTO(call, PARAMS(proto)); \
 static const char print_fmt_##call[] = print; \
-static struct ftrace_event_class __used event_class_##call = { \
+static struct ftrace_event_class __used __refdata event_class_##call = { \
 	.system = __stringify(TRACE_SYSTEM), \
 	.define_fields = ftrace_define_fields_##call, \
 	.fields = LIST_HEAD_INIT(event_class_##call.fields),\
@@ -705,5 +704,3 @@ static inline void perf_test_probe_##call(void) \
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 #endif /* CONFIG_PERF_EVENTS */
-#undef _TRACE_PROFILE_INIT
-
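
As an illustrative sketch (not taken from the patches above), a caller might combine the new unbound-workqueue attribute interface declared in include/linux/workqueue.h - alloc_workqueue_attrs(), apply_workqueue_attrs(), free_workqueue_attrs() and the WQ_SYSFS flag - roughly as follows. The queue name, nice value and cpumask used here are assumptions made for the example only.

/* Hypothetical usage sketch: retune an unbound workqueue with the
 * workqueue_attrs API declared in the workqueue.h hunks above.
 * Identifiers such as example_wq and example_setup_unbound_wq() are
 * illustrative, not from the patch; assumes a normal kernel build.
 */
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

static struct workqueue_struct *example_wq;

static int example_setup_unbound_wq(void)
{
	struct workqueue_attrs *attrs;
	int ret;

	/* Attributes only apply to unbound workqueues; WQ_SYSFS also
	 * makes the same knobs visible to userspace via sysfs. */
	example_wq = alloc_workqueue("example_unbound",
				     WQ_UNBOUND | WQ_SYSFS, 0);
	if (!example_wq)
		return -ENOMEM;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs) {
		destroy_workqueue(example_wq);
		return -ENOMEM;
	}

	attrs->nice = -5;				/* slightly boosted workers */
	cpumask_copy(attrs->cpumask, cpu_online_mask);	/* restrict allowed CPUs */
	attrs->no_numa = true;				/* ignore NUMA locality */

	ret = apply_workqueue_attrs(example_wq, attrs);	/* reselect backing pools */
	free_workqueue_attrs(attrs);
	if (ret)
		destroy_workqueue(example_wq);
	return ret;
}

Work submitted to such a queue then goes through the queue_work()/queue_delayed_work() wrappers that this series converts from externs into static inlines over their _on() counterparts.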