Diffstat (limited to 'include/linux')
82 files changed, 2088 insertions, 392 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index b0972c4ce81c..d9099b15b472 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -44,6 +44,20 @@ #include <acpi/acpi_numa.h> #include <asm/acpi.h> +static inline acpi_handle acpi_device_handle(struct acpi_device *adev) +{ + return adev ? adev->handle : NULL; +} + +#define ACPI_COMPANION(dev) ((dev)->acpi_node.companion) +#define ACPI_COMPANION_SET(dev, adev) ACPI_COMPANION(dev) = (adev) +#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) + +static inline const char *acpi_dev_name(struct acpi_device *adev) +{ + return dev_name(&adev->dev); +} + enum acpi_irq_model_id { ACPI_IRQ_MODEL_PIC = 0, ACPI_IRQ_MODEL_IOAPIC, @@ -401,6 +415,15 @@ static inline bool acpi_driver_match_device(struct device *dev, #define acpi_disabled 1 +#define ACPI_COMPANION(dev) (NULL) +#define ACPI_COMPANION_SET(dev, adev) do { } while (0) +#define ACPI_HANDLE(dev) (NULL) + +static inline const char *acpi_dev_name(struct acpi_device *adev) +{ + return NULL; +} + static inline void acpi_early_init(void) { } static inline int early_acpi_boot_init(void) diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index 62d9303c2837..0ddb5c02ad8b 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h @@ -40,7 +40,7 @@ #define UART010_LCRL 0x10 /* Line control register, low byte. */ #define UART010_CR 0x14 /* Control register. */ #define UART01x_FR 0x18 /* Flag register (Read only). */ -#define UART010_IIR 0x1C /* Interrupt indentification register (Read). */ +#define UART010_IIR 0x1C /* Interrupt identification register (Read). */ #define UART010_ICR 0x1C /* Interrupt clear register (Write). */ #define ST_UART011_LCRH_RX 0x1C /* Rx line control register. */ #define UART01x_ILPR 0x20 /* IrDA low power counter register. */ diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h new file mode 100644 index 000000000000..9a193b84238a --- /dev/null +++ b/include/linux/assoc_array.h @@ -0,0 +1,92 @@ +/* Generic associative array implementation. + * + * See Documentation/assoc_array.txt for information. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_ASSOC_ARRAY_H +#define _LINUX_ASSOC_ARRAY_H + +#ifdef CONFIG_ASSOCIATIVE_ARRAY + +#include <linux/types.h> + +#define ASSOC_ARRAY_KEY_CHUNK_SIZE BITS_PER_LONG /* Key data retrieved in chunks of this size */ + +/* + * Generic associative array. + */ +struct assoc_array { + struct assoc_array_ptr *root; /* The node at the root of the tree */ + unsigned long nr_leaves_on_tree; +}; + +/* + * Operations on objects and index keys for use by array manipulation routines. + */ +struct assoc_array_ops { + /* Method to get a chunk of an index key from caller-supplied data */ + unsigned long (*get_key_chunk)(const void *index_key, int level); + + /* Method to get a piece of an object's index key */ + unsigned long (*get_object_key_chunk)(const void *object, int level); + + /* Is this the object we're looking for? */ + bool (*compare_object)(const void *object, const void *index_key); + + /* How different are two objects, to a bit position in their keys? 
(or + * -1 if they're the same) + */ + int (*diff_objects)(const void *a, const void *b); + + /* Method to free an object. */ + void (*free_object)(void *object); +}; + +/* + * Access and manipulation functions. + */ +struct assoc_array_edit; + +static inline void assoc_array_init(struct assoc_array *array) +{ + array->root = NULL; + array->nr_leaves_on_tree = 0; +} + +extern int assoc_array_iterate(const struct assoc_array *array, + int (*iterator)(const void *object, + void *iterator_data), + void *iterator_data); +extern void *assoc_array_find(const struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); +extern void assoc_array_destroy(struct assoc_array *array, + const struct assoc_array_ops *ops); +extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key, + void *object); +extern void assoc_array_insert_set_object(struct assoc_array_edit *edit, + void *object); +extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array, + const struct assoc_array_ops *ops, + const void *index_key); +extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array, + const struct assoc_array_ops *ops); +extern void assoc_array_apply_edit(struct assoc_array_edit *edit); +extern void assoc_array_cancel_edit(struct assoc_array_edit *edit); +extern int assoc_array_gc(struct assoc_array *array, + const struct assoc_array_ops *ops, + bool (*iterator)(void *object, void *iterator_data), + void *iterator_data); + +#endif /* CONFIG_ASSOCIATIVE_ARRAY */ +#endif /* _LINUX_ASSOC_ARRAY_H */ diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h new file mode 100644 index 000000000000..711275e6681c --- /dev/null +++ b/include/linux/assoc_array_priv.h @@ -0,0 +1,182 @@ +/* Private definitions for the generic associative array implementation. + * + * See Documentation/assoc_array.txt for information. + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_ASSOC_ARRAY_PRIV_H +#define _LINUX_ASSOC_ARRAY_PRIV_H + +#ifdef CONFIG_ASSOCIATIVE_ARRAY + +#include <linux/assoc_array.h> + +#define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */ +#define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1) +#define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT)) +#define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1) +#define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1) +#define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG)) + +/* + * Undefined type representing a pointer with type information in the bottom + * two bits. + */ +struct assoc_array_ptr; + +/* + * An N-way node in the tree. + * + * Each slot contains one of four things: + * + * (1) Nothing (NULL). + * + * (2) A leaf object (pointer types 0). + * + * (3) A next-level node (pointer type 1, subtype 0). + * + * (4) A shortcut (pointer type 1, subtype 1). + * + * The tree is optimised for search-by-ID, but permits reasonable iteration + * also. + * + * The tree is navigated by constructing an index key consisting of an array of + * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size. 
+ * + * The segments correspond to levels of the tree (the first segment is used at + * level 0, the second at level 1, etc.). + */ +struct assoc_array_node { + struct assoc_array_ptr *back_pointer; + u8 parent_slot; + struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT]; + unsigned long nr_leaves_on_branch; +}; + +/* + * A shortcut through the index space out to where a collection of nodes/leaves + * with the same IDs live. + */ +struct assoc_array_shortcut { + struct assoc_array_ptr *back_pointer; + int parent_slot; + int skip_to_level; + struct assoc_array_ptr *next_node; + unsigned long index_key[]; +}; + +/* + * Preallocation cache. + */ +struct assoc_array_edit { + struct rcu_head rcu; + struct assoc_array *array; + const struct assoc_array_ops *ops; + const struct assoc_array_ops *ops_for_excised_subtree; + struct assoc_array_ptr *leaf; + struct assoc_array_ptr **leaf_p; + struct assoc_array_ptr *dead_leaf; + struct assoc_array_ptr *new_meta[3]; + struct assoc_array_ptr *excised_meta[1]; + struct assoc_array_ptr *excised_subtree; + struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT]; + struct assoc_array_ptr *set_backpointers_to; + struct assoc_array_node *adjust_count_on; + long adjust_count_by; + struct { + struct assoc_array_ptr **ptr; + struct assoc_array_ptr *to; + } set[2]; + struct { + u8 *p; + u8 to; + } set_parent_slot[1]; + u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1]; +}; + +/* + * Internal tree member pointers are marked in the bottom one or two bits to + * indicate what type they are so that we don't have to look behind every + * pointer to see what it points to. + * + * We provide functions to test type annotations and to create and translate + * the annotated pointers. + */ +#define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL +#define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL /* Points to leaf (or nowhere) */ +#define ASSOC_ARRAY_PTR_META_TYPE 0x1UL /* Points to node or shortcut */ +#define ASSOC_ARRAY_PTR_SUBTYPE_MASK 0x2UL +#define ASSOC_ARRAY_PTR_NODE_SUBTYPE 0x0UL +#define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL + +static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK; +} +static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x) +{ + return !assoc_array_ptr_is_meta(x); +} +static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK; +} +static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x) +{ + return !assoc_array_ptr_is_shortcut(x); +} + +static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x) +{ + return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK); +} + +static inline +unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & + ~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK); +} +static inline +struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x) +{ + return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x); +} +static inline +struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x) +{ + return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x); +} + +static inline +struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t) +{ + return (struct assoc_array_ptr *)((unsigned long)p | t); +} +static inline +struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p) +{ + return 
__assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE); +} +static inline +struct assoc_array_ptr *assoc_array_node_to_ptr(const struct assoc_array_node *p) +{ + return __assoc_array_x_to_ptr( + p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE); +} +static inline +struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p) +{ + return __assoc_array_x_to_ptr( + p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE); +} + +#endif /* CONFIG_ASSOCIATIVE_ARRAY */ +#endif /* _LINUX_ASSOC_ARRAY_PRIV_H */ diff --git a/include/linux/audit.h b/include/linux/audit.h index 729a4d165bcc..a40641954c29 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -73,6 +73,8 @@ struct audit_field { void *lsm_rule; }; +extern int is_audit_feature_set(int which); + extern int __init audit_register_class(int class, unsigned *list); extern int audit_classify_syscall(int abi, unsigned syscall); extern int audit_classify_arch(int arch); @@ -207,7 +209,7 @@ static inline int audit_get_sessionid(struct task_struct *tsk) extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); -extern int __audit_bprm(struct linux_binprm *bprm); +extern void __audit_bprm(struct linux_binprm *bprm); extern int __audit_socketcall(int nargs, unsigned long *args); extern int __audit_sockaddr(int len, void *addr); extern void __audit_fd_pair(int fd1, int fd2); @@ -236,11 +238,10 @@ static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid if (unlikely(!audit_dummy_context())) __audit_ipc_set_perm(qbytes, uid, gid, mode); } -static inline int audit_bprm(struct linux_binprm *bprm) +static inline void audit_bprm(struct linux_binprm *bprm) { if (unlikely(!audit_dummy_context())) - return __audit_bprm(bprm); - return 0; + __audit_bprm(bprm); } static inline int audit_socketcall(int nargs, unsigned long *args) { @@ -367,10 +368,8 @@ static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode) { } -static inline int audit_bprm(struct linux_binprm *bprm) -{ - return 0; -} +static inline void audit_bprm(struct linux_binprm *bprm) +{ } static inline int audit_socketcall(int nargs, unsigned long *args) { return 0; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f26ec20f6354..1b135d49b279 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -505,6 +505,9 @@ struct request_queue { (1 << QUEUE_FLAG_SAME_COMP) | \ (1 << QUEUE_FLAG_ADD_RANDOM)) +#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << QUEUE_FLAG_SAME_COMP)) + static inline void queue_lockdep_assert_held(struct request_queue *q) { if (q->queue_lock) diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h index 98e892ef6d5a..a0f9280421ec 100644 --- a/include/linux/cmdline-parser.h +++ b/include/linux/cmdline-parser.h @@ -8,6 +8,8 @@ #define CMDLINEPARSEH #include <linux/blkdev.h> +#include <linux/fs.h> +#include <linux/slab.h> /* partition flags */ #define PF_RDONLY 0x01 /* Device is read only */ diff --git a/include/linux/completion.h b/include/linux/completion.h index 22c33e35bcb2..5d5aaae3af43 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -19,8 +19,8 @@ * * See also: complete(), wait_for_completion() (and friends _timeout, * _interruptible, _interruptible_timeout, and _killable), init_completion(), - * and macros 
DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and - * INIT_COMPLETION(). + * reinit_completion(), and macros DECLARE_COMPLETION(), + * DECLARE_COMPLETION_ONSTACK(). */ struct completion { unsigned int done; @@ -65,7 +65,7 @@ struct completion { /** * init_completion - Initialize a dynamically allocated completion - * @x: completion structure that is to be initialized + * @x: pointer to completion structure that is to be initialized * * This inline function will initialize a dynamically created completion * structure. @@ -76,6 +76,18 @@ static inline void init_completion(struct completion *x) init_waitqueue_head(&x->wait); } +/** + * reinit_completion - reinitialize a completion structure + * @x: pointer to completion structure that is to be reinitialized + * + * This inline function should be used to reinitialize a completion structure so it can + * be reused. This is especially important after complete_all() is used. + */ +static inline void reinit_completion(struct completion *x) +{ + x->done = 0; +} + extern void wait_for_completion(struct completion *); extern void wait_for_completion_io(struct completion *); extern int wait_for_completion_interruptible(struct completion *x); @@ -94,14 +106,4 @@ extern bool completion_done(struct completion *x); extern void complete(struct completion *); extern void complete_all(struct completion *); -/** - * INIT_COMPLETION - reinitialize a completion structure - * @x: completion structure to be reinitialized - * - * This macro should be used to reinitialize a completion structure so it can - * be reused. This is especially important after complete_all() is used. - */ -#define INIT_COMPLETION(x) ((x).done = 0) - - #endif diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 5bd6ab9b0c27..dc196bbcf227 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -107,8 +107,16 @@ struct cpufreq_policy { #define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ +#ifdef CONFIG_CPU_FREQ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); void cpufreq_cpu_put(struct cpufreq_policy *policy); +#else +static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) +{ + return NULL; +} +static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { } +#endif static inline bool policy_is_shared(struct cpufreq_policy *policy) { diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 7a7cc74d7f27..d48dc00232a4 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -168,7 +168,7 @@ struct devfreq { unsigned long max_freq; bool stop_polling; - /* information for device freqeuncy transition */ + /* information for device frequency transition */ unsigned int total_trans; unsigned int *trans_table; unsigned long *time_in_state; diff --git a/include/linux/device.h b/include/linux/device.h index b025925df7f7..952b01033c32 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -644,9 +644,11 @@ struct device_dma_parameters { unsigned long segment_boundary_mask; }; +struct acpi_device; + struct acpi_dev_node { #ifdef CONFIG_ACPI - void *handle; + struct acpi_device *companion; #endif }; @@ -790,14 +792,6 @@ static inline struct device *kobj_to_dev(struct kobject *kobj) return container_of(kobj, struct device, kobj); } -#ifdef CONFIG_ACPI -#define ACPI_HANDLE(dev) ((dev)->acpi_node.handle) -#define ACPI_HANDLE_SET(dev, _handle_) (dev)->acpi_node.handle = (_handle_) -#else -#define 
ACPI_HANDLE(dev) (NULL) -#define ACPI_HANDLE_SET(dev, _handle_) do { } while (0) -#endif - /* Get the wakeup routines, which depend on struct device */ #include <linux/pm_wakeup.h> diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 0bc727534108..41cf0c399288 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie) /** * enum dma_status - DMA transaction status - * @DMA_SUCCESS: transaction completed successfully + * @DMA_COMPLETE: transaction completed * @DMA_IN_PROGRESS: transaction not yet processed * @DMA_PAUSED: transaction is paused * @DMA_ERROR: transaction failed */ enum dma_status { - DMA_SUCCESS, + DMA_COMPLETE, DMA_IN_PROGRESS, DMA_PAUSED, DMA_ERROR, @@ -171,12 +171,6 @@ struct dma_interleaved_template { * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client * acknowledges receipt, i.e. has has a chance to establish any dependency * chains - * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) - * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) - * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single - * (if not set, do the source dma-unmapping as page) - * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single - * (if not set, do the destination dma-unmapping as page) * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as @@ -188,14 +182,10 @@ struct dma_interleaved_template { enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), - DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), - DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), - DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), - DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), - DMA_PREP_PQ_DISABLE_P = (1 << 6), - DMA_PREP_PQ_DISABLE_Q = (1 << 7), - DMA_PREP_CONTINUE = (1 << 8), - DMA_PREP_FENCE = (1 << 9), + DMA_PREP_PQ_DISABLE_P = (1 << 2), + DMA_PREP_PQ_DISABLE_Q = (1 << 3), + DMA_PREP_CONTINUE = (1 << 4), + DMA_PREP_FENCE = (1 << 5), }; /** @@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref); typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); typedef void (*dma_async_tx_callback)(void *dma_async_param); + +struct dmaengine_unmap_data { + u8 to_cnt; + u8 from_cnt; + u8 bidi_cnt; + struct device *dev; + struct kref kref; + size_t len; + dma_addr_t addr[0]; +}; + /** * struct dma_async_tx_descriptor - async transaction descriptor * ---dma generic offload fields--- @@ -438,6 +439,7 @@ struct dma_async_tx_descriptor { dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); dma_async_tx_callback callback; void *callback_param; + struct dmaengine_unmap_data *unmap; #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent; @@ -445,6 +447,40 @@ struct dma_async_tx_descriptor { #endif }; +#ifdef CONFIG_DMA_ENGINE +static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, + struct dmaengine_unmap_data *unmap) +{ + kref_get(&unmap->kref); + tx->unmap = unmap; +} + +struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags); +void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap); +#else +static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, + struct dmaengine_unmap_data *unmap) +{ +} 
+static inline struct dmaengine_unmap_data * +dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) +{ + return NULL; +} +static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) +{ +} +#endif + +static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) +{ + if (tx->unmap) { + dmaengine_unmap_put(tx->unmap); + tx->unmap = NULL; + } +} + #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH static inline void txd_lock(struct dma_async_tx_descriptor *txd) { @@ -979,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, { if (last_complete <= last_used) { if ((cookie <= last_complete) || (cookie > last_used)) - return DMA_SUCCESS; + return DMA_COMPLETE; } else { if ((cookie <= last_complete) && (cookie > last_used)) - return DMA_SUCCESS; + return DMA_COMPLETE; } return DMA_IN_PROGRESS; } @@ -1013,11 +1049,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ } static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) { - return DMA_SUCCESS; + return DMA_COMPLETE; } static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) { - return DMA_SUCCESS; + return DMA_COMPLETE; } static inline void dma_issue_pending_all(void) { diff --git a/include/linux/export.h b/include/linux/export.h index 412cd509effe..3f2793d51899 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -43,7 +43,7 @@ extern struct module __this_module; /* Mark the CRC weak since genksyms apparently decides not to * generate a checksums for some symbols */ #define __CRC_SYMBOL(sym, sec) \ - extern void *__crc_##sym __attribute__((weak)); \ + extern __visible void *__crc_##sym __attribute__((weak)); \ static const unsigned long __kcrctab_##sym \ __used \ __attribute__((section("___kcrctab" sec "+" #sym), unused)) \ @@ -59,7 +59,7 @@ extern struct module __this_module; static const char __kstrtab_##sym[] \ __attribute__((section("__ksymtab_strings"), aligned(1))) \ = VMLINUX_SYMBOL_STR(sym); \ - static const struct kernel_symbol __ksymtab_##sym \ + __visible const struct kernel_symbol __ksymtab_##sym \ __used \ __attribute__((section("___ksymtab" sec "+" #sym), unused)) \ = { (unsigned long)&sym, __kstrtab_##sym } diff --git a/include/linux/fs.h b/include/linux/fs.h index bf5d574ebdf4..121f11f001c0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2622,7 +2622,9 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping, extern int simple_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); +extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); +extern const struct dentry_operations simple_dentry_operations; extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9f15c0064c50..31ea4b428360 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -533,11 +533,11 @@ static inline int ftrace_force_update(void) { return 0; } static inline void ftrace_disable_daemon(void) { } static inline void ftrace_enable_daemon(void) { } static inline void ftrace_release_mod(struct module *mod) {} -static inline int register_ftrace_command(struct ftrace_func_command *cmd) +static inline __init int 
register_ftrace_command(struct ftrace_func_command *cmd) { return -EINVAL; } -static inline int unregister_ftrace_command(char *cmd_name) +static inline __init int unregister_ftrace_command(char *cmd_name) { return -EINVAL; } @@ -721,6 +721,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, extern char __irqentry_text_start[]; extern char __irqentry_text_end[]; +#define FTRACE_NOTRACE_DEPTH 65536 #define FTRACE_RETFUNC_DEPTH 50 #define FTRACE_RETSTACK_ALLOC_SIZE 32 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 5eaa746735ff..9abbe630c456 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -202,6 +202,7 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, TRACE_EVENT_FL_WAS_ENABLED_BIT, + TRACE_EVENT_FL_USE_CALL_FILTER_BIT, }; /* @@ -213,6 +214,7 @@ enum { * WAS_ENABLED - Set and stays set when an event was ever enabled * (used for module unloading, if a module event is enabled, * it is best to clear the buffers that used it). + * USE_CALL_FILTER - For ftrace internal events, don't use file filter */ enum { TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), @@ -220,6 +222,7 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT), + TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT), }; struct ftrace_event_call { @@ -238,6 +241,7 @@ struct ftrace_event_call { * bit 2: failed to apply filter * bit 3: ftrace internal event (do not enable) * bit 4: Event was enabled by module + * bit 5: use call filter rather than file filter */ int flags; /* static flags of different events */ @@ -253,6 +257,8 @@ struct ftrace_subsystem_dir; enum { FTRACE_EVENT_FL_ENABLED_BIT, FTRACE_EVENT_FL_RECORDED_CMD_BIT, + FTRACE_EVENT_FL_FILTERED_BIT, + FTRACE_EVENT_FL_NO_SET_FILTER_BIT, FTRACE_EVENT_FL_SOFT_MODE_BIT, FTRACE_EVENT_FL_SOFT_DISABLED_BIT, }; @@ -261,6 +267,8 @@ enum { * Ftrace event file flags: * ENABLED - The event is enabled * RECORDED_CMD - The comms should be recorded at sched_switch + * FILTERED - The event has a filter attached + * NO_SET_FILTER - Set when filter has error and is to be ignored * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED * SOFT_DISABLED - When set, do not trace the event (even though its * tracepoint may be enabled) @@ -268,6 +276,8 @@ enum { enum { FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT), FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT), + FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT), + FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT), FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT), FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT), }; @@ -275,6 +285,7 @@ enum { struct ftrace_event_file { struct list_head list; struct ftrace_event_call *event_call; + struct event_filter *filter; struct dentry *dir; struct trace_array *tr; struct ftrace_subsystem_dir *system; @@ -310,12 +321,16 @@ struct ftrace_event_file { #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ -extern void destroy_preds(struct ftrace_event_call *call); +extern void destroy_preds(struct ftrace_event_file *file); +extern void destroy_call_preds(struct ftrace_event_call *call); extern int 
filter_match_preds(struct event_filter *filter, void *rec); -extern int filter_current_check_discard(struct ring_buffer *buffer, - struct ftrace_event_call *call, - void *rec, - struct ring_buffer_event *event); + +extern int filter_check_discard(struct ftrace_event_file *file, void *rec, + struct ring_buffer *buffer, + struct ring_buffer_event *event); +extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec, + struct ring_buffer *buffer, + struct ring_buffer_event *event); enum { FILTER_OTHER = 0, diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 023bc346b877..c0894dd8827b 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h @@ -273,49 +273,40 @@ static struct genl_family ZZZ_genl_family __read_mostly = { * Magic: define multicast groups * Magic: define multicast group registration helper */ +#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps) +static const struct genl_multicast_group ZZZ_genl_mcgrps[] = { +#undef GENL_mc_group +#define GENL_mc_group(group) { .name = #group, }, +#include GENL_MAGIC_INCLUDE_FILE +}; + +enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) { +#undef GENL_mc_group +#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group), +#include GENL_MAGIC_INCLUDE_FILE +}; + #undef GENL_mc_group #define GENL_mc_group(group) \ -static struct genl_multicast_group \ -CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group) __read_mostly = { \ - .name = #group, \ -}; \ static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \ struct sk_buff *skb, gfp_t flags) \ { \ unsigned int group_id = \ - CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id; \ - if (!group_id) \ - return -EINVAL; \ - return genlmsg_multicast(skb, 0, group_id, flags); \ + CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \ + return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \ + group_id, flags); \ } #include GENL_MAGIC_INCLUDE_FILE -int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void) -{ - int err = genl_register_family_with_ops(&ZZZ_genl_family, - ZZZ_genl_ops, ARRAY_SIZE(ZZZ_genl_ops)); - if (err) - return err; -#undef GENL_mc_group -#define GENL_mc_group(group) \ - err = genl_register_mc_group(&ZZZ_genl_family, \ - &CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group)); \ - if (err) \ - goto fail; \ - else \ - pr_info("%s: mcg %s: %u\n", #group, \ - __stringify(GENL_MAGIC_FAMILY), \ - CONCAT_(GENL_MAGIC_FAMILY, _mcg_ ## group).id); - -#include GENL_MAGIC_INCLUDE_FILE - #undef GENL_mc_group #define GENL_mc_group(group) - return 0; -fail: - genl_unregister_family(&ZZZ_genl_family); - return err; + +int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void) +{ + return genl_register_family_with_ops_groups(&ZZZ_genl_family, \ + ZZZ_genl_ops, \ + ZZZ_genl_mcgrps); } void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void) diff --git a/include/linux/host1x.h b/include/linux/host1x.h new file mode 100644 index 000000000000..f5b9b87ac9a9 --- /dev/null +++ b/include/linux/host1x.h @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef __LINUX_HOST1X_H +#define __LINUX_HOST1X_H + +#include <linux/device.h> +#include <linux/types.h> + +enum host1x_class { + HOST1X_CLASS_HOST1X = 0x1, + HOST1X_CLASS_GR2D = 0x51, + HOST1X_CLASS_GR2D_SB = 0x52, + HOST1X_CLASS_GR3D = 0x60, +}; + +struct host1x_client; + +struct host1x_client_ops { + int (*init)(struct host1x_client *client); + int (*exit)(struct host1x_client *client); +}; + +struct host1x_client { + struct list_head list; + struct device *parent; + struct device *dev; + + const struct host1x_client_ops *ops; + + enum host1x_class class; + struct host1x_channel *channel; + + struct host1x_syncpt **syncpts; + unsigned int num_syncpts; +}; + +/* + * host1x buffer objects + */ + +struct host1x_bo; +struct sg_table; + +struct host1x_bo_ops { + struct host1x_bo *(*get)(struct host1x_bo *bo); + void (*put)(struct host1x_bo *bo); + dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt); + void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt); + void *(*mmap)(struct host1x_bo *bo); + void (*munmap)(struct host1x_bo *bo, void *addr); + void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum); + void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr); +}; + +struct host1x_bo { + const struct host1x_bo_ops *ops; +}; + +static inline void host1x_bo_init(struct host1x_bo *bo, + const struct host1x_bo_ops *ops) +{ + bo->ops = ops; +} + +static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo) +{ + return bo->ops->get(bo); +} + +static inline void host1x_bo_put(struct host1x_bo *bo) +{ + bo->ops->put(bo); +} + +static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo, + struct sg_table **sgt) +{ + return bo->ops->pin(bo, sgt); +} + +static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) +{ + bo->ops->unpin(bo, sgt); +} + +static inline void *host1x_bo_mmap(struct host1x_bo *bo) +{ + return bo->ops->mmap(bo); +} + +static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr) +{ + bo->ops->munmap(bo, addr); +} + +static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum) +{ + return bo->ops->kmap(bo, pagenum); +} + +static inline void host1x_bo_kunmap(struct host1x_bo *bo, + unsigned int pagenum, void *addr) +{ + bo->ops->kunmap(bo, pagenum, addr); +} + +/* + * host1x syncpoints + */ + +#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0) +#define HOST1X_SYNCPT_HAS_BASE (1 << 1) + +struct host1x_syncpt_base; +struct host1x_syncpt; +struct host1x; + +struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id); +u32 host1x_syncpt_id(struct host1x_syncpt *sp); +u32 host1x_syncpt_read_min(struct host1x_syncpt *sp); +u32 host1x_syncpt_read_max(struct host1x_syncpt *sp); +int host1x_syncpt_incr(struct host1x_syncpt *sp); +int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, + u32 *value); +struct host1x_syncpt *host1x_syncpt_request(struct device *dev, + unsigned long flags); +void host1x_syncpt_free(struct host1x_syncpt *sp); + +struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp); +u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); + +/* + * host1x channel + */ + +struct host1x_channel; +struct host1x_job; + +struct host1x_channel 
*host1x_channel_request(struct device *dev); +void host1x_channel_free(struct host1x_channel *channel); +struct host1x_channel *host1x_channel_get(struct host1x_channel *channel); +void host1x_channel_put(struct host1x_channel *channel); +int host1x_job_submit(struct host1x_job *job); + +/* + * host1x job + */ + +struct host1x_reloc { + struct host1x_bo *cmdbuf; + u32 cmdbuf_offset; + struct host1x_bo *target; + u32 target_offset; + u32 shift; + u32 pad; +}; + +struct host1x_job { + /* When refcount goes to zero, job can be freed */ + struct kref ref; + + /* List entry */ + struct list_head list; + + /* Channel where job is submitted to */ + struct host1x_channel *channel; + + u32 client; + + /* Gathers and their memory */ + struct host1x_job_gather *gathers; + unsigned int num_gathers; + + /* Wait checks to be processed at submit time */ + struct host1x_waitchk *waitchk; + unsigned int num_waitchk; + u32 waitchk_mask; + + /* Array of handles to be pinned & unpinned */ + struct host1x_reloc *relocarray; + unsigned int num_relocs; + struct host1x_job_unpin_data *unpins; + unsigned int num_unpins; + + dma_addr_t *addr_phys; + dma_addr_t *gather_addr_phys; + dma_addr_t *reloc_addr_phys; + + /* Sync point id, number of increments and end related to the submit */ + u32 syncpt_id; + u32 syncpt_incrs; + u32 syncpt_end; + + /* Maximum time to wait for this job */ + unsigned int timeout; + + /* Index and number of slots used in the push buffer */ + unsigned int first_get; + unsigned int num_slots; + + /* Copy of gathers */ + size_t gather_copy_size; + dma_addr_t gather_copy; + u8 *gather_copy_mapped; + + /* Check if register is marked as an address reg */ + int (*is_addr_reg)(struct device *dev, u32 reg, u32 class); + + /* Request a SETCLASS to this class */ + u32 class; + + /* Add a channel wait for previous ops to complete */ + bool serialize; +}; + +struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, + u32 num_cmdbufs, u32 num_relocs, + u32 num_waitchks); +void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id, + u32 words, u32 offset); +struct host1x_job *host1x_job_get(struct host1x_job *job); +void host1x_job_put(struct host1x_job *job); +int host1x_job_pin(struct host1x_job *job, struct device *dev); +void host1x_job_unpin(struct host1x_job *job); + +/* + * subdevice probe infrastructure + */ + +struct host1x_device; + +struct host1x_driver { + const struct of_device_id *subdevs; + struct list_head list; + const char *name; + + int (*probe)(struct host1x_device *device); + int (*remove)(struct host1x_device *device); +}; + +int host1x_driver_register(struct host1x_driver *driver); +void host1x_driver_unregister(struct host1x_driver *driver); + +struct host1x_device { + struct host1x_driver *driver; + struct list_head list; + struct device dev; + + struct mutex subdevs_lock; + struct list_head subdevs; + struct list_head active; + + struct mutex clients_lock; + struct list_head clients; +}; + +static inline struct host1x_device *to_host1x_device(struct device *dev) +{ + return container_of(dev, struct host1x_device, dev); +} + +int host1x_device_init(struct host1x_device *device); +int host1x_device_exit(struct host1x_device *device); + +int host1x_client_register(struct host1x_client *client); +int host1x_client_unregister(struct host1x_client *client); + +#endif diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 3935428c57cf..91672e2deec3 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -54,7 +54,8 @@ enum 
page_check_address_pmd_flag { extern pmd_t *page_check_address_pmd(struct page *page, struct mm_struct *mm, unsigned long address, - enum page_check_address_pmd_flag flag); + enum page_check_address_pmd_flag flag, + spinlock_t **ptl); #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) @@ -129,15 +130,15 @@ extern void __vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next); -extern int __pmd_trans_huge_lock(pmd_t *pmd, - struct vm_area_struct *vma); +extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, + spinlock_t **ptl); /* mmap_sem must be held on entry */ -static inline int pmd_trans_huge_lock(pmd_t *pmd, - struct vm_area_struct *vma) +static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, + spinlock_t **ptl) { VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); if (pmd_trans_huge(*pmd)) - return __pmd_trans_huge_lock(pmd, vma); + return __pmd_trans_huge_lock(pmd, vma, ptl); else return 0; } @@ -215,8 +216,8 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, long adjust_next) { } -static inline int pmd_trans_huge_lock(pmd_t *pmd, - struct vm_area_struct *vma) +static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, + spinlock_t **ptl) { return 0; } diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 0393270466c3..9649ff0c63f8 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -31,6 +31,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks); void hugepage_put_subpool(struct hugepage_subpool *spool); int PageHuge(struct page *page); +int PageHeadHuge(struct page *page_head); void reset_vma_resv_huge_pages(struct vm_area_struct *vma); int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); @@ -69,7 +70,6 @@ int dequeue_hwpoisoned_huge_page(struct page *page); bool isolate_huge_page(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); bool is_hugepage_active(struct page *page); -void copy_huge_page(struct page *dst, struct page *src); #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); @@ -104,6 +104,11 @@ static inline int PageHuge(struct page *page) return 0; } +static inline int PageHeadHuge(struct page *page_head) +{ + return 0; +} + static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { } @@ -140,9 +145,6 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page) #define isolate_huge_page(p, l) false #define putback_active_hugepage(p) do {} while (0) #define is_hugepage_active(x) false -static inline void copy_huge_page(struct page *dst, struct page *src) -{ -} static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) @@ -392,6 +394,15 @@ static inline int hugepage_migration_support(struct hstate *h) return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT); } +static inline spinlock_t *huge_pte_lockptr(struct hstate *h, + struct mm_struct *mm, pte_t *pte) +{ + if (huge_page_size(h) == PMD_SIZE) + return pmd_lockptr(mm, (pmd_t *) pte); + VM_BUG_ON(huge_page_size(h) == PAGE_SIZE); + return &mm->page_table_lock; +} + #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; #define alloc_huge_page_node(h, nid) NULL @@ -401,6 +412,7 @@ struct hstate {}; #define hstate_sizelog(s) NULL #define hstate_vma(v) NULL #define 
hstate_inode(i) NULL +#define page_hstate(page) NULL #define huge_page_size(h) PAGE_SIZE #define huge_page_mask(h) PAGE_MASK #define vma_kernel_pagesize(v) PAGE_SIZE @@ -421,6 +433,22 @@ static inline pgoff_t basepage_index(struct page *page) #define dissolve_free_huge_pages(s, e) do {} while (0) #define pmd_huge_support() 0 #define hugepage_migration_support(h) 0 + +static inline spinlock_t *huge_pte_lockptr(struct hstate *h, + struct mm_struct *mm, pte_t *pte) +{ + return &mm->page_table_lock; +} #endif /* CONFIG_HUGETLB_PAGE */ +static inline spinlock_t *huge_pte_lock(struct hstate *h, + struct mm_struct *mm, pte_t *pte) +{ + spinlock_t *ptl; + + ptl = huge_pte_lockptr(h, mm, pte); + spin_lock(ptl); + return ptl; +} + #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 2ab11dc38077..eff50e062be8 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -205,7 +205,6 @@ struct i2c_driver { * @name: Indicates the type of the device, usually a chip name that's * generic enough to hide second-sourcing and compatible revisions. * @adapter: manages the bus segment hosting this I2C device - * @driver: device's driver, hence pointer to access routines * @dev: Driver model device node for the slave. * @irq: indicates the IRQ generated by this device (if any) * @detected: member of an i2c_driver.clients list or i2c-core's @@ -222,7 +221,6 @@ struct i2c_client { /* _LOWER_ 7 bits */ char name[I2C_NAME_SIZE]; struct i2c_adapter *adapter; /* the adapter we sit on */ - struct i2c_driver *driver; /* and our access routines */ struct device dev; /* the device structure */ int irq; /* irq issued by device */ struct list_head detected; diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index c2702856295e..84ba5ac39e03 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -119,4 +119,21 @@ extern int macvlan_link_register(struct rtnl_link_ops *ops); extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev); +#if IS_ENABLED(CONFIG_MACVLAN) +static inline struct net_device * +macvlan_dev_real_dev(const struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->lowerdev; +} +#else +static inline struct net_device * +macvlan_dev_real_dev(const struct net_device *dev) +{ + BUG(); + return NULL; +} +#endif + #endif /* _LINUX_IF_MACVLAN_H */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c9e831dc80bc..db43b58a3355 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -11,8 +11,6 @@ #include <linux/irqnr.h> #include <linux/hardirq.h> #include <linux/irqflags.h> -#include <linux/smp.h> -#include <linux/percpu.h> #include <linux/hrtimer.h> #include <linux/kref.h> #include <linux/workqueue.h> @@ -392,15 +390,6 @@ extern void __raise_softirq_irqoff(unsigned int nr); extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); -/* This is the worklist that queues up per-cpu softirq work. - * - * send_remote_sendirq() adds work to these lists, and - * the softirq handler itself dequeues from them. The queues - * are protected by disabling local cpu interrupts and they must - * only be accessed by the local cpu that they are for. 
- */ -DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list); - DECLARE_PER_CPU(struct task_struct *, ksoftirqd); static inline struct task_struct *this_cpu_ksoftirqd(void) @@ -408,17 +397,6 @@ static inline struct task_struct *this_cpu_ksoftirqd(void) return this_cpu_read(ksoftirqd); } -/* Try to send a softirq to a remote cpu. If this cannot be done, the - * work will be queued to the local cpu. - */ -extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq); - -/* Like send_remote_softirq(), but the caller must disable local cpu interrupts - * and compute the current cpu, passed in as 'this_cpu'. - */ -extern void __send_remote_softirq(struct call_single_data *cp, int cpu, - int this_cpu, int softirq); - /* Tasklets --- multithreaded analogue of BHs. Main feature differing them of generic softirqs: tasklet diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 7ea319e95b47..a444c790fa72 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -22,6 +22,7 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/types.h> +#include <trace/events/iommu.h> #define IOMMU_READ (1) #define IOMMU_WRITE (2) @@ -227,6 +228,7 @@ static inline int report_iommu_fault(struct iommu_domain *domain, ret = domain->handler(domain, dev, iova, flags, domain->handler_token); + trace_io_page_fault(dev, iova, flags); return ret; } diff --git a/include/linux/irq.h b/include/linux/irq.h index 56bb0dc8b7d4..7dc10036eff5 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -70,6 +70,9 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data); * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context * IRQ_NESTED_TRHEAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude + * it from the spurious interrupt detection + * mechanism and from core side polling. */ enum { IRQ_TYPE_NONE = 0x00000000, @@ -94,12 +97,14 @@ enum { IRQ_NESTED_THREAD = (1 << 15), IRQ_NOTHREAD = (1 << 16), IRQ_PER_CPU_DEVID = (1 << 17), + IRQ_IS_POLLED = (1 << 18), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ + IRQ_IS_POLLED) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 672ddc4de4af..d4e98d13eff4 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -501,7 +501,6 @@ void tracing_snapshot_alloc(void); extern void tracing_start(void); extern void tracing_stop(void); -extern void ftrace_off_permanent(void); static inline __printf(1, 2) void ____trace_printk_check_format(const char *fmt, ...) 
@@ -639,7 +638,6 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); #else static inline void tracing_start(void) { } static inline void tracing_stop(void) { } -static inline void ftrace_off_permanent(void) { } static inline void trace_dump_stack(int skip) { } static inline void tracing_on(void) { } diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 518a53afb9ea..a74c3a84dfdd 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -45,6 +45,7 @@ struct key_preparsed_payload { const void *data; /* Raw data */ size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ + bool trusted; /* True if key is trusted */ }; typedef int (*request_key_actor_t)(struct key_construction *key, @@ -63,6 +64,11 @@ struct key_type { */ size_t def_datalen; + /* Default key search algorithm. */ + unsigned def_lookup_type; +#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */ +#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */ + /* vet a description */ int (*vet_description)(const char *description); diff --git a/include/linux/key.h b/include/linux/key.h index 4dfde1161c5e..80d677483e31 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -22,6 +22,7 @@ #include <linux/sysctl.h> #include <linux/rwsem.h> #include <linux/atomic.h> +#include <linux/assoc_array.h> #ifdef __KERNEL__ #include <linux/uidgid.h> @@ -82,6 +83,12 @@ struct key_owner; struct keyring_list; struct keyring_name; +struct keyring_index_key { + struct key_type *type; + const char *description; + size_t desc_len; +}; + /*****************************************************************************/ /* * key reference with possession attribute handling @@ -99,7 +106,7 @@ struct keyring_name; typedef struct __key_reference_with_attributes *key_ref_t; static inline key_ref_t make_key_ref(const struct key *key, - unsigned long possession) + bool possession) { return (key_ref_t) ((unsigned long) key | possession); } @@ -109,7 +116,7 @@ static inline struct key *key_ref_to_ptr(const key_ref_t key_ref) return (struct key *) ((unsigned long) key_ref & ~1UL); } -static inline unsigned long is_key_possessed(const key_ref_t key_ref) +static inline bool is_key_possessed(const key_ref_t key_ref) { return (unsigned long) key_ref & 1UL; } @@ -129,7 +136,6 @@ struct key { struct list_head graveyard_link; struct rb_node serial_node; }; - struct key_type *type; /* type of key */ struct rw_semaphore sem; /* change vs change sem */ struct key_user *user; /* owner of this key */ void *security; /* security data for this key */ @@ -162,13 +168,21 @@ struct key { #define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ #define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ #define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ +#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */ +#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */ - /* the description string - * - this is used to match a key against search criteria - * - this should be a printable string + /* the key type and key description string + * - the desc is used to match a key against search criteria + * - it should be a printable string * - eg: for krb5 AFS, this might be "afs@REDHAT.COM" */ - char *description; + union { + struct keyring_index_key index_key; + struct { + struct key_type *type; /* type of key */ + char *description; + }; + }; /* type specific data * - this is used by the 
keyring type to index the name @@ -185,11 +199,14 @@ struct key { * whatever */ union { - unsigned long value; - void __rcu *rcudata; - void *data; - struct keyring_list __rcu *subscriptions; - } payload; + union { + unsigned long value; + void __rcu *rcudata; + void *data; + void *data2[2]; + } payload; + struct assoc_array keys; + }; }; extern struct key *key_alloc(struct key_type *type, @@ -203,18 +220,23 @@ extern struct key *key_alloc(struct key_type *type, #define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */ #define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */ #define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ +#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */ extern void key_revoke(struct key *key); extern void key_invalidate(struct key *key); extern void key_put(struct key *key); -static inline struct key *key_get(struct key *key) +static inline struct key *__key_get(struct key *key) { - if (key) - atomic_inc(&key->usage); + atomic_inc(&key->usage); return key; } +static inline struct key *key_get(struct key *key) +{ + return key ? __key_get(key) : key; +} + static inline void key_ref_put(key_ref_t key_ref) { key_put(key_ref_to_ptr(key_ref)); diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 10308c6a3d1c..552d51efb429 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -1,7 +1,7 @@ /* * A generic kernel FIFO implementation * - * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net> + * Copyright (C) 2013 Stefani Seibold <stefani@seibold.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -67,9 +67,10 @@ struct __kfifo { union { \ struct __kfifo kfifo; \ datatype *type; \ + const datatype *const_type; \ char (*rectype)[recsize]; \ ptrtype *ptr; \ - const ptrtype *ptr_const; \ + ptrtype const *ptr_const; \ } #define __STRUCT_KFIFO(type, size, recsize, ptrtype) \ @@ -386,16 +387,12 @@ __kfifo_int_must_check_helper( \ #define kfifo_put(fifo, val) \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ - typeof((val) + 1) __val = (val); \ + typeof(*__tmp->const_type) __val = (val); \ unsigned int __ret; \ - const size_t __recsize = sizeof(*__tmp->rectype); \ + size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ - if (0) { \ - typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \ - __dummy = (typeof(__val))NULL; \ - } \ if (__recsize) \ - __ret = __kfifo_in_r(__kfifo, __val, sizeof(*__val), \ + __ret = __kfifo_in_r(__kfifo, &__val, sizeof(__val), \ __recsize); \ else { \ __ret = !kfifo_is_full(__tmp); \ @@ -404,7 +401,7 @@ __kfifo_int_must_check_helper( \ ((typeof(__tmp->type))__kfifo->data) : \ (__tmp->buf) \ )[__kfifo->in & __tmp->kfifo.mask] = \ - *(typeof(__tmp->type))__val; \ + (typeof(*__tmp->type))__val; \ smp_wmb(); \ __kfifo->in++; \ } \ @@ -415,7 +412,7 @@ __kfifo_int_must_check_helper( \ /** * kfifo_get - get data from the fifo * @fifo: address of the fifo to be used - * @val: the var where to store the data to be added + * @val: address where to store the data * * This macro reads the data from the fifo. * It returns 0 if the fifo was empty. 
Otherwise it returns the number @@ -428,12 +425,10 @@ __kfifo_int_must_check_helper( \ __kfifo_uint_must_check_helper( \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ - typeof((val) + 1) __val = (val); \ + typeof(__tmp->ptr) __val = (val); \ unsigned int __ret; \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ - if (0) \ - __val = (typeof(__tmp->ptr))0; \ if (__recsize) \ __ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \ __recsize); \ @@ -456,7 +451,7 @@ __kfifo_uint_must_check_helper( \ /** * kfifo_peek - get data from the fifo without removing * @fifo: address of the fifo to be used - * @val: the var where to store the data to be added + * @val: address where to store the data * * This reads the data from the fifo without removing it from the fifo. * It returns 0 if the fifo was empty. Otherwise it returns the number @@ -469,12 +464,10 @@ __kfifo_uint_must_check_helper( \ __kfifo_uint_must_check_helper( \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ - typeof((val) + 1) __val = (val); \ + typeof(__tmp->ptr) __val = (val); \ unsigned int __ret; \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ - if (0) \ - __val = (typeof(__tmp->ptr))NULL; \ if (__recsize) \ __ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \ __recsize); \ @@ -508,14 +501,10 @@ __kfifo_uint_must_check_helper( \ #define kfifo_in(fifo, buf, n) \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ - typeof((buf) + 1) __buf = (buf); \ + typeof(__tmp->ptr_const) __buf = (buf); \ unsigned long __n = (n); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ - if (0) { \ - typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \ - __dummy = (typeof(__buf))NULL; \ - } \ (__recsize) ?\ __kfifo_in_r(__kfifo, __buf, __n, __recsize) : \ __kfifo_in(__kfifo, __buf, __n); \ @@ -561,14 +550,10 @@ __kfifo_uint_must_check_helper( \ __kfifo_uint_must_check_helper( \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ - typeof((buf) + 1) __buf = (buf); \ + typeof(__tmp->ptr) __buf = (buf); \ unsigned long __n = (n); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ - if (0) { \ - typeof(__tmp->ptr) __dummy = NULL; \ - __buf = __dummy; \ - } \ (__recsize) ?\ __kfifo_out_r(__kfifo, __buf, __n, __recsize) : \ __kfifo_out(__kfifo, __buf, __n); \ @@ -773,14 +758,10 @@ __kfifo_uint_must_check_helper( \ __kfifo_uint_must_check_helper( \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ - typeof((buf) + 1) __buf = (buf); \ + typeof(__tmp->ptr) __buf = (buf); \ unsigned long __n = (n); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ - if (0) { \ - typeof(__tmp->ptr) __dummy __attribute__ ((unused)) = NULL; \ - __buf = __dummy; \ - } \ (__recsize) ? 
\ __kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \ __kfifo_out_peek(__kfifo, __buf, __n); \ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0fbbc7aa02cb..9523d2ad7535 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -142,7 +142,7 @@ struct kvm; struct kvm_vcpu; extern struct kmem_cache *kvm_vcpu_cache; -extern raw_spinlock_t kvm_lock; +extern spinlock_t kvm_lock; extern struct list_head vm_list; struct kvm_io_range { @@ -189,8 +189,7 @@ struct kvm_async_pf { gva_t gva; unsigned long addr; struct kvm_arch_async_pf arch; - struct page *page; - bool done; + bool wakeup_all; }; void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); @@ -508,9 +507,10 @@ int kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem); int __kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem); -void kvm_arch_free_memslot(struct kvm_memory_slot *free, +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont); -int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages); +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages); void kvm_arch_memslots_updated(struct kvm *kvm); int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, @@ -671,6 +671,25 @@ static inline void kvm_arch_free_vm(struct kvm *kvm) } #endif +#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA +void kvm_arch_register_noncoherent_dma(struct kvm *kvm); +void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm); +bool kvm_arch_has_noncoherent_dma(struct kvm *kvm); +#else +static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) +{ +} + +static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) +{ +} + +static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) +{ + return false; +} +#endif + static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) { #ifdef __KVM_HAVE_ARCH_WQP @@ -747,9 +766,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm, int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); -/* For vcpu->arch.iommu_flags */ -#define KVM_IOMMU_CACHE_COHERENCY 0x1 - #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); @@ -789,7 +805,7 @@ static inline void kvm_guest_enter(void) /* KVM does not hold any references to rcu protected data when it * switches CPU into a guest mode. In fact switching to a guest mode - * is very similar to exiting to userspase from rcu point of view. In + * is very similar to exiting to userspace from rcu point of view. In * addition CPU may stay in a guest mode for quite a long time (up to * one time slice). Lets treat guest mode as quiescent state, just like * we do with user-mode execution. @@ -842,13 +858,6 @@ static inline int memslot_id(struct kvm *kvm, gfn_t gfn) return gfn_to_memslot(kvm, gfn)->id; } -static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) -{ - /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. 
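/*
 * Sketch only, not part of the patch: an architecture that keeps no
 * per-memslot metadata would satisfy the new kvm_arch_*_memslot()
 * prototypes above with trivial bodies along these lines.
 */
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;	/* nothing to preallocate for this hypothetical arch */
}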
*/ - return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - - (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); -} - static inline gfn_t hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) { @@ -1066,6 +1075,7 @@ struct kvm_device *kvm_device_from_filp(struct file *filp); extern struct kvm_device_ops kvm_mpic_ops; extern struct kvm_device_ops kvm_xics_ops; +extern struct kvm_device_ops kvm_vfio_ops; #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT diff --git a/include/linux/llist.h b/include/linux/llist.h index 8828a78dec9a..fbf10a0bc095 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -195,4 +195,6 @@ static inline struct llist_node *llist_del_all(struct llist_head *head) extern struct llist_node *llist_del_first(struct llist_head *head); +struct llist_node *llist_reverse_order(struct llist_node *head); + #endif /* LLIST_H */ diff --git a/include/linux/lockref.h b/include/linux/lockref.h index 13dfd36a3294..c8929c3832db 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -15,10 +15,15 @@ */ #include <linux/spinlock.h> +#include <generated/bounds.h> + +#define USE_CMPXCHG_LOCKREF \ + (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \ + IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS) struct lockref { union { -#ifdef CONFIG_CMPXCHG_LOCKREF +#if USE_CMPXCHG_LOCKREF aligned_u64 lock_count; #endif struct { diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index 4706d3d46e56..cb49417f8ba9 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h @@ -1908,7 +1908,7 @@ #define ARIZONA_FLL2_SYNC_GAIN_MASK 0x003c /* FLL2_SYNC_GAIN */ #define ARIZONA_FLL2_SYNC_GAIN_SHIFT 2 /* FLL2_SYNC_GAIN */ #define ARIZONA_FLL2_SYNC_GAIN_WIDTH 4 /* FLL2_SYNC_GAIN */ -#define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */ +#define ARIZONA_FLL2_SYNC_BW 0x0001 /* FLL2_SYNC_BW */ #define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */ #define ARIZONA_FLL2_SYNC_BW_SHIFT 0 /* FLL2_SYNC_BW */ #define ARIZONA_FLL2_SYNC_BW_WIDTH 1 /* FLL2_SYNC_BW */ diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h new file mode 100644 index 000000000000..16bf8a0dcd97 --- /dev/null +++ b/include/linux/mfd/as3722.h @@ -0,0 +1,423 @@ +/* + * as3722 definitions + * + * Copyright (C) 2013 ams + * Copyright (c) 2013, NVIDIA Corporation. All rights reserved. + * + * Author: Florian Lobmaier <florian.lobmaier@ams.com> + * Author: Laxman Dewangan <ldewangan@nvidia.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
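/*
 * Sketch only, not part of the patch: a typical consumer of the
 * llist_reverse_order() declaration added in the llist.h hunk above -
 * drain a lock-less list and restore FIFO order before processing.
 * The item type and all names here are illustrative.
 */
#include <linux/llist.h>
#include <linux/printk.h>

struct demo_item {
	struct llist_node llnode;
	int payload;
};

static void drain_in_fifo_order(struct llist_head *head)
{
	struct llist_node *first = llist_del_all(head);
	struct demo_item *item;

	first = llist_reverse_order(first);	/* entries come off LIFO */
	llist_for_each_entry(item, first, llnode)
		pr_info("payload %d\n", item->payload);
}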
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef __LINUX_MFD_AS3722_H__ +#define __LINUX_MFD_AS3722_H__ + +#include <linux/regmap.h> + +/* AS3722 registers */ +#define AS3722_SD0_VOLTAGE_REG 0x00 +#define AS3722_SD1_VOLTAGE_REG 0x01 +#define AS3722_SD2_VOLTAGE_REG 0x02 +#define AS3722_SD3_VOLTAGE_REG 0x03 +#define AS3722_SD4_VOLTAGE_REG 0x04 +#define AS3722_SD5_VOLTAGE_REG 0x05 +#define AS3722_SD6_VOLTAGE_REG 0x06 +#define AS3722_GPIO0_CONTROL_REG 0x08 +#define AS3722_GPIO1_CONTROL_REG 0x09 +#define AS3722_GPIO2_CONTROL_REG 0x0A +#define AS3722_GPIO3_CONTROL_REG 0x0B +#define AS3722_GPIO4_CONTROL_REG 0x0C +#define AS3722_GPIO5_CONTROL_REG 0x0D +#define AS3722_GPIO6_CONTROL_REG 0x0E +#define AS3722_GPIO7_CONTROL_REG 0x0F +#define AS3722_LDO0_VOLTAGE_REG 0x10 +#define AS3722_LDO1_VOLTAGE_REG 0x11 +#define AS3722_LDO2_VOLTAGE_REG 0x12 +#define AS3722_LDO3_VOLTAGE_REG 0x13 +#define AS3722_LDO4_VOLTAGE_REG 0x14 +#define AS3722_LDO5_VOLTAGE_REG 0x15 +#define AS3722_LDO6_VOLTAGE_REG 0x16 +#define AS3722_LDO7_VOLTAGE_REG 0x17 +#define AS3722_LDO9_VOLTAGE_REG 0x19 +#define AS3722_LDO10_VOLTAGE_REG 0x1A +#define AS3722_LDO11_VOLTAGE_REG 0x1B +#define AS3722_GPIO_DEB1_REG 0x1E +#define AS3722_GPIO_DEB2_REG 0x1F +#define AS3722_GPIO_SIGNAL_OUT_REG 0x20 +#define AS3722_GPIO_SIGNAL_IN_REG 0x21 +#define AS3722_REG_SEQU_MOD1_REG 0x22 +#define AS3722_REG_SEQU_MOD2_REG 0x23 +#define AS3722_REG_SEQU_MOD3_REG 0x24 +#define AS3722_SD_PHSW_CTRL_REG 0x27 +#define AS3722_SD_PHSW_STATUS 0x28 +#define AS3722_SD0_CONTROL_REG 0x29 +#define AS3722_SD1_CONTROL_REG 0x2A +#define AS3722_SDmph_CONTROL_REG 0x2B +#define AS3722_SD23_CONTROL_REG 0x2C +#define AS3722_SD4_CONTROL_REG 0x2D +#define AS3722_SD5_CONTROL_REG 0x2E +#define AS3722_SD6_CONTROL_REG 0x2F +#define AS3722_SD_DVM_REG 0x30 +#define AS3722_RESET_REASON_REG 0x31 +#define AS3722_BATTERY_VOLTAGE_MONITOR_REG 0x32 +#define AS3722_STARTUP_CONTROL_REG 0x33 +#define AS3722_RESET_TIMER_REG 0x34 +#define AS3722_REFERENCE_CONTROL_REG 0x35 +#define AS3722_RESET_CONTROL_REG 0x36 +#define AS3722_OVER_TEMP_CONTROL_REG 0x37 +#define AS3722_WATCHDOG_CONTROL_REG 0x38 +#define AS3722_REG_STANDBY_MOD1_REG 0x39 +#define AS3722_REG_STANDBY_MOD2_REG 0x3A +#define AS3722_REG_STANDBY_MOD3_REG 0x3B +#define AS3722_ENABLE_CTRL1_REG 0x3C +#define AS3722_ENABLE_CTRL2_REG 0x3D +#define AS3722_ENABLE_CTRL3_REG 0x3E +#define AS3722_ENABLE_CTRL4_REG 0x3F +#define AS3722_ENABLE_CTRL5_REG 0x40 +#define AS3722_PWM_CONTROL_L_REG 0x41 +#define AS3722_PWM_CONTROL_H_REG 0x42 +#define AS3722_WATCHDOG_TIMER_REG 0x46 +#define AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG 0x48 +#define AS3722_IOVOLTAGE_REG 0x49 +#define AS3722_BATTERY_VOLTAGE_MONITOR2_REG 0x4A +#define AS3722_SD_CONTROL_REG 0x4D +#define AS3722_LDOCONTROL0_REG 0x4E +#define AS3722_LDOCONTROL1_REG 0x4F +#define AS3722_SD0_PROTECT_REG 0x50 +#define AS3722_SD6_PROTECT_REG 0x51 +#define AS3722_PWM_VCONTROL1_REG 0x52 +#define AS3722_PWM_VCONTROL2_REG 0x53 +#define AS3722_PWM_VCONTROL3_REG 0x54 +#define AS3722_PWM_VCONTROL4_REG 0x55 +#define AS3722_BB_CHARGER_REG 0x57 +#define AS3722_CTRL_SEQU1_REG 0x58 +#define AS3722_CTRL_SEQU2_REG 0x59 +#define AS3722_OVCURRENT_REG 0x5A +#define AS3722_OVCURRENT_DEB_REG 0x5B +#define AS3722_SDLV_DEB_REG 0x5C +#define AS3722_OC_PG_CTRL_REG 0x5D +#define AS3722_OC_PG_CTRL2_REG 0x5E +#define AS3722_CTRL_STATUS 0x5F +#define 
AS3722_RTC_CONTROL_REG 0x60 +#define AS3722_RTC_SECOND_REG 0x61 +#define AS3722_RTC_MINUTE_REG 0x62 +#define AS3722_RTC_HOUR_REG 0x63 +#define AS3722_RTC_DAY_REG 0x64 +#define AS3722_RTC_MONTH_REG 0x65 +#define AS3722_RTC_YEAR_REG 0x66 +#define AS3722_RTC_ALARM_SECOND_REG 0x67 +#define AS3722_RTC_ALARM_MINUTE_REG 0x68 +#define AS3722_RTC_ALARM_HOUR_REG 0x69 +#define AS3722_RTC_ALARM_DAY_REG 0x6A +#define AS3722_RTC_ALARM_MONTH_REG 0x6B +#define AS3722_RTC_ALARM_YEAR_REG 0x6C +#define AS3722_SRAM_REG 0x6D +#define AS3722_RTC_ACCESS_REG 0x6F +#define AS3722_RTC_STATUS_REG 0x73 +#define AS3722_INTERRUPT_MASK1_REG 0x74 +#define AS3722_INTERRUPT_MASK2_REG 0x75 +#define AS3722_INTERRUPT_MASK3_REG 0x76 +#define AS3722_INTERRUPT_MASK4_REG 0x77 +#define AS3722_INTERRUPT_STATUS1_REG 0x78 +#define AS3722_INTERRUPT_STATUS2_REG 0x79 +#define AS3722_INTERRUPT_STATUS3_REG 0x7A +#define AS3722_INTERRUPT_STATUS4_REG 0x7B +#define AS3722_TEMP_STATUS_REG 0x7D +#define AS3722_ADC0_CONTROL_REG 0x80 +#define AS3722_ADC1_CONTROL_REG 0x81 +#define AS3722_ADC0_MSB_RESULT_REG 0x82 +#define AS3722_ADC0_LSB_RESULT_REG 0x83 +#define AS3722_ADC1_MSB_RESULT_REG 0x84 +#define AS3722_ADC1_LSB_RESULT_REG 0x85 +#define AS3722_ADC1_THRESHOLD_HI_MSB_REG 0x86 +#define AS3722_ADC1_THRESHOLD_HI_LSB_REG 0x87 +#define AS3722_ADC1_THRESHOLD_LO_MSB_REG 0x88 +#define AS3722_ADC1_THRESHOLD_LO_LSB_REG 0x89 +#define AS3722_ADC_CONFIGURATION_REG 0x8A +#define AS3722_ASIC_ID1_REG 0x90 +#define AS3722_ASIC_ID2_REG 0x91 +#define AS3722_LOCK_REG 0x9E +#define AS3722_MAX_REGISTER 0xF4 + +#define AS3722_SD0_EXT_ENABLE_MASK 0x03 +#define AS3722_SD1_EXT_ENABLE_MASK 0x0C +#define AS3722_SD2_EXT_ENABLE_MASK 0x30 +#define AS3722_SD3_EXT_ENABLE_MASK 0xC0 +#define AS3722_SD4_EXT_ENABLE_MASK 0x03 +#define AS3722_SD5_EXT_ENABLE_MASK 0x0C +#define AS3722_SD6_EXT_ENABLE_MASK 0x30 +#define AS3722_LDO0_EXT_ENABLE_MASK 0x03 +#define AS3722_LDO1_EXT_ENABLE_MASK 0x0C +#define AS3722_LDO2_EXT_ENABLE_MASK 0x30 +#define AS3722_LDO3_EXT_ENABLE_MASK 0xC0 +#define AS3722_LDO4_EXT_ENABLE_MASK 0x03 +#define AS3722_LDO5_EXT_ENABLE_MASK 0x0C +#define AS3722_LDO6_EXT_ENABLE_MASK 0x30 +#define AS3722_LDO7_EXT_ENABLE_MASK 0xC0 +#define AS3722_LDO9_EXT_ENABLE_MASK 0x0C +#define AS3722_LDO10_EXT_ENABLE_MASK 0x30 +#define AS3722_LDO11_EXT_ENABLE_MASK 0xC0 + +#define AS3722_OVCURRENT_SD0_ALARM_MASK 0x07 +#define AS3722_OVCURRENT_SD0_ALARM_SHIFT 0x01 +#define AS3722_OVCURRENT_SD0_TRIP_MASK 0x18 +#define AS3722_OVCURRENT_SD0_TRIP_SHIFT 0x03 +#define AS3722_OVCURRENT_SD1_TRIP_MASK 0x60 +#define AS3722_OVCURRENT_SD1_TRIP_SHIFT 0x05 + +#define AS3722_OVCURRENT_SD6_ALARM_MASK 0x07 +#define AS3722_OVCURRENT_SD6_ALARM_SHIFT 0x01 +#define AS3722_OVCURRENT_SD6_TRIP_MASK 0x18 +#define AS3722_OVCURRENT_SD6_TRIP_SHIFT 0x03 + +/* AS3722 register bits and bit masks */ +#define AS3722_LDO_ILIMIT_MASK BIT(7) +#define AS3722_LDO_ILIMIT_BIT BIT(7) +#define AS3722_LDO0_VSEL_MASK 0x1F +#define AS3722_LDO0_VSEL_MIN 0x01 +#define AS3722_LDO0_VSEL_MAX 0x12 +#define AS3722_LDO0_NUM_VOLT 0x12 +#define AS3722_LDO3_VSEL_MASK 0x3F +#define AS3722_LDO3_VSEL_MIN 0x01 +#define AS3722_LDO3_VSEL_MAX 0x2D +#define AS3722_LDO3_NUM_VOLT 0x2D +#define AS3722_LDO_VSEL_MASK 0x7F +#define AS3722_LDO_VSEL_MIN 0x01 +#define AS3722_LDO_VSEL_MAX 0x7F +#define AS3722_LDO_VSEL_DNU_MIN 0x25 +#define AS3722_LDO_VSEL_DNU_MAX 0x3F +#define AS3722_LDO_NUM_VOLT 0x80 + +#define AS3722_LDO0_CTRL BIT(0) +#define AS3722_LDO1_CTRL BIT(1) +#define AS3722_LDO2_CTRL BIT(2) +#define AS3722_LDO3_CTRL BIT(3) +#define AS3722_LDO4_CTRL 
BIT(4) +#define AS3722_LDO5_CTRL BIT(5) +#define AS3722_LDO6_CTRL BIT(6) +#define AS3722_LDO7_CTRL BIT(7) +#define AS3722_LDO9_CTRL BIT(1) +#define AS3722_LDO10_CTRL BIT(2) +#define AS3722_LDO11_CTRL BIT(3) + +#define AS3722_LDO3_MODE_MASK (3 << 6) +#define AS3722_LDO3_MODE_VAL(n) (((n) & 0x3) << 6) +#define AS3722_LDO3_MODE_PMOS AS3722_LDO3_MODE_VAL(0) +#define AS3722_LDO3_MODE_PMOS_TRACKING AS3722_LDO3_MODE_VAL(1) +#define AS3722_LDO3_MODE_NMOS AS3722_LDO3_MODE_VAL(2) +#define AS3722_LDO3_MODE_SWITCH AS3722_LDO3_MODE_VAL(3) + +#define AS3722_SD_VSEL_MASK 0x7F +#define AS3722_SD0_VSEL_MIN 0x01 +#define AS3722_SD0_VSEL_MAX 0x5A +#define AS3722_SD2_VSEL_MIN 0x01 +#define AS3722_SD2_VSEL_MAX 0x7F + +#define AS3722_SDn_CTRL(n) BIT(n) + +#define AS3722_SD0_MODE_FAST BIT(4) +#define AS3722_SD1_MODE_FAST BIT(4) +#define AS3722_SD2_MODE_FAST BIT(2) +#define AS3722_SD3_MODE_FAST BIT(6) +#define AS3722_SD4_MODE_FAST BIT(2) +#define AS3722_SD5_MODE_FAST BIT(2) +#define AS3722_SD6_MODE_FAST BIT(4) + +#define AS3722_POWER_OFF BIT(1) + +#define AS3722_INTERRUPT_MASK1_LID BIT(0) +#define AS3722_INTERRUPT_MASK1_ACOK BIT(1) +#define AS3722_INTERRUPT_MASK1_ENABLE1 BIT(2) +#define AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0 BIT(3) +#define AS3722_INTERRUPT_MASK1_ONKEY_LONG BIT(4) +#define AS3722_INTERRUPT_MASK1_ONKEY BIT(5) +#define AS3722_INTERRUPT_MASK1_OVTMP BIT(6) +#define AS3722_INTERRUPT_MASK1_LOWBAT BIT(7) + +#define AS3722_INTERRUPT_MASK2_SD0_LV BIT(0) +#define AS3722_INTERRUPT_MASK2_SD1_LV BIT(1) +#define AS3722_INTERRUPT_MASK2_SD2345_LV BIT(2) +#define AS3722_INTERRUPT_MASK2_PWM1_OV_PROT BIT(3) +#define AS3722_INTERRUPT_MASK2_PWM2_OV_PROT BIT(4) +#define AS3722_INTERRUPT_MASK2_ENABLE2 BIT(5) +#define AS3722_INTERRUPT_MASK2_SD6_LV BIT(6) +#define AS3722_INTERRUPT_MASK2_RTC_REP BIT(7) + +#define AS3722_INTERRUPT_MASK3_RTC_ALARM BIT(0) +#define AS3722_INTERRUPT_MASK3_GPIO1 BIT(1) +#define AS3722_INTERRUPT_MASK3_GPIO2 BIT(2) +#define AS3722_INTERRUPT_MASK3_GPIO3 BIT(3) +#define AS3722_INTERRUPT_MASK3_GPIO4 BIT(4) +#define AS3722_INTERRUPT_MASK3_GPIO5 BIT(5) +#define AS3722_INTERRUPT_MASK3_WATCHDOG BIT(6) +#define AS3722_INTERRUPT_MASK3_ENABLE3 BIT(7) + +#define AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN BIT(0) +#define AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN BIT(1) +#define AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN BIT(2) +#define AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM BIT(3) +#define AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM BIT(4) +#define AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM BIT(5) +#define AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6 BIT(6) +#define AS3722_INTERRUPT_MASK4_ADC BIT(7) + +#define AS3722_ADC1_INTERVAL_TIME BIT(0) +#define AS3722_ADC1_INT_MODE_ON BIT(1) +#define AS3722_ADC_BUF_ON BIT(2) +#define AS3722_ADC1_LOW_VOLTAGE_RANGE BIT(5) +#define AS3722_ADC1_INTEVAL_SCAN BIT(6) +#define AS3722_ADC1_INT_MASK BIT(7) + +#define AS3722_ADC_MSB_VAL_MASK 0x7F +#define AS3722_ADC_LSB_VAL_MASK 0x07 + +#define AS3722_ADC0_CONV_START BIT(7) +#define AS3722_ADC0_CONV_NOTREADY BIT(7) +#define AS3722_ADC0_SOURCE_SELECT_MASK 0x1F + +#define AS3722_ADC1_CONV_START BIT(7) +#define AS3722_ADC1_CONV_NOTREADY BIT(7) +#define AS3722_ADC1_SOURCE_SELECT_MASK 0x1F + +/* GPIO modes */ +#define AS3722_GPIO_MODE_MASK 0x07 +#define AS3722_GPIO_MODE_INPUT 0x00 +#define AS3722_GPIO_MODE_OUTPUT_VDDH 0x01 +#define AS3722_GPIO_MODE_IO_OPEN_DRAIN 0x02 +#define AS3722_GPIO_MODE_ADC_IN 0x03 +#define AS3722_GPIO_MODE_INPUT_PULL_UP 0x04 +#define AS3722_GPIO_MODE_INPUT_PULL_DOWN 0x05 +#define AS3722_GPIO_MODE_IO_OPEN_DRAIN_PULL_UP 0x06 +#define 
AS3722_GPIO_MODE_OUTPUT_VDDL 0x07 +#define AS3722_GPIO_MODE_VAL(n) ((n) & AS3722_GPIO_MODE_MASK) + +#define AS3722_GPIO_INV BIT(7) +#define AS3722_GPIO_IOSF_MASK 0x78 +#define AS3722_GPIO_IOSF_VAL(n) (((n) & 0xF) << 3) +#define AS3722_GPIO_IOSF_NORMAL AS3722_GPIO_IOSF_VAL(0) +#define AS3722_GPIO_IOSF_INTERRUPT_OUT AS3722_GPIO_IOSF_VAL(1) +#define AS3722_GPIO_IOSF_VSUP_LOW_OUT AS3722_GPIO_IOSF_VAL(2) +#define AS3722_GPIO_IOSF_GPIO_INTERRUPT_IN AS3722_GPIO_IOSF_VAL(3) +#define AS3722_GPIO_IOSF_ISINK_PWM_IN AS3722_GPIO_IOSF_VAL(4) +#define AS3722_GPIO_IOSF_VOLTAGE_STBY AS3722_GPIO_IOSF_VAL(5) +#define AS3722_GPIO_IOSF_PWR_GOOD_OUT AS3722_GPIO_IOSF_VAL(7) +#define AS3722_GPIO_IOSF_Q32K_OUT AS3722_GPIO_IOSF_VAL(8) +#define AS3722_GPIO_IOSF_WATCHDOG_IN AS3722_GPIO_IOSF_VAL(9) +#define AS3722_GPIO_IOSF_SOFT_RESET_IN AS3722_GPIO_IOSF_VAL(11) +#define AS3722_GPIO_IOSF_PWM_OUT AS3722_GPIO_IOSF_VAL(12) +#define AS3722_GPIO_IOSF_VSUP_LOW_DEB_OUT AS3722_GPIO_IOSF_VAL(13) +#define AS3722_GPIO_IOSF_SD6_LOW_VOLT_LOW AS3722_GPIO_IOSF_VAL(14) + +#define AS3722_GPIOn_SIGNAL(n) BIT(n) +#define AS3722_GPIOn_CONTROL_REG(n) (AS3722_GPIO0_CONTROL_REG + n) +#define AS3722_I2C_PULL_UP BIT(4) +#define AS3722_INT_PULL_UP BIT(5) + +#define AS3722_RTC_REP_WAKEUP_EN BIT(0) +#define AS3722_RTC_ALARM_WAKEUP_EN BIT(1) +#define AS3722_RTC_ON BIT(2) +#define AS3722_RTC_IRQMODE BIT(3) +#define AS3722_RTC_CLK32K_OUT_EN BIT(5) + +#define AS3722_WATCHDOG_TIMER_MAX 0x7F +#define AS3722_WATCHDOG_ON BIT(0) +#define AS3722_WATCHDOG_SW_SIG BIT(0) + +#define AS3722_EXT_CONTROL_ENABLE1 0x1 +#define AS3722_EXT_CONTROL_ENABLE2 0x2 +#define AS3722_EXT_CONTROL_ENABLE3 0x3 + +/* Interrupt IDs */ +enum as3722_irq { + AS3722_IRQ_LID, + AS3722_IRQ_ACOK, + AS3722_IRQ_ENABLE1, + AS3722_IRQ_OCCUR_ALARM_SD0, + AS3722_IRQ_ONKEY_LONG_PRESS, + AS3722_IRQ_ONKEY, + AS3722_IRQ_OVTMP, + AS3722_IRQ_LOWBAT, + AS3722_IRQ_SD0_LV, + AS3722_IRQ_SD1_LV, + AS3722_IRQ_SD2_LV, + AS3722_IRQ_PWM1_OV_PROT, + AS3722_IRQ_PWM2_OV_PROT, + AS3722_IRQ_ENABLE2, + AS3722_IRQ_SD6_LV, + AS3722_IRQ_RTC_REP, + AS3722_IRQ_RTC_ALARM, + AS3722_IRQ_GPIO1, + AS3722_IRQ_GPIO2, + AS3722_IRQ_GPIO3, + AS3722_IRQ_GPIO4, + AS3722_IRQ_GPIO5, + AS3722_IRQ_WATCHDOG, + AS3722_IRQ_ENABLE3, + AS3722_IRQ_TEMP_SD0_SHUTDOWN, + AS3722_IRQ_TEMP_SD1_SHUTDOWN, + AS3722_IRQ_TEMP_SD2_SHUTDOWN, + AS3722_IRQ_TEMP_SD0_ALARM, + AS3722_IRQ_TEMP_SD1_ALARM, + AS3722_IRQ_TEMP_SD6_ALARM, + AS3722_IRQ_OCCUR_ALARM_SD6, + AS3722_IRQ_ADC, + AS3722_IRQ_MAX, +}; + +struct as3722 { + struct device *dev; + struct regmap *regmap; + int chip_irq; + unsigned long irq_flags; + bool en_intern_int_pullup; + bool en_intern_i2c_pullup; + struct regmap_irq_chip_data *irq_data; +}; + +static inline int as3722_read(struct as3722 *as3722, u32 reg, u32 *dest) +{ + return regmap_read(as3722->regmap, reg, dest); +} + +static inline int as3722_write(struct as3722 *as3722, u32 reg, u32 value) +{ + return regmap_write(as3722->regmap, reg, value); +} + +static inline int as3722_block_read(struct as3722 *as3722, u32 reg, + int count, u8 *buf) +{ + return regmap_bulk_read(as3722->regmap, reg, buf, count); +} + +static inline int as3722_block_write(struct as3722 *as3722, u32 reg, + int count, u8 *data) +{ + return regmap_bulk_write(as3722->regmap, reg, data, count); +} + +static inline int as3722_update_bits(struct as3722 *as3722, u32 reg, + u32 mask, u8 val) +{ + return regmap_update_bits(as3722->regmap, reg, mask, val); +} + +static inline int as3722_irq_get_virq(struct as3722 *as3722, int irq) +{ + return 
regmap_irq_get_virq(as3722->irq_data, irq); +} +#endif /* __LINUX_MFD_AS3722_H__ */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 7314fc4e6d25..bdba8c61207b 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -104,7 +104,7 @@ static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev) } extern int mfd_add_devices(struct device *parent, int id, - struct mfd_cell *cells, int n_devs, + const struct mfd_cell *cells, int n_devs, struct resource *mem_base, int irq_base, struct irq_domain *irq_domain); diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h index 786d02eb79d2..21e21b81cc75 100644 --- a/include/linux/mfd/da9052/da9052.h +++ b/include/linux/mfd/da9052/da9052.h @@ -148,10 +148,15 @@ static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg, unsigned reg_cnt, unsigned char *val) { int ret; + unsigned int tmp; + int i; - ret = regmap_bulk_read(da9052->regmap, reg, val, reg_cnt); - if (ret < 0) - return ret; + for (i = 0; i < reg_cnt; i++) { + ret = regmap_read(da9052->regmap, reg + i, &tmp); + val[i] = (unsigned char)tmp; + if (ret < 0) + return ret; + } if (da9052->fix_io) { ret = da9052->fix_io(da9052, reg); @@ -166,10 +171,13 @@ static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg, unsigned reg_cnt, unsigned char *val) { int ret; + int i; - ret = regmap_raw_write(da9052->regmap, reg, val, reg_cnt); - if (ret < 0) - return ret; + for (i = 0; i < reg_cnt; i++) { + ret = regmap_write(da9052->regmap, reg + i, val[i]); + if (ret < 0) + return ret; + } if (da9052->fix_io) { ret = da9052->fix_io(da9052, reg); diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 244fb0d51589..3e050b933dd0 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -323,7 +323,6 @@ struct max77693_dev { int irq; int irq_gpio; - bool wakeup; struct mutex irqlock; int irq_masks_cur[MAX77693_IRQ_GROUP_NR]; int irq_masks_cache[MAX77693_IRQ_GROUP_NR]; diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h index 676f0f388992..3f3dc45f93ee 100644 --- a/include/linux/mfd/max77693.h +++ b/include/linux/mfd/max77693.h @@ -64,8 +64,6 @@ struct max77693_muic_platform_data { }; struct max77693_platform_data { - int wakeup; - /* regulator data */ struct max77693_regulator_data *regulators; int num_regulators; diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index d1382dfbeff0..0ce772105508 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -756,6 +756,59 @@ #define PCR_SETTING_REG2 0x814 #define PCR_SETTING_REG3 0x747 +/* Phy bits */ +#define PHY_PCR_FORCE_CODE 0xB000 +#define PHY_PCR_OOBS_CALI_50 0x0800 +#define PHY_PCR_OOBS_VCM_08 0x0200 +#define PHY_PCR_OOBS_SEN_90 0x0040 +#define PHY_PCR_RSSI_EN 0x0002 + +#define PHY_RCR1_ADP_TIME 0x0100 +#define PHY_RCR1_VCO_COARSE 0x001F + +#define PHY_RCR2_EMPHASE_EN 0x8000 +#define PHY_RCR2_NADJR 0x4000 +#define PHY_RCR2_CDR_CP_10 0x0400 +#define PHY_RCR2_CDR_SR_2 0x0100 +#define PHY_RCR2_FREQSEL_12 0x0040 +#define PHY_RCR2_CPADJEN 0x0020 +#define PHY_RCR2_CDR_SC_8 0x0008 +#define PHY_RCR2_CALIB_LATE 0x0002 + +#define PHY_RDR_RXDSEL_1_9 0x4000 + +#define PHY_TUNE_TUNEREF_1_0 0x4000 +#define PHY_TUNE_VBGSEL_1252 0x0C00 +#define PHY_TUNE_SDBUS_33 0x0200 +#define PHY_TUNE_TUNED18 0x01C0 +#define PHY_TUNE_TUNED12 0X0020 + +#define PHY_BPCR_IBRXSEL 0x0400 +#define PHY_BPCR_IBTXSEL 0x0100 +#define 
PHY_BPCR_IB_FILTER 0x0080 +#define PHY_BPCR_CMIRROR_EN 0x0040 + +#define PHY_REG_REV_RESV 0xE000 +#define PHY_REG_REV_RXIDLE_LATCHED 0x1000 +#define PHY_REG_REV_P1_EN 0x0800 +#define PHY_REG_REV_RXIDLE_EN 0x0400 +#define PHY_REG_REV_CLKREQ_DLY_TIMER_1_0 0x0040 +#define PHY_REG_REV_STOP_CLKRD 0x0020 +#define PHY_REG_REV_RX_PWST 0x0008 +#define PHY_REG_REV_STOP_CLKWR 0x0004 + +#define PHY_FLD3_TIMER_4 0x7800 +#define PHY_FLD3_TIMER_6 0x00E0 +#define PHY_FLD3_RXDELINK 0x0004 + +#define PHY_FLD4_FLDEN_SEL 0x4000 +#define PHY_FLD4_REQ_REF 0x2000 +#define PHY_FLD4_RXAMP_OFF 0x1000 +#define PHY_FLD4_REQ_ADDA 0x0800 +#define PHY_FLD4_BER_COUNT 0x00E0 +#define PHY_FLD4_BER_TIMER 0x000A +#define PHY_FLD4_BER_CHK_EN 0x0001 + #define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0) struct rtsx_pcr; diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h index ba89b94e4a56..674b45d5a757 100644 --- a/include/linux/mfd/si476x-core.h +++ b/include/linux/mfd/si476x-core.h @@ -316,7 +316,7 @@ enum si476x_smoothmetrics { * response to 'FM_RD_STATUS' command * @rdstpptyint: Traffic program flag(TP) and/or program type(PTY) * code has changed. - * @rdspiint: Program indentifiaction(PI) code has changed. + * @rdspiint: Program identification(PI) code has changed. * @rdssyncint: RDS synchronization has changed. * @rdsfifoint: RDS was received and the RDS FIFO has at least * 'FM_RDS_INTERRUPT_FIFO_COUNT' elements in it. diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h new file mode 100644 index 000000000000..eda121556e5d --- /dev/null +++ b/include/linux/mfd/stw481x.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2011 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef MFD_STW481X_H +#define MFD_STW481X_H + +#include <linux/i2c.h> +#include <linux/regulator/machine.h> +#include <linux/regmap.h> +#include <linux/bitops.h> + +/* These registers are accessed from more than one driver */ +#define STW_CONF1 0x11U +#define STW_CONF1_PDN_VMMC 0x01U +#define STW_CONF1_VMMC_MASK 0x0eU +#define STW_CONF1_VMMC_1_8V 0x02U +#define STW_CONF1_VMMC_2_85V 0x04U +#define STW_CONF1_VMMC_3V 0x06U +#define STW_CONF1_VMMC_1_85V 0x08U +#define STW_CONF1_VMMC_2_6V 0x0aU +#define STW_CONF1_VMMC_2_7V 0x0cU +#define STW_CONF1_VMMC_3_3V 0x0eU +#define STW_CONF1_MMC_LS_STATUS 0x10U +#define STW_PCTL_REG_LO 0x1eU +#define STW_PCTL_REG_HI 0x1fU +#define STW_CONF1_V_MONITORING 0x20U +#define STW_CONF1_IT_WARN 0x40U +#define STW_CONF1_PDN_VAUX 0x80U +#define STW_CONF2 0x20U +#define STW_CONF2_MASK_TWARN 0x01U +#define STW_CONF2_VMMC_EXT 0x02U +#define STW_CONF2_MASK_IT_WAKE_UP 0x04U +#define STW_CONF2_GPO1 0x08U +#define STW_CONF2_GPO2 0x10U +#define STW_VCORE_SLEEP 0x21U + +/** + * struct stw481x - state holder for the Stw481x drivers + * @mutex: mutex to serialize I2C accesses + * @i2c_client: corresponding I2C client + * @regulator: regulator device for regulator children + * @map: regmap handle to access device registers + */ +struct stw481x { + struct mutex lock; + struct i2c_client *client; + struct regulator_dev *vmmc_regulator; + struct regmap *map; +}; + +#endif diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h index b473577f36db..8789fa3c7fd9 100644 --- a/include/linux/mfd/syscon.h +++ b/include/linux/mfd/syscon.h @@ -17,10 +17,35 @@ struct device_node; +#ifdef CONFIG_MFD_SYSCON extern struct regmap *syscon_node_to_regmap(struct device_node 
*np); extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s); extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s); extern struct regmap *syscon_regmap_lookup_by_phandle( struct device_node *np, const char *property); +#else +static inline struct regmap *syscon_node_to_regmap(struct device_node *np) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct regmap *syscon_regmap_lookup_by_phandle( + struct device_node *np, + const char *property) +{ + return ERR_PTR(-ENOSYS); +} +#endif + #endif /* __LINUX_MFD_SYSCON_H__ */ diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 08cce7f96ab9..d498d98f0c2c 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -134,13 +134,18 @@ #define FIFO1_THRESHOLD 19 /* -* ADC runs at 3MHz, and it takes -* 15 cycles to latch one data output. -* Hence the idle time for ADC to -* process one sample data would be -* around 5 micro seconds. -*/ -#define IDLE_TIMEOUT 5 /* microsec */ + * time in us for processing a single channel, calculated as follows: + * + * num cycles = open delay + (sample delay + conv time) * averaging + * + * num cycles: 152 + (1 + 13) * 16 = 376 + * + * clock frequency: 26MHz / 8 = 3.25MHz + * clock period: 1 / 3.25MHz = 308ns + * + * processing time: 376 * 308ns = 116us + */ +#define IDLE_TIMEOUT 116 /* microsec */ #define TSCADC_CELLS 2 @@ -155,6 +160,7 @@ struct ti_tscadc_dev { struct mfd_cell cells[TSCADC_CELLS]; u32 reg_se_cache; spinlock_t reg_lock; + unsigned int clk_div; /* tsc device */ struct titsc *tsc; diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h index 40854ac0ba3d..eefafa62d304 100644 --- a/include/linux/mfd/wm8994/core.h +++ b/include/linux/mfd/wm8994/core.h @@ -56,8 +56,6 @@ struct irq_domain; #define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN) struct wm8994 { - struct mutex irq_lock; - struct wm8994_pdata pdata; enum wm8994_type type; @@ -85,16 +83,43 @@ struct wm8994 { }; /* Device I/O API */ -int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg); -int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg, - unsigned short val); -int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg, - unsigned short mask, unsigned short val); -int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg, - int count, u16 *buf); -int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg, - int count, const u16 *buf); +static inline int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg) +{ + unsigned int val; + int ret; + + ret = regmap_read(wm8994->regmap, reg, &val); + + if (ret < 0) + return ret; + else + return val; +} + +static inline int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg, + unsigned short val) +{ + return regmap_write(wm8994->regmap, reg, val); +} + +static inline int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg, + int count, u16 *buf) +{ + return regmap_bulk_read(wm8994->regmap, reg, buf, count); +} + +static inline int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg, + int count, const u16 *buf) +{ + return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16)); +} + +static inline int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg, + unsigned short 
mask, unsigned short val) +{ + return regmap_update_bits(wm8994->regmap, reg, mask, val); +} /* Helper to save on boilerplate */ static inline int wm8994_request_irq(struct wm8994 *wm8994, int irq, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 5eb4e31af22b..da78875807fc 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -230,6 +230,15 @@ enum { MLX5_MAX_PAGE_SHIFT = 31 }; +enum { + MLX5_ADAPTER_PAGE_SHIFT = 12 +}; + +enum { + MLX5_CAP_OFF_DCT = 41, + MLX5_CAP_OFF_CMDIF_CSUM = 46, +}; + struct mlx5_inbox_hdr { __be16 opcode; u8 rsvd[4]; @@ -319,9 +328,9 @@ struct mlx5_hca_cap { u8 rsvd25[42]; __be16 log_uar_page_sz; u8 rsvd26[28]; - u8 log_msx_atomic_size_qp; + u8 log_max_atomic_size_qp; u8 rsvd27[2]; - u8 log_msx_atomic_size_dc; + u8 log_max_atomic_size_dc; u8 rsvd28[76]; }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6b8c496572c8..554548cd3dd4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -483,6 +483,7 @@ struct mlx5_priv { struct rb_root page_root; int fw_pages; int reg_pages; + struct list_head free_list; struct mlx5_core_health health; @@ -557,9 +558,11 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context); struct mlx5_cmd_work_ent { struct mlx5_cmd_msg *in; struct mlx5_cmd_msg *out; + void *uout; + int uout_size; mlx5_cmd_cbk_t callback; void *context; - int idx; + int idx; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; @@ -570,6 +573,7 @@ struct mlx5_cmd_work_ent { u8 token; struct timespec ts1; struct timespec ts2; + u16 op; }; struct mlx5_pas { @@ -653,6 +657,9 @@ void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); +int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, + void *out, int out_size, mlx5_cmd_cbk_t callback, + void *context); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); @@ -676,7 +683,9 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq); int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, - struct mlx5_create_mkey_mbox_in *in, int inlen); + struct mlx5_create_mkey_mbox_in *in, int inlen, + mlx5_cmd_cbk_t callback, void *context, + struct mlx5_create_mkey_mbox_out *out); int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, struct mlx5_query_mkey_mbox_out *out, int outlen); @@ -745,6 +754,11 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) return mkey_idx << 8; } +static inline u8 mlx5_mkey_variant(u32 mkey) +{ + return mkey & 0xff; +} + enum { MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, diff --git a/include/linux/mm.h b/include/linux/mm.h index 42a35d94b82c..1cedd000cf29 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1316,32 +1316,76 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a } #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ -#if USE_SPLIT_PTLOCKS -/* - * We tuck a spinlock to guard each pagetable page into its struct page, - * at page->private, with 
BUILD_BUG_ON to make sure that this will not - * overflow into the next struct page (as it might with DEBUG_SPINLOCK). - * When freeing, reset page->mapping so free_pages_check won't complain. - */ -#define __pte_lockptr(page) &((page)->ptl) -#define pte_lock_init(_page) do { \ - spin_lock_init(__pte_lockptr(_page)); \ -} while (0) -#define pte_lock_deinit(page) ((page)->mapping = NULL) -#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));}) -#else /* !USE_SPLIT_PTLOCKS */ +#if USE_SPLIT_PTE_PTLOCKS +#if BLOATED_SPINLOCKS +extern bool ptlock_alloc(struct page *page); +extern void ptlock_free(struct page *page); + +static inline spinlock_t *ptlock_ptr(struct page *page) +{ + return page->ptl; +} +#else /* BLOATED_SPINLOCKS */ +static inline bool ptlock_alloc(struct page *page) +{ + return true; +} + +static inline void ptlock_free(struct page *page) +{ +} + +static inline spinlock_t *ptlock_ptr(struct page *page) +{ + return &page->ptl; +} +#endif /* BLOATED_SPINLOCKS */ + +static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) +{ + return ptlock_ptr(pmd_page(*pmd)); +} + +static inline bool ptlock_init(struct page *page) +{ + /* + * prep_new_page() initialize page->private (and therefore page->ptl) + * with 0. Make sure nobody took it in use in between. + * + * It can happen if arch try to use slab for page table allocation: + * slab code uses page->slab_cache and page->first_page (for tail + * pages), which share storage with page->ptl. + */ + VM_BUG_ON(*(unsigned long *)&page->ptl); + if (!ptlock_alloc(page)) + return false; + spin_lock_init(ptlock_ptr(page)); + return true; +} + +/* Reset page->mapping so free_pages_check won't complain. */ +static inline void pte_lock_deinit(struct page *page) +{ + page->mapping = NULL; + ptlock_free(page); +} + +#else /* !USE_SPLIT_PTE_PTLOCKS */ /* * We use mm->page_table_lock to guard all pagetable pages of the mm. */ -#define pte_lock_init(page) do {} while (0) -#define pte_lock_deinit(page) do {} while (0) -#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) -#endif /* USE_SPLIT_PTLOCKS */ +static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) +{ + return &mm->page_table_lock; +} +static inline bool ptlock_init(struct page *page) { return true; } +static inline void pte_lock_deinit(struct page *page) {} +#endif /* USE_SPLIT_PTE_PTLOCKS */ -static inline void pgtable_page_ctor(struct page *page) +static inline bool pgtable_page_ctor(struct page *page) { - pte_lock_init(page); inc_zone_page_state(page, NR_PAGETABLE); + return ptlock_init(page); } static inline void pgtable_page_dtor(struct page *page) @@ -1378,6 +1422,52 @@ static inline void pgtable_page_dtor(struct page *page) ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? 
\ NULL: pte_offset_kernel(pmd, address)) +#if USE_SPLIT_PMD_PTLOCKS + +static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) +{ + return ptlock_ptr(virt_to_page(pmd)); +} + +static inline bool pgtable_pmd_page_ctor(struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + page->pmd_huge_pte = NULL; +#endif + return ptlock_init(page); +} + +static inline void pgtable_pmd_page_dtor(struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + VM_BUG_ON(page->pmd_huge_pte); +#endif + ptlock_free(page); +} + +#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte) + +#else + +static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) +{ + return &mm->page_table_lock; +} + +static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; } +static inline void pgtable_pmd_page_dtor(struct page *page) {} + +#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) + +#endif + +static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) +{ + spinlock_t *ptl = pmd_lockptr(mm, pmd); + spin_lock(ptl); + return ptl; +} + extern void free_area_init(unsigned long * zones_size); extern void free_area_init_node(int nid, unsigned long * zones_size, unsigned long zone_start_pfn, unsigned long *zholes_size); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index a3198e5aaf4e..bd299418a934 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -23,7 +23,9 @@ struct address_space; -#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) +#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) +#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ + IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) /* * Each physical page in the system has a struct page associated with @@ -42,18 +44,22 @@ struct page { /* First double word block */ unsigned long flags; /* Atomic flags, some possibly * updated asynchronously */ - struct address_space *mapping; /* If low bit clear, points to - * inode address_space, or NULL. - * If page mapped as anonymous - * memory, low bit is set, and - * it points to anon_vma object: - * see PAGE_MAPPING_ANON below. - */ + union { + struct address_space *mapping; /* If low bit clear, points to + * inode address_space, or NULL. + * If page mapped as anonymous + * memory, low bit is set, and + * it points to anon_vma object: + * see PAGE_MAPPING_ANON below. + */ + void *s_mem; /* slab first object */ + }; + /* Second double word */ struct { union { pgoff_t index; /* Our offset within mapping. */ - void *freelist; /* slub/slob first free object */ + void *freelist; /* sl[aou]b first free object */ bool pfmemalloc; /* If set by the page allocator, * ALLOC_NO_WATERMARKS was set * and the low watermark was not @@ -109,6 +115,7 @@ struct page { }; atomic_t _count; /* Usage count, see below. */ }; + unsigned int active; /* SLAB */ }; }; @@ -130,6 +137,12 @@ struct page { struct list_head list; /* slobs list of pages */ struct slab *slab_page; /* slab fields */ + struct rcu_head rcu_head; /* Used by SLAB + * when destroying via RCU + */ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS + pgtable_t pmd_huge_pte; /* protected by page->ptl */ +#endif }; /* Remainder is not double word aligned */ @@ -141,9 +154,13 @@ struct page { * indicates order in the buddy * system if PG_buddy is set. 
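/*
 * Illustration only, not part of the patch: a page-table walker that used
 * to take mm->page_table_lock around a huge-pmd update is expected to use
 * the new pmd_lock() helper above instead.
 */
static void demo_touch_huge_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);	/* split lock if enabled,
						 * else mm->page_table_lock */

	/* ... inspect or modify the huge pmd entry here ... */

	spin_unlock(ptl);
}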
*/ -#if USE_SPLIT_PTLOCKS +#if USE_SPLIT_PTE_PTLOCKS +#if BLOATED_SPINLOCKS + spinlock_t *ptl; +#else spinlock_t ptl; #endif +#endif struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */ struct page *first_page; /* Compound tail pages */ }; @@ -309,14 +326,14 @@ enum { NR_MM_COUNTERS }; -#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU) +#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU) #define SPLIT_RSS_COUNTING /* per-thread cached information, */ struct task_rss_stat { int events; /* for synchronization threshold */ int count[NR_MM_COUNTERS]; }; -#endif /* USE_SPLIT_PTLOCKS */ +#endif /* USE_SPLIT_PTE_PTLOCKS */ struct mm_rss_stat { atomic_long_t count[NR_MM_COUNTERS]; @@ -339,6 +356,7 @@ struct mm_struct { pgd_t * pgd; atomic_t mm_users; /* How many users with user space? */ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ + atomic_long_t nr_ptes; /* Page table pages */ int map_count; /* number of VMAs */ spinlock_t page_table_lock; /* Protects page tables and some counters */ @@ -360,7 +378,6 @@ struct mm_struct { unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */ unsigned long stack_vm; /* VM_GROWSUP/DOWN */ unsigned long def_flags; - unsigned long nr_ptes; /* Page table pages */ unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; unsigned long arg_start, arg_end, env_start, env_end; @@ -406,7 +423,7 @@ struct mm_struct { #ifdef CONFIG_MMU_NOTIFIER struct mmu_notifier_mm *mmu_notifier_mm; #endif -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS pgtable_t pmd_huge_pte; /* protected by page_table_lock */ #endif #ifdef CONFIG_CPUMASK_OFFSTACK diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 842de3e21e70..176fdf824b14 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -240,6 +240,7 @@ struct mmc_part { struct mmc_card { struct mmc_host *host; /* the host this device belongs to */ struct device dev; /* the device */ + u32 ocr; /* the current OCR setting */ unsigned int rca; /* relative card address of device */ unsigned int type; /* card type */ #define MMC_TYPE_MMC 0 /* MMC card */ @@ -257,6 +258,7 @@ struct mmc_card { #define MMC_CARD_REMOVED (1<<7) /* card has been removed */ #define MMC_STATE_HIGHSPEED_200 (1<<8) /* card is in HS200 mode */ #define MMC_STATE_DOING_BKOPS (1<<10) /* card is doing BKOPS */ +#define MMC_STATE_SUSPENDED (1<<11) /* card is suspended */ unsigned int quirks; /* card quirks */ #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ @@ -420,10 +422,10 @@ static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data) #define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR) #define mmc_card_ddr_mode(c) ((c)->state & MMC_STATE_HIGHSPEED_DDR) #define mmc_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) -#define mmc_sd_card_uhs(c) ((c)->state & MMC_STATE_ULTRAHIGHSPEED) #define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC) #define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED)) #define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS) +#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED) #define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) #define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY) @@ -432,11 +434,12 @@ static inline void __maybe_unused remove_quirk(struct mmc_card 
*card, int data) #define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) #define mmc_card_set_ddr_mode(c) ((c)->state |= MMC_STATE_HIGHSPEED_DDR) #define mmc_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) -#define mmc_sd_card_set_uhs(c) ((c)->state |= MMC_STATE_ULTRAHIGHSPEED) #define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC) #define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED) #define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS) #define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS) +#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED) +#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED) /* * Quirk add/remove for MMC products. diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index da51bec578c3..87079fc38011 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -151,7 +151,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, struct mmc_command *, int); extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); -extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool); +extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, + bool); extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); @@ -188,7 +189,6 @@ extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); extern void mmc_release_host(struct mmc_host *host); -extern int mmc_try_claim_host(struct mmc_host *host); extern void mmc_get_card(struct mmc_card *card); extern void mmc_put_card(struct mmc_card *card); diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 198f0fa44e9f..6ce7d2cd3c7a 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -15,6 +15,7 @@ #define LINUX_MMC_DW_MMC_H #include <linux/scatterlist.h> +#include <linux/mmc/core.h> #define MAX_MCI_SLOTS 2 @@ -129,6 +130,9 @@ struct dw_mci { struct mmc_request *mrq; struct mmc_command *cmd; struct mmc_data *data; + struct mmc_command stop_abort; + unsigned int prev_blksz; + unsigned char timing; struct workqueue_struct *card_workqueue; /* DMA interface members*/ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 3b0c33ae13e1..99f5709ac343 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -254,6 +254,7 @@ struct mmc_host { #define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */ #define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */ #define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */ +#define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. 
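/*
 * Sketch only, not part of the patch: the new MMC_STATE_SUSPENDED helpers
 * above are meant to make the suspend path idempotent, roughly like this
 * (the function name is illustrative).
 */
static int demo_mmc_suspend(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int err = 0;

	if (mmc_card_suspended(card))
		return 0;			/* nothing left to do */

	/* ... quiesce the card, e.g. send it to sleep ... */

	if (!err)
		mmc_card_set_suspended(card);
	return err;
}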
*/ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ @@ -309,7 +310,6 @@ struct mmc_host { spinlock_t lock; /* lock for claim and bus ops */ struct mmc_ios ios; /* current io bus settings */ - u32 ocr; /* the current OCR setting */ /* group bitfields together to minimize padding */ unsigned int use_spi_crc:1; @@ -382,9 +382,6 @@ static inline void *mmc_priv(struct mmc_host *host) #define mmc_classdev(x) (&(x)->class_dev) #define mmc_hostname(x) (dev_name(&(x)->class_dev)) -int mmc_suspend_host(struct mmc_host *); -int mmc_resume_host(struct mmc_host *); - int mmc_power_save_host(struct mmc_host *host); int mmc_power_restore_host(struct mmc_host *host); diff --git a/include/linux/module.h b/include/linux/module.h index 05f2447f8c15..15cd6b1b211e 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -367,9 +367,6 @@ struct module /* What modules do I depend on? */ struct list_head target_list; - /* Who is waiting for us to be unloaded */ - struct task_struct *waiter; - /* Destruction function. */ void (*exit)(void); diff --git a/include/linux/net.h b/include/linux/net.h index b292a0435571..4bcee94cef93 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -164,6 +164,14 @@ struct proto_ops { #endif int (*sendmsg) (struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len); + /* Notes for implementing recvmsg: + * =============================== + * msg->msg_namelen should get updated by the recvmsg handlers + * iff msg_name != NULL. It is by default 0 to prevent + * returning uninitialized memory to user space. The recvfrom + * handlers can assume that msg.msg_name is either NULL or has + * a minimum size of sizeof(struct sockaddr_storage). + */ int (*recvmsg) (struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len, int flags); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8b3de7cc2ffc..7f0ed423a360 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1587,7 +1587,7 @@ static inline void *netdev_priv(const struct net_device *dev) #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) /* Set the sysfs device type for the network logical device to allow - * fin grained indentification of different network device types. For + * fine-grained identification of different network device types. For * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc. 
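/*
 * Sketch only, not part of the patch: a recvmsg handler following the rule
 * documented in the proto_ops hunk above - msg_namelen is only written when
 * msg_name is actually filled in, otherwise the default of 0 is returned.
 * All names here are illustrative.
 */
static int demo_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t total_len, int flags)
{
	/* ... copy the payload ... */

	if (msg->msg_name) {
		/* msg_name is at least sizeof(struct sockaddr_storage) */
		struct sockaddr_in *sin = msg->msg_name;

		sin->sin_family = AF_INET;
		sin->sin_port = 0;
		sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		msg->msg_namelen = sizeof(*sin);
	}

	return 0;
}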
*/ #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index c6f41b616965..c1637062c1ce 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -118,6 +118,9 @@ Needs to be updated if more operations are defined in future.*/ #define FIRST_NFS4_OP OP_ACCESS #define LAST_NFS4_OP OP_RECLAIM_COMPLETE +#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER +#define LAST_NFS41_OP OP_RECLAIM_COMPLETE +#define LAST_NFS42_OP OP_RECLAIM_COMPLETE enum nfsstat4 { NFS4_OK = 0, diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index d006f0ca60f4..5a462c4e5009 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -27,7 +27,7 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) while (!pci_is_root_bus(pbus)) pbus = pbus->parent; - return DEVICE_ACPI_HANDLE(pbus->bridge); + return ACPI_HANDLE(pbus->bridge); } static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) @@ -39,7 +39,7 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) else dev = &pbus->self->dev; - return DEVICE_ACPI_HANDLE(dev); + return ACPI_HANDLE(dev); } void acpi_pci_add_bus(struct pci_bus *bus); diff --git a/include/linux/phy.h b/include/linux/phy.h index 64ab823f7b74..48a4dc3cb8cf 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -559,6 +559,7 @@ static inline int phy_read_status(struct phy_device *phydev) { return phydev->drv->read_status(phydev); } +int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); int genphy_config_aneg(struct phy_device *phydev); int genphy_update_link(struct phy_device *phydev); diff --git a/include/linux/i2c/at24.h b/include/linux/platform_data/at24.h index 285025a9cdc9..c42aa89d34ee 100644 --- a/include/linux/i2c/at24.h +++ b/include/linux/platform_data/at24.h @@ -28,7 +28,7 @@ * * void get_mac_addr(struct memory_accessor *mem_acc, void *context) * { - * u8 *mac_addr = ethernet_pdata->mac_addr; + * u8 *mac_addr = ethernet_pdata->mac_addr; * off_t offset = context; * * // Read MAC addr from EEPROM diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index 179fb91bb5f2..f50821cb64be 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h @@ -67,10 +67,10 @@ struct edmacc_param { #define ITCCHEN BIT(23) /*ch_status paramater of callback function possible values*/ -#define DMA_COMPLETE 1 -#define DMA_CC_ERROR 2 -#define DMA_TC1_ERROR 3 -#define DMA_TC2_ERROR 4 +#define EDMA_DMA_COMPLETE 1 +#define EDMA_DMA_CC_ERROR 2 +#define EDMA_DMA_TC1_ERROR 3 +#define EDMA_DMA_TC2_ERROR 4 enum address_mode { INCR = 0, diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index d44912d81578..75f70f6ac137 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -10,6 +10,8 @@ #ifndef __ASM_ARCH_IMX_ESDHC_H #define __ASM_ARCH_IMX_ESDHC_H +#include <linux/types.h> + enum wp_types { ESDHC_WP_NONE, /* no WP, neither controller nor gpio */ ESDHC_WP_CONTROLLER, /* mmc controller internal WP */ @@ -32,6 +34,7 @@ enum cd_types { * @cd_gpio: gpio for card_detect interrupt * @wp_type: type of write_protect method (see wp_types enum above) * @cd_type: type of card_detect method (see cd_types enum above) + * @support_vsel: indicate it supports 1.8v switching */ struct esdhc_platform_data { @@ -41,5 +44,7 @@ struct esdhc_platform_data { enum 
cd_types cd_type; int max_bus_width; unsigned int f_max; + bool support_vsel; + unsigned int delay_line; }; #endif /* __ASM_ARCH_IMX_ESDHC_H */ diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h new file mode 100644 index 000000000000..0472ab2f6ede --- /dev/null +++ b/include/linux/platform_data/zforce_ts.h @@ -0,0 +1,26 @@ +/* drivers/input/touchscreen/zforce.c + * + * Copyright (C) 2012-2013 MundoReader S.L. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_INPUT_ZFORCE_TS_H +#define _LINUX_INPUT_ZFORCE_TS_H + +struct zforce_ts_platdata { + int gpio_int; + int gpio_rst; + + unsigned int x_max; + unsigned int y_max; +}; + +#endif /* _LINUX_INPUT_ZFORCE_TS_H */ diff --git a/include/linux/power/bq24735-charger.h b/include/linux/power/bq24735-charger.h new file mode 100644 index 000000000000..f536164a6069 --- /dev/null +++ b/include/linux/power/bq24735-charger.h @@ -0,0 +1,39 @@ +/* + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __CHARGER_BQ24735_H_ +#define __CHARGER_BQ24735_H_ + +#include <linux/types.h> +#include <linux/power_supply.h> + +struct bq24735_platform { + uint32_t charge_current; + uint32_t charge_voltage; + uint32_t input_current; + + const char *name; + + int status_gpio; + int status_gpio_active_low; + bool status_gpio_valid; + + char **supplied_to; + size_t num_supplicants; +}; + +#endif /* __CHARGER_BQ24735_H_ */ diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h index 931bc616219f..d169820203dd 100644 --- a/include/linux/preempt_mask.h +++ b/include/linux/preempt_mask.h @@ -11,36 +11,23 @@ * - bits 0-7 are the preemption count (max preemption depth: 256) * - bits 8-15 are the softirq count (max # of softirqs: 256) * - * The hardirq count can in theory reach the same as NR_IRQS. - * In reality, the number of nested IRQS is limited to the stack - * size as well. For archs with over 1000 IRQS it is not practical - * to expect that they will all nest. We give a max of 10 bits for - * hardirq nesting. An arch may choose to give less than 10 bits. - * m68k expects it to be 8. + * The hardirq count could in theory be the same as the number of + * interrupts in the system, but we run all interrupt handlers with + * interrupts disabled, so we cannot have nesting interrupts. Though + * there are a few palaeontologic drivers which reenable interrupts in + * the handler, so we need more than one bit here. 
* - * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) - * - bit 26 is the NMI_MASK - * - bit 27 is the PREEMPT_ACTIVE flag - * - * PREEMPT_MASK: 0x000000ff - * SOFTIRQ_MASK: 0x0000ff00 - * HARDIRQ_MASK: 0x03ff0000 - * NMI_MASK: 0x04000000 + * PREEMPT_MASK: 0x000000ff + * SOFTIRQ_MASK: 0x0000ff00 + * HARDIRQ_MASK: 0x000f0000 + * NMI_MASK: 0x00100000 + * PREEMPT_ACTIVE: 0x00200000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 +#define HARDIRQ_BITS 4 #define NMI_BITS 1 -#define MAX_HARDIRQ_BITS 10 - -#ifndef HARDIRQ_BITS -# define HARDIRQ_BITS MAX_HARDIRQ_BITS -#endif - -#if HARDIRQ_BITS > MAX_HARDIRQ_BITS -#error HARDIRQ_BITS too high! -#endif - #define PREEMPT_SHIFT 0 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) @@ -60,15 +47,9 @@ #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -#ifndef PREEMPT_ACTIVE #define PREEMPT_ACTIVE_BITS 1 #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) -#endif - -#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS)) -#error PREEMPT_ACTIVE is too low! -#endif #define hardirq_count() (preempt_count() & HARDIRQ_MASK) #define softirq_count() (preempt_count() & SOFTIRQ_MASK) diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index 56f4a866539a..2de2e275b2cb 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h @@ -6,6 +6,9 @@ #include <linux/backlight.h> +/* TODO: convert to gpiod_*() API once it has been merged */ +#define PWM_BACKLIGHT_GPIO_ACTIVE_LOW (1 << 0) + struct platform_pwm_backlight_data { int pwm_id; unsigned int max_brightness; @@ -13,6 +16,8 @@ struct platform_pwm_backlight_data { unsigned int lth_brightness; unsigned int pwm_period_ns; unsigned int *levels; + int enable_gpio; + unsigned long enable_gpio_flags; int (*init)(struct device *dev); int (*notify)(struct device *dev, int brightness); void (*notify_after)(struct device *dev, int brightness); diff --git a/include/linux/sched.h b/include/linux/sched.h index f7efc8604652..7e35d4b9e14a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -22,7 +22,7 @@ struct sched_param { #include <linux/errno.h> #include <linux/nodemask.h> #include <linux/mm_types.h> -#include <linux/preempt.h> +#include <linux/preempt_mask.h> #include <asm/page.h> #include <asm/ptrace.h> @@ -286,6 +286,14 @@ static inline void lockup_detector_init(void) } #endif +#ifdef CONFIG_DETECT_HUNG_TASK +void reset_hung_task_detector(void); +#else +static inline void reset_hung_task_detector(void) +{ +} +#endif + /* Attach to any functions which should be ignored in wchan output. */ #define __sched __attribute__((__section__(".sched.text"))) diff --git a/include/linux/security.h b/include/linux/security.h index 9d37e2b9d3ec..5623a7f965b7 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1052,17 +1052,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @xfrm_policy_delete_security: * @ctx contains the xfrm_sec_ctx. * Authorize deletion of xp->security. - * @xfrm_state_alloc_security: + * @xfrm_state_alloc: * @x contains the xfrm_state being added to the Security Association * Database by the XFRM system. * @sec_ctx contains the security context information being provided by * the user-level SA generation program (e.g., setkey or racoon). - * @secid contains the secid from which to take the mls portion of the context. 
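The move to a fixed four-bit hardirq field makes the layout in the comment above easy to verify. A stand-alone sketch that mirrors the header's shift arithmetic (__IRQ_MASK is reproduced here as ((1UL << x) - 1), matching its usual definition, since that part of the header is unchanged and therefore not shown in the hunk):

#include <assert.h>
#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x)) - 1)
#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
#define PREEMPT_ACTIVE	(__IRQ_MASK(1) << (NMI_SHIFT + NMI_BITS))

int main(void)
{
	/* These are exactly the values documented in the comment block. */
	assert(PREEMPT_MASK   == 0x000000ffUL);
	assert(SOFTIRQ_MASK   == 0x0000ff00UL);
	assert(HARDIRQ_MASK   == 0x000f0000UL);
	assert(NMI_MASK       == 0x00100000UL);
	assert(PREEMPT_ACTIVE == 0x00200000UL);
	printf("preempt_count layout: masks match the documented values\n");
	return 0;
}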
* Allocate a security structure to the x->security field; the security * field is initialized to NULL when the xfrm_state is allocated. Set the - * context to correspond to either sec_ctx or polsec, with the mls portion - * taken from secid in the latter case. - * Return 0 if operation was successful (memory to allocate, legal context). + * context to correspond to sec_ctx. Return 0 if operation was successful + * (memory to allocate, legal context). + * @xfrm_state_alloc_acquire: + * @x contains the xfrm_state being added to the Security Association + * Database by the XFRM system. + * @polsec contains the policy's security context. + * @secid contains the secid from which to take the mls portion of the + * context. + * Allocate a security structure to the x->security field; the security + * field is initialized to NULL when the xfrm_state is allocated. Set the + * context to correspond to secid. Return 0 if operation was successful + * (memory to allocate, legal context). * @xfrm_state_free_security: * @x contains the xfrm_state. * Deallocate x->security. @@ -1679,9 +1687,11 @@ struct security_operations { int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); - int (*xfrm_state_alloc_security) (struct xfrm_state *x, - struct xfrm_user_sec_ctx *sec_ctx, - u32 secid); + int (*xfrm_state_alloc) (struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx); + int (*xfrm_state_alloc_acquire) (struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, + u32 secid); void (*xfrm_state_free_security) (struct xfrm_state *x); int (*xfrm_state_delete_security) (struct xfrm_state *x); int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 4e32edc8f506..52e0097f61f0 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -20,6 +20,7 @@ struct seq_file { size_t size; size_t from; size_t count; + size_t pad_until; loff_t index; loff_t read_pos; u64 version; @@ -79,6 +80,20 @@ static inline void seq_commit(struct seq_file *m, int num) } } +/** + * seq_setwidth - set padding width + * @m: the seq_file handle + * @size: the max number of bytes to pad. + * + * Call seq_setwidth() for setting max width, then call seq_printf() etc. and + * finally call seq_pad() to pad the remaining bytes. + */ +static inline void seq_setwidth(struct seq_file *m, size_t size) +{ + m->pad_until = m->count + size; +} +void seq_pad(struct seq_file *m, char c); + char *mangle_path(char *s, const char *p, const char *esc); int seq_open(struct file *, const struct seq_operations *); ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 1e8a8b6e837d..cf87a24c0f92 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -354,6 +354,35 @@ static inline void read_sequnlock_excl(seqlock_t *sl) spin_unlock(&sl->lock); } +/** + * read_seqbegin_or_lock - begin a sequence number check or locking block + * @lock: sequence lock + * @seq : sequence number to be checked + * + * First try it once optimistically without taking the lock. If that fails, + * take the lock. The sequence number is also used as a marker for deciding + * whether to be a reader (even) or writer (odd). + * N.B. seq must be initialized to an even number to begin with. 
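seq_setwidth() and seq_pad() above are meant to replace hand-rolled fixed-width formatting in seq_file show methods: set the field width first, print normally, then pad whatever is left. A rough sketch of the intended call sequence, with a made-up record and a 64-column width (seq_pad() itself lives in fs/seq_file.c, outside this header, and is expected to space-fill up to the width and then append the trailing character):

#include <linux/seq_file.h>

/* Hypothetical ->show() callback emitting one fixed-width record per line. */
static int example_show(struct seq_file *m, void *v)
{
	seq_setwidth(m, 64);			/* pad_until = current count + 64 */
	seq_printf(m, "%-16s %8d", "example-name", 42);
	seq_pad(m, '\n');			/* pad to the width, then newline */
	return 0;
}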
+ */ +static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) +{ + if (!(*seq & 1)) /* Even */ + *seq = read_seqbegin(lock); + else /* Odd */ + read_seqlock_excl(lock); +} + +static inline int need_seqretry(seqlock_t *lock, int seq) +{ + return !(seq & 1) && read_seqretry(lock, seq); +} + +static inline void done_seqretry(seqlock_t *lock, int seq) +{ + if (seq & 1) + read_sequnlock_excl(lock); +} + static inline void read_seqlock_excl_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 215b5ea1cb30..bec1cc7d5e3c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2263,24 +2263,6 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); -/** - * pskb_trim_rcsum - trim received skb and update checksum - * @skb: buffer to trim - * @len: new length - * - * This is exactly the same as pskb_trim except that it ensures the - * checksum of received packets are still valid after the operation. - */ - -static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) -{ - if (likely(len >= skb->len)) - return 0; - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->ip_summed = CHECKSUM_NONE; - return __pskb_trim(skb, len); -} - #define skb_queue_walk(queue, skb) \ for (skb = (queue)->next; \ skb != (struct sk_buff *)(queue); \ @@ -2378,6 +2360,27 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); +/** + * pskb_trim_rcsum - trim received skb and update checksum + * @skb: buffer to trim + * @len: new length + * + * This is exactly the same as pskb_trim except that it ensures the + * checksum of received packets are still valid after the operation. + */ + +static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (likely(len >= skb->len)) + return 0; + if (skb->ip_summed == CHECKSUM_COMPLETE) { + __wsum adj = skb_checksum(skb, len, skb->len - len, 0); + + skb->csum = csum_sub(skb->csum, adj); + } + return __pskb_trim(skb, len); +} + static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) { diff --git a/include/linux/slab.h b/include/linux/slab.h index 74f105847d13..c2bba248fa63 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -53,7 +53,14 @@ * } * rcu_read_unlock(); * - * See also the comment on struct slab_rcu in mm/slab.c. + * This is useful if we need to approach a kernel structure obliquely, + * from its address obtained without the usual locking. We can lock + * the structure to stabilize it and check it's still at the given address, + * only if we can be sure that the memory has not been meanwhile reused + * for some other kind of object (which our subsystem's lock might corrupt). + * + * rcu_read_lock before reading the address, then rcu_read_unlock after + * taking the spinlock within the structure expected at that address. 
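read_seqbegin_or_lock(), need_seqretry() and done_seqretry() above are designed to be used together: try one lockless pass, and fall back to taking the lock only if the sequence check fails. A sketch of the expected calling pattern (the lock and the data it protects are placeholders):

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(example_lock);
static int example_value;		/* protected by example_lock */

static int read_example(void)
{
	int seq = 0;			/* must start even: first pass is lockless */
	int val;

	do {
		read_seqbegin_or_lock(&example_lock, &seq);
		val = example_value;
		if (!need_seqretry(&example_lock, seq))
			break;
		seq = 1;		/* odd: retry with the lock held */
	} while (1);
	done_seqretry(&example_lock, seq);	/* drops the lock only if it was taken */

	return val;
}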
*/ #define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index e9346b4f1ef4..09bfffb08a56 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -27,8 +27,8 @@ struct kmem_cache { size_t colour; /* cache colouring range */ unsigned int colour_off; /* colour offset */ - struct kmem_cache *slabp_cache; - unsigned int slab_size; + struct kmem_cache *freelist_cache; + unsigned int freelist_size; /* constructor func */ void (*ctor)(void *obj); diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index cc0b67eada42..f56bfa9e4526 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -11,7 +11,7 @@ enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ - FREE_FASTPATH, /* Free to cpu slub */ + FREE_FASTPATH, /* Free to cpu slab */ FREE_SLOWPATH, /* Freeing not to cpu slab */ FREE_FROZEN, /* Freeing to frozen slab */ FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 731f5237d5f4..5da22ee42e16 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -49,6 +49,9 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), smp_call_func_t func, void *info, bool wait, gfp_t gfp_flags); +void __smp_call_function_single(int cpuid, struct call_single_data *data, + int wait); + #ifdef CONFIG_SMP #include <linux/preempt.h> @@ -95,9 +98,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait); void smp_call_function_many(const struct cpumask *mask, smp_call_func_t func, void *info, bool wait); -void __smp_call_function_single(int cpuid, struct call_single_data *data, - int wait); - int smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, void *info, int wait); @@ -106,14 +106,10 @@ void kick_all_cpus_sync(void); /* * Generic and arch helpers */ -#ifdef CONFIG_USE_GENERIC_SMP_HELPERS void __init call_function_init(void); void generic_smp_call_function_single_interrupt(void); #define generic_smp_call_function_interrupt \ generic_smp_call_function_single_interrupt -#else -static inline void call_function_init(void) { } -#endif /* * Mark the boot cpu "online" so that it can call console drivers in @@ -155,12 +151,6 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, static inline void kick_all_cpus_sync(void) { } -static inline void __smp_call_function_single(int cpuid, - struct call_single_data *data, int wait) -{ - on_each_cpu(data->func, data->info, wait); -} - #endif /* !SMP */ /* diff --git a/include/linux/srcu.h b/include/linux/srcu.h index c114614ed172..9b058eecd403 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -237,4 +237,18 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) __srcu_read_unlock(sp, idx); } +/** + * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock + * + * Converts the preceding srcu_read_unlock into a two-way memory barrier. + * + * Call this after srcu_read_unlock, to guarantee that all memory operations + * that occur after smp_mb__after_srcu_read_unlock will appear to happen after + * the preceding srcu_read_unlock. + */ +static inline void smp_mb__after_srcu_read_unlock(void) +{ + /* __srcu_read_unlock has smp_mb() internally so nothing to do here. 
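Because __srcu_read_unlock() already executes smp_mb(), smp_mb__after_srcu_read_unlock() above compiles to nothing; it exists so a caller can rely on (and document) full ordering between the unlock and whatever follows, without paying for a second barrier. A hedged sketch of the intended shape, with the srcu_struct and the flag as placeholders:

#include <linux/srcu.h>

static struct srcu_struct example_srcu;	/* assumed init_srcu_struct()'d elsewhere */
static int example_done;

static void example_reader(void)
{
	int idx;

	idx = srcu_read_lock(&example_srcu);
	/* ... SRCU read-side critical section ... */
	srcu_read_unlock(&example_srcu, idx);

	/*
	 * The store below is guaranteed to be ordered after the
	 * srcu_read_unlock() above.
	 */
	smp_mb__after_srcu_read_unlock();
	example_done = 1;
}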
*/ +} + #endif diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 8d4fa82bfb91..c0f75261a728 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -139,7 +139,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry) extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address); -extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte); +extern void migration_entry_wait_huge(struct vm_area_struct *vma, + struct mm_struct *mm, pte_t *pte); #else #define make_migration_entry(page, write) swp_entry(0, 0) @@ -151,8 +152,8 @@ static inline int is_migration_entry(swp_entry_t swp) static inline void make_migration_entry_read(swp_entry_t *entryp) { } static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } -static inline void migration_entry_wait_huge(struct mm_struct *mm, - pte_t *pte) { } +static inline void migration_entry_wait_huge(struct vm_area_struct *vma, + struct mm_struct *mm, pte_t *pte) { } static inline int is_write_migration_entry(swp_entry_t entry) { return 0; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index c27f846f6b71..94273bbe6050 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -120,7 +120,7 @@ extern struct trace_event_functions exit_syscall_print_funcs; .class = &event_class_syscall_enter, \ .event.funcs = &enter_syscall_print_funcs, \ .data = (void *)&__syscall_meta_##sname,\ - .flags = TRACE_EVENT_FL_CAP_ANY, \ + .flags = TRACE_EVENT_FL_CAP_ANY, \ }; \ static struct ftrace_event_call __used \ __attribute__((section("_ftrace_events"))) \ @@ -134,7 +134,7 @@ extern struct trace_event_functions exit_syscall_print_funcs; .class = &event_class_syscall_exit, \ .event.funcs = &exit_syscall_print_funcs, \ .data = (void *)&__syscall_meta_##sname,\ - .flags = TRACE_EVENT_FL_CAP_ANY, \ + .flags = TRACE_EVENT_FL_CAP_ANY, \ }; \ static struct ftrace_event_call __used \ __attribute__((section("_ftrace_events"))) \ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 4db29859464f..4836ba3c1cd8 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -27,6 +27,12 @@ struct user_namespace { kuid_t owner; kgid_t group; unsigned int proc_inum; + + /* Register of per-UID persistent keyrings for this namespace */ +#ifdef CONFIG_PERSISTENT_KEYRINGS + struct key *persistent_keyring_register; + struct rw_semaphore persistent_keyring_register_sem; +#endif }; extern struct user_namespace init_user_ns; diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 36d36cc89329..e4abb84199be 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -51,11 +51,11 @@ int virtqueue_add_sgs(struct virtqueue *vq, void *data, gfp_t gfp); -void virtqueue_kick(struct virtqueue *vq); +bool virtqueue_kick(struct virtqueue *vq); bool virtqueue_kick_prepare(struct virtqueue *vq); -void virtqueue_notify(struct virtqueue *vq); +bool virtqueue_notify(struct virtqueue *vq); void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len); @@ -73,6 +73,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq); unsigned int virtqueue_get_vring_size(struct virtqueue *vq); +bool virtqueue_is_broken(struct virtqueue *vq); + /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 29b9104232b4..e8f8f71e843c 100644 --- 
a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -96,33 +96,6 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, return test_bit(fbit, vdev->features); } -/** - * virtio_config_val - look for a feature and get a virtio config entry. - * @vdev: the virtio device - * @fbit: the feature bit - * @offset: the type to search for. - * @v: a pointer to the value to fill in. - * - * The return value is -ENOENT if the feature doesn't exist. Otherwise - * the config value is copied into whatever is pointed to by v. */ -#define virtio_config_val(vdev, fbit, offset, v) \ - virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v)) - -#define virtio_config_val_len(vdev, fbit, offset, v, len) \ - virtio_config_buf((vdev), (fbit), (offset), (v), (len)) - -static inline int virtio_config_buf(struct virtio_device *vdev, - unsigned int fbit, - unsigned int offset, - void *buf, unsigned len) -{ - if (!virtio_has_feature(vdev, fbit)) - return -ENOENT; - - vdev->config->get(vdev, offset, buf, len); - return 0; -} - static inline struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, vq_callback_t *c, const char *n) @@ -162,5 +135,139 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu) return 0; } +/* Config space accessors. */ +#define virtio_cread(vdev, structname, member, ptr) \ + do { \ + /* Must match the member's type, and be integer */ \ + if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ + (*ptr) = 1; \ + \ + switch (sizeof(*ptr)) { \ + case 1: \ + *(ptr) = virtio_cread8(vdev, \ + offsetof(structname, member)); \ + break; \ + case 2: \ + *(ptr) = virtio_cread16(vdev, \ + offsetof(structname, member)); \ + break; \ + case 4: \ + *(ptr) = virtio_cread32(vdev, \ + offsetof(structname, member)); \ + break; \ + case 8: \ + *(ptr) = virtio_cread64(vdev, \ + offsetof(structname, member)); \ + break; \ + default: \ + BUG(); \ + } \ + } while(0) + +/* Config space accessors. 
*/ +#define virtio_cwrite(vdev, structname, member, ptr) \ + do { \ + /* Must match the member's type, and be integer */ \ + if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ + BUG_ON((*ptr) == 1); \ + \ + switch (sizeof(*ptr)) { \ + case 1: \ + virtio_cwrite8(vdev, \ + offsetof(structname, member), \ + *(ptr)); \ + break; \ + case 2: \ + virtio_cwrite16(vdev, \ + offsetof(structname, member), \ + *(ptr)); \ + break; \ + case 4: \ + virtio_cwrite32(vdev, \ + offsetof(structname, member), \ + *(ptr)); \ + break; \ + case 8: \ + virtio_cwrite64(vdev, \ + offsetof(structname, member), \ + *(ptr)); \ + break; \ + default: \ + BUG(); \ + } \ + } while(0) + +static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset) +{ + u8 ret; + vdev->config->get(vdev, offset, &ret, sizeof(ret)); + return ret; +} + +static inline void virtio_cread_bytes(struct virtio_device *vdev, + unsigned int offset, + void *buf, size_t len) +{ + vdev->config->get(vdev, offset, buf, len); +} + +static inline void virtio_cwrite8(struct virtio_device *vdev, + unsigned int offset, u8 val) +{ + vdev->config->set(vdev, offset, &val, sizeof(val)); +} + +static inline u16 virtio_cread16(struct virtio_device *vdev, + unsigned int offset) +{ + u16 ret; + vdev->config->get(vdev, offset, &ret, sizeof(ret)); + return ret; +} + +static inline void virtio_cwrite16(struct virtio_device *vdev, + unsigned int offset, u16 val) +{ + vdev->config->set(vdev, offset, &val, sizeof(val)); +} + +static inline u32 virtio_cread32(struct virtio_device *vdev, + unsigned int offset) +{ + u32 ret; + vdev->config->get(vdev, offset, &ret, sizeof(ret)); + return ret; +} + +static inline void virtio_cwrite32(struct virtio_device *vdev, + unsigned int offset, u32 val) +{ + vdev->config->set(vdev, offset, &val, sizeof(val)); +} + +static inline u64 virtio_cread64(struct virtio_device *vdev, + unsigned int offset) +{ + u64 ret; + vdev->config->get(vdev, offset, &ret, sizeof(ret)); + return ret; +} + +static inline void virtio_cwrite64(struct virtio_device *vdev, + unsigned int offset, u64 val) +{ + vdev->config->set(vdev, offset, &val, sizeof(val)); +} + +/* Conditional config space accessors. 
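Taken together, the virtio_cread*/virtio_cwrite* helpers above replace open-coded vdev->config->get()/set() calls with accessors that check both the size and the type of the field being touched. A usage sketch against a made-up config layout (a real driver would use the structure defined for its device class, e.g. struct virtio_blk_config):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Illustrative config layout, not from any virtio spec. */
struct example_config {
	__u64 capacity;
	__u8  status;
};

static void example_read_config(struct virtio_device *vdev)
{
	u64 capacity;
	u8 status = 0;

	/* Expands to virtio_cread64() because the member is 8 bytes wide. */
	virtio_cread(vdev, struct example_config, capacity, &capacity);
	dev_info(&vdev->dev, "capacity: %llu\n",
		 (unsigned long long)capacity);

	/* Expands to virtio_cwrite8(); a type mismatch warns at build time. */
	virtio_cwrite(vdev, struct example_config, status, &status);
}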
*/ +#define virtio_cread_feature(vdev, fbit, structname, member, ptr) \ + ({ \ + int _r = 0; \ + if (!virtio_has_feature(vdev, fbit)) \ + _r = -ENOENT; \ + else \ + virtio_cread((vdev), structname, member, ptr); \ + _r; \ + }) #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index b300787af8e0..67e06fe18c03 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -71,7 +71,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index, struct virtio_device *vdev, bool weak_barriers, void *pages, - void (*notify)(struct virtqueue *vq), + bool (*notify)(struct virtqueue *vq), void (*callback)(struct virtqueue *vq), const char *name); void vring_del_virtqueue(struct virtqueue *vq); diff --git a/include/linux/wait.h b/include/linux/wait.h index 61939ba30aa0..eaa00b10abaa 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -278,6 +278,31 @@ do { \ __ret; \ }) +#define __wait_event_cmd(wq, condition, cmd1, cmd2) \ + (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ + cmd1; schedule(); cmd2) + +/** + * wait_event_cmd - sleep until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * cmd1: the command will be executed before sleep + * cmd2: the command will be executed after sleep + * + * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the + * @condition evaluates to true. The @condition is checked each time + * the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + */ +#define wait_event_cmd(wq, condition, cmd1, cmd2) \ +do { \ + if (condition) \ + break; \ + __wait_event_cmd(wq, condition, cmd1, cmd2); \ +} while (0) + #define __wait_event_interruptible(wq, condition) \ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ schedule()) |
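wait_event_cmd() is for callers that need to run extra code around the schedule() call, most obviously dropping a lock before sleeping and re-taking it afterwards so that the condition is always re-checked with the lock held. A rough sketch with placeholder names:

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(example_lock);
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_ready;		/* protected by example_lock */

static void wait_for_ready(void)
{
	spin_lock_irq(&example_lock);
	/* Sleeps with example_lock dropped, re-checks example_ready with it held. */
	wait_event_cmd(example_wq, example_ready,
		       spin_unlock_irq(&example_lock),
		       spin_lock_irq(&example_lock));
	spin_unlock_irq(&example_lock);
}

static void set_ready(void)
{
	spin_lock_irq(&example_lock);
	example_ready = 1;
	spin_unlock_irq(&example_lock);
	wake_up(&example_wq);
}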