Diffstat (limited to 'include')
437 files changed, 13225 insertions, 3053 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h index 88cb477524a6..d5ec6c87810f 100644 --- a/include/acpi/acbuffer.h +++ b/include/acpi/acbuffer.h @@ -111,7 +111,9 @@ struct acpi_gtm_info { struct acpi_pld_info { u8 revision; u8 ignore_color; - u32 color; + u8 red; + u8 green; + u8 blue; u16 width; u16 height; u8 user_visible; @@ -155,8 +157,14 @@ struct acpi_pld_info { #define ACPI_PLD_GET_IGNORE_COLOR(dword) ACPI_GET_BITS (dword, 7, ACPI_1BIT_MASK) #define ACPI_PLD_SET_IGNORE_COLOR(dword,value) ACPI_SET_BITS (dword, 7, ACPI_1BIT_MASK, value) /* Offset 7, Len 1 */ -#define ACPI_PLD_GET_COLOR(dword) ACPI_GET_BITS (dword, 8, ACPI_24BIT_MASK) -#define ACPI_PLD_SET_COLOR(dword,value) ACPI_SET_BITS (dword, 8, ACPI_24BIT_MASK, value) /* Offset 8, Len 24 */ +#define ACPI_PLD_GET_RED(dword) ACPI_GET_BITS (dword, 8, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_RED(dword,value) ACPI_SET_BITS (dword, 8, ACPI_8BIT_MASK, value) /* Offset 8, Len 8 */ + +#define ACPI_PLD_GET_GREEN(dword) ACPI_GET_BITS (dword, 16, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_GREEN(dword,value) ACPI_SET_BITS (dword, 16, ACPI_8BIT_MASK, value) /* Offset 16, Len 8 */ + +#define ACPI_PLD_GET_BLUE(dword) ACPI_GET_BITS (dword, 24, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_BLUE(dword,value) ACPI_SET_BITS (dword, 24, ACPI_8BIT_MASK, value) /* Offset 24, Len 8 */ /* Second 32-bit dword, bits 33:63 */ diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index f34a0835aa4f..61e32ec1fc4d 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -27,6 +27,7 @@ #define __ACPI_BUS_H__ #include <linux/device.h> +#include <linux/property.h> /* TBD: Make dynamic */ #define ACPI_MAX_HANDLES 10 @@ -312,6 +313,7 @@ struct acpi_device_wakeup_flags { u8 valid:1; /* Can successfully enable wakeup? */ u8 run_wake:1; /* Run-Wake GPE devices */ u8 notifier_present:1; /* Wake-up notify handler has been installed */ + u8 enabled:1; /* Enabled for wakeup */ }; struct acpi_device_wakeup_context { @@ -337,10 +339,20 @@ struct acpi_device_physical_node { bool put_online:1; }; +/* ACPI Device Specific Data (_DSD) */ +struct acpi_device_data { + const union acpi_object *pointer; + const union acpi_object *properties; + const union acpi_object *of_compatible; +}; + +struct acpi_gpio_mapping; + /* Device */ struct acpi_device { int device_type; acpi_handle handle; /* no handle for fixed hardware */ + struct fwnode_handle fwnode; struct acpi_device *parent; struct list_head children; struct list_head node; @@ -353,17 +365,35 @@ struct acpi_device { struct acpi_device_wakeup wakeup; struct acpi_device_perf performance; struct acpi_device_dir dir; + struct acpi_device_data data; struct acpi_scan_handler *handler; struct acpi_hotplug_context *hp; struct acpi_driver *driver; + const struct acpi_gpio_mapping *driver_gpios; void *driver_data; struct device dev; unsigned int physical_node_count; + unsigned int dep_unmet; struct list_head physical_node_list; struct mutex physical_node_lock; void (*remove)(struct acpi_device *); }; +static inline bool is_acpi_node(struct fwnode_handle *fwnode) +{ + return fwnode && fwnode->type == FWNODE_ACPI; +} + +static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode) +{ + return fwnode ? 
container_of(fwnode, struct acpi_device, fwnode) : NULL; +} + +static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) +{ + return &adev->fwnode; +} + static inline void *acpi_driver_data(struct acpi_device *d) { return d->driver_data; @@ -516,6 +546,7 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, void (*work_func)(struct work_struct *work)); acpi_status acpi_remove_pm_notifier(struct acpi_device *adev); int acpi_pm_device_sleep_state(struct device *, int *, int); +int acpi_pm_device_run_wake(struct device *, bool); #else static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, @@ -535,11 +566,6 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ? m : ACPI_STATE_D0; } -#endif - -#ifdef CONFIG_PM_RUNTIME -int acpi_pm_device_run_wake(struct device *, bool); -#else static inline int acpi_pm_device_run_wake(struct device *dev, bool enable) { return -ENODEV; diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index ab2acf629a64..5ba78464c1b1 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -46,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20140926 +#define ACPI_CA_VERSION 0x20141107 #include <acpi/acconfig.h> #include <acpi/actypes.h> diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 7000e66f768e..bbef17368e49 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -736,6 +736,10 @@ typedef u32 acpi_event_status; #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 #define ACPI_GPE_CONDITIONAL_ENABLE 2 +#define ACPI_GPE_SAVE_MASK 4 + +#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK) +#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK) /* * GPE info flags - Per GPE diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 9b9b6f29bbf3..3ca9b751f122 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -67,9 +67,6 @@ struct acpi_processor_cx { }; struct acpi_processor_power { - struct acpi_processor_cx *state; - unsigned long bm_check_timestamp; - u32 default_state; int count; struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER]; int timer_broadcast_on_state; @@ -313,11 +310,13 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) #endif /* CONFIG_CPU_FREQ */ /* in processor_core.c */ -void acpi_processor_set_pdc(acpi_handle handle); int acpi_get_apicid(acpi_handle, int type, u32 acpi_id); int acpi_map_cpuid(int apic_id, u32 acpi_id); int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); +/* in processor_pdc.c */ +void acpi_processor_set_pdc(acpi_handle handle); + /* in processor_throttling.c */ int acpi_processor_tstate_has_changed(struct acpi_processor *pr); int acpi_processor_get_throttling_info(struct acpi_processor *pr); diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 1402fa855388..f5c40b0fadc2 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -42,6 +42,14 @@ #define wmb() mb() #endif +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif + +#ifndef dma_wmb +#define dma_wmb() wmb() +#endif + #ifndef read_barrier_depends #define read_barrier_depends() do { } while (0) #endif diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h deleted file mode 100644 index b6312843dbd9..000000000000 --- a/include/asm-generic/hash.h +++ /dev/null @@ -1,9 
+0,0 @@ -#ifndef __ASM_GENERIC_HASH_H -#define __ASM_GENERIC_HASH_H - -struct fast_hash_ops; -static inline void setup_arch_fast_hash(struct fast_hash_ops *ops) -{ -} - -#endif /* __ASM_GENERIC_HASH_H */ diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 752e30d63904..177d5973b132 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -103,6 +103,17 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif +#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp, + int full) +{ + return pmdp_get_and_clear(mm, address, pmdp); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index aa70cbda327c..bee5d683074d 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -164,6 +164,7 @@ #define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) +#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) #define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) @@ -497,6 +498,7 @@ CLK_OF_TABLES() \ RESERVEDMEM_OF_TABLES() \ CLKSRC_OF_TABLES() \ + IOMMU_OF_TABLES() \ CPU_METHOD_OF_TABLES() \ KERNEL_DTB() \ IRQCHIP_OF_MATCH_TABLE() \ diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 74b13ec1ebd4..98abda9ed3aa 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -17,6 +17,32 @@ struct crypto_ahash; +/** + * DOC: Message Digest Algorithm Definitions + * + * These data structures define modular message digest algorithm + * implementations, managed via crypto_register_ahash(), + * crypto_register_shash(), crypto_unregister_ahash() and + * crypto_unregister_shash(). + */ + +/** + * struct hash_alg_common - define properties of message digest + * @digestsize: Size of the result of the transformation. A buffer of this size + * must be available to the @final and @finup calls, so they can + * store the resulting hash into it. For various predefined sizes, + * search include/crypto/ using + * git grep _DIGEST_SIZE include/crypto. + * @statesize: Size of the block for partial state of the transformation. A + * buffer of this size must be passed to the @export function as it + * will save the partial state of the transformation into it. On the + * other side, the @import function will load the state from a + * buffer of this size as well. + * @base: Start of data structure of cipher algorithm. The common data + * structure of crypto_alg contains information common to all ciphers. + * The hash_alg_common data structure now adds the hash-specific + * information. + */ struct hash_alg_common { unsigned int digestsize; unsigned int statesize; @@ -37,6 +63,63 @@ struct ahash_request { void *__ctx[] CRYPTO_MINALIGN_ATTR; }; +/** + * struct ahash_alg - asynchronous message digest definition + * @init: Initialize the transformation context. 
Intended only to initialize the + * state of the HASH transformation at the begining. This shall fill in + * the internal structures used during the entire duration of the whole + * transformation. No data processing happens at this point. + * @update: Push a chunk of data into the driver for transformation. This + * function actually pushes blocks of data from upper layers into the + * driver, which then passes those to the hardware as seen fit. This + * function must not finalize the HASH transformation by calculating the + * final message digest as this only adds more data into the + * transformation. This function shall not modify the transformation + * context, as this function may be called in parallel with the same + * transformation object. Data processing can happen synchronously + * [SHASH] or asynchronously [AHASH] at this point. + * @final: Retrieve result from the driver. This function finalizes the + * transformation and retrieves the resulting hash from the driver and + * pushes it back to upper layers. No data processing happens at this + * point. + * @finup: Combination of @update and @final. This function is effectively a + * combination of @update and @final calls issued in sequence. As some + * hardware cannot do @update and @final separately, this callback was + * added to allow such hardware to be used at least by IPsec. Data + * processing can happen synchronously [SHASH] or asynchronously [AHASH] + * at this point. + * @digest: Combination of @init and @update and @final. This function + * effectively behaves as the entire chain of operations, @init, + * @update and @final issued in sequence. Just like @finup, this was + * added for hardware which cannot do even the @finup, but can only do + * the whole transformation in one run. Data processing can happen + * synchronously [SHASH] or asynchronously [AHASH] at this point. + * @setkey: Set optional key used by the hashing algorithm. Intended to push + * optional key used by the hashing algorithm from upper layers into + * the driver. This function can store the key in the transformation + * context or can outright program it into the hardware. In the former + * case, one must be careful to program the key into the hardware at + * appropriate time and one must be careful that .setkey() can be + * called multiple times during the existence of the transformation + * object. Not all hashing algorithms do implement this function as it + * is only needed for keyed message digests. SHAx/MDx/CRCx do NOT + * implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement + * this function. This function must be called before any other of the + * @init, @update, @final, @finup, @digest is called. No data + * processing happens at this point. + * @export: Export partial state of the transformation. This function dumps the + * entire state of the ongoing transformation into a provided block of + * data so it can be @import 'ed back later on. This is useful in case + * you want to save partial result of the transformation after + * processing certain amount of data and reload this partial result + * multiple times later on for multiple re-use. No data processing + * happens at this point. + * @import: Import partial state of the transformation. This function loads the + * entire state of the ongoing transformation from a provided block of + * data so the transformation can continue from this point onward. No + * data processing happens at this point. 
+ * @halg: see struct hash_alg_common + */ struct ahash_alg { int (*init)(struct ahash_request *req); int (*update)(struct ahash_request *req); @@ -63,6 +146,23 @@ struct shash_desc { crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc +/** + * struct shash_alg - synchronous message digest definition + * @init: see struct ahash_alg + * @update: see struct ahash_alg + * @final: see struct ahash_alg + * @finup: see struct ahash_alg + * @digest: see struct ahash_alg + * @export: see struct ahash_alg + * @import: see struct ahash_alg + * @setkey: see struct ahash_alg + * @digestsize: see struct ahash_alg + * @statesize: see struct ahash_alg + * @descsize: Size of the operational state for the message digest. This state + * size is the memory size that needs to be allocated for + * shash_desc.__ctx + * @base: internally used + */ struct shash_alg { int (*init)(struct shash_desc *desc); int (*update)(struct shash_desc *desc, const u8 *data, @@ -107,11 +207,35 @@ struct crypto_shash { struct crypto_tfm base; }; +/** + * DOC: Asynchronous Message Digest API + * + * The asynchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto) + * + * The asynchronous cipher operation discussion provided for the + * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well. + */ + static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) { return container_of(tfm, struct crypto_ahash, base); } +/** + * crypto_alloc_ahash() - allocate ahash cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an ahash. The returned struct + * crypto_ahash is the cipher handle that is required for any subsequent + * API invocation for that ahash. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, u32 mask); @@ -120,6 +244,10 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) return &tfm->base; } +/** + * crypto_free_ahash() - zeroize and free the ahash handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_ahash(struct crypto_ahash *tfm) { crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); @@ -143,6 +271,16 @@ static inline struct hash_alg_common *crypto_hash_alg_common( return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); } +/** + * crypto_ahash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * + * Return: message digest size of cipher + */ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) { return crypto_hash_alg_common(tfm)->digestsize; @@ -168,12 +306,32 @@ static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags) crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags); } +/** + * crypto_ahash_reqtfm() - obtain cipher handle from request + * @req: asynchronous request handle that contains the reference to the ahash + * cipher handle + * + * Return the ahash cipher handle that is registered with the asynchronous + * request handle ahash_request. 
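The callback semantics spelled out above for struct ahash_alg and struct shash_alg are easiest to see in a registration example. The following is a hedged sketch only: the "xor32" transform, its driver name and its context layout are invented for illustration and are not part of this diff; a real message digest would implement a cryptographically meaningful update/final pair and usually export/import as well.

#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <linux/string.h>

/* Toy 32-bit XOR "digest"; illustrative only, not a real hash. */
struct xor32_ctx {
        u32 sum;
};

static int xor32_init(struct shash_desc *desc)
{
        struct xor32_ctx *ctx = shash_desc_ctx(desc);

        ctx->sum = 0;
        return 0;
}

static int xor32_update(struct shash_desc *desc, const u8 *data,
                        unsigned int len)
{
        struct xor32_ctx *ctx = shash_desc_ctx(desc);

        while (len--)
                ctx->sum ^= (u32)*data++ << ((len % 4) * 8);
        return 0;
}

static int xor32_final(struct shash_desc *desc, u8 *out)
{
        struct xor32_ctx *ctx = shash_desc_ctx(desc);

        memcpy(out, &ctx->sum, sizeof(ctx->sum));
        return 0;
}

static struct shash_alg xor32_alg = {
        .digestsize = 4,
        .descsize   = sizeof(struct xor32_ctx),
        .init       = xor32_init,
        .update     = xor32_update,
        .final      = xor32_final,
        .base       = {
                .cra_name        = "xor32",
                .cra_driver_name = "xor32-generic",
                .cra_priority    = 100,
                .cra_blocksize   = 1,
                .cra_module      = THIS_MODULE,
        },
};

static int __init xor32_mod_init(void)
{
        return crypto_register_shash(&xor32_alg);
}

static void __exit xor32_mod_exit(void)
{
        crypto_unregister_shash(&xor32_alg);
}

module_init(xor32_mod_init);
module_exit(xor32_mod_exit);
MODULE_LICENSE("GPL");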
+ * + * Return: ahash cipher handle + */ static inline struct crypto_ahash *crypto_ahash_reqtfm( struct ahash_request *req) { return __crypto_ahash_cast(req->base.tfm); } +/** + * crypto_ahash_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return the size of the ahash state size. With the crypto_ahash_export + * function, the caller can export the state into a buffer whose size is + * defined with this function. + * + * Return: size of the ahash state + */ static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) { return tfm->reqsize; @@ -184,38 +342,166 @@ static inline void *ahash_request_ctx(struct ahash_request *req) return req->__ctx; } +/** + * crypto_ahash_setkey - set key for cipher handle + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the ahash cipher. The cipher + * handle must point to a keyed hash in order for this function to succeed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); + +/** + * crypto_ahash_finup() - update and finalize message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of + * crypto_ahash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_ahash_finup(struct ahash_request *req); + +/** + * crypto_ahash_final() - calculate message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer registered with the ahash_request handle. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_ahash_final(struct ahash_request *req); + +/** + * crypto_ahash_digest() - calculate message digest for a buffer + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of crypto_ahash_init, + * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_ahash_digest(struct ahash_request *req); +/** + * crypto_ahash_export() - extract current message digest state + * @req: reference to the ahash_request handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the ahash_request handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_ahash_reqsize). 
+ * + * Return: 0 if the export was successful; < 0 if an error occurred + */ static inline int crypto_ahash_export(struct ahash_request *req, void *out) { return crypto_ahash_reqtfm(req)->export(req, out); } +/** + * crypto_ahash_import() - import message digest state + * @req: reference to ahash_request handle the state is imported into + * @in: buffer holding the state + * + * This function imports the hash state into the ahash_request handle from the + * input buffer. That buffer should have been generated with the + * crypto_ahash_export function. + * + * Return: 0 if the import was successful; < 0 if an error occurred + */ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) { return crypto_ahash_reqtfm(req)->import(req, in); } +/** + * crypto_ahash_init() - (re)initialize message digest handle + * @req: ahash_request handle that already is initialized with all necessary + * data using the ahash_request_* API functions + * + * The call (re-)initializes the message digest referenced by the ahash_request + * handle. Any potentially existing state created by previous operations is + * discarded. + * + * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred + */ static inline int crypto_ahash_init(struct ahash_request *req) { return crypto_ahash_reqtfm(req)->init(req); } +/** + * crypto_ahash_update() - add data to message digest for processing + * @req: ahash_request handle that was previously initialized with the + * crypto_ahash_init call. + * + * Updates the message digest state of the &ahash_request handle. The input data + * is pointed to by the scatter/gather list registered in the &ahash_request + * handle + * + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred + */ static inline int crypto_ahash_update(struct ahash_request *req) { return crypto_ahash_reqtfm(req)->update(req); } +/** + * DOC: Asynchronous Hash Request Handle + * + * The &ahash_request data structure contains all pointers to data + * required for the asynchronous cipher operation. This includes the cipher + * handle (which can be used by multiple &ahash_request instances), pointer + * to plaintext and the message digest output buffer, asynchronous callback + * function, etc. It acts as a handle to the ahash_request_* API calls in a + * similar way as ahash handle to the crypto_ahash_* API calls. + */ + +/** + * ahash_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing ahash handle in the request + * data structure with a different one. + */ static inline void ahash_request_set_tfm(struct ahash_request *req, struct crypto_ahash *tfm) { req->base.tfm = crypto_ahash_tfm(tfm); } +/** + * ahash_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the ahash + * message digest API calls. During + * the allocation, the provided ahash handle + * is registered in the request data structure. + * + * Return: allocated request handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
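Putting the asynchronous pieces above together, a one-shot digest over a buffer looks roughly like the sketch below. It assumes a "sha256" ahash is registered and that buf refers to linearly mapped memory suitable for sg_init_one(); the wrapper struct, the callback and the function names are illustrative, not part of this diff.

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct ahash_wait {
        struct completion done;
        int err;
};

static void example_ahash_done(struct crypto_async_request *req, int err)
{
        struct ahash_wait *w = req->data;

        if (err == -EINPROGRESS)        /* request only left the backlog */
                return;
        w->err = err;
        complete(&w->done);
}

static int example_sha256_digest(const void *buf, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        struct ahash_wait w;
        int ret;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_tfm;
        }

        init_completion(&w.done);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   example_ahash_done, &w);
        sg_init_one(&sg, buf, len);
        ahash_request_set_crypt(req, &sg, out, len);

        ret = crypto_ahash_digest(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                /* asynchronous path: wait for the completion callback */
                wait_for_completion(&w.done);
                ret = w.err;
        }

        ahash_request_free(req);
free_tfm:
        crypto_free_ahash(tfm);
        return ret;
}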
+ */ static inline struct ahash_request *ahash_request_alloc( struct crypto_ahash *tfm, gfp_t gfp) { @@ -230,6 +516,10 @@ static inline struct ahash_request *ahash_request_alloc( return req; } +/** + * ahash_request_free() - zeroize and free the request data structure + * @req: request data structure cipher handle to be freed + */ static inline void ahash_request_free(struct ahash_request *req) { kzfree(req); @@ -241,6 +531,31 @@ static inline struct ahash_request *ahash_request_cast( return container_of(req, struct ahash_request, base); } +/** + * ahash_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * &crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once + * the cipher operation completes. + * + * The callback function is registered with the &ahash_request handle and + * must comply with the following template + * + * void callback_function(struct crypto_async_request *req, int error) + */ static inline void ahash_request_set_callback(struct ahash_request *req, u32 flags, crypto_completion_t compl, @@ -251,6 +566,19 @@ static inline void ahash_request_set_callback(struct ahash_request *req, req->base.flags = flags; } +/** + * ahash_request_set_crypt() - set data buffers + * @req: ahash_request handle to be updated + * @src: source scatter/gather list + * @result: buffer that is filled with the message digest -- the caller must + * ensure that the buffer has sufficient space by, for example, calling + * crypto_ahash_digestsize() + * @nbytes: number of bytes to process from the source scatter/gather list + * + * By using this call, the caller references the source scatter/gather list. + * The source scatter/gather list points to the data the message digest is to + * be calculated for. + */ static inline void ahash_request_set_crypt(struct ahash_request *req, struct scatterlist *src, u8 *result, unsigned int nbytes) @@ -260,6 +588,33 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, req->result = result; } +/** + * DOC: Synchronous Message Digest API + * + * The synchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto) + * + * The message digest API is able to maintain state information for the + * caller. + * + * The synchronous message digest API can store user-related context in in its + * shash_desc request data structure. 
+ */ + +/** + * crypto_alloc_shash() - allocate message digest handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a message digest. The returned &struct + * crypto_shash is the cipher handle that is required for any subsequent + * API invocation for that message digest. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, u32 mask); @@ -268,6 +623,10 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) return &tfm->base; } +/** + * crypto_free_shash() - zeroize and free the message digest handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_shash(struct crypto_shash *tfm) { crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm)); @@ -279,6 +638,15 @@ static inline unsigned int crypto_shash_alignmask( return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); } +/** + * crypto_shash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm) { return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm)); @@ -294,6 +662,15 @@ static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm) return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg); } +/** + * crypto_shash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * Return: digest size of cipher + */ static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) { return crypto_shash_alg(tfm)->digestsize; @@ -319,6 +696,21 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags); } +/** + * crypto_shash_descsize() - obtain the operational state size + * @tfm: cipher handle + * + * The size of the operational state the cipher needs during operation is + * returned for the hash referenced with the cipher handle. This size is + * required to calculate the memory requirements to allow the caller allocating + * sufficient memory for operational state. + * + * The operational state is defined with struct shash_desc where the size of + * that data structure is to be calculated as + * sizeof(struct shash_desc) + crypto_shash_descsize(alg) + * + * Return: size of the operational state + */ static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) { return tfm->descsize; @@ -329,29 +721,129 @@ static inline void *shash_desc_ctx(struct shash_desc *desc) return desc->__ctx; } +/** + * crypto_shash_setkey() - set key for message digest + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the keyed message digest cipher. The + * cipher handle must point to a keyed message digest cipher in order for this + * function to succeed. 
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); + +/** + * crypto_shash_digest() - calculate message digest for buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of crypto_shash_init, + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out); +/** + * crypto_shash_export() - extract operational state for message digest + * @desc: reference to the operational state handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the operational state handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_shash_descsize). + * + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ static inline int crypto_shash_export(struct shash_desc *desc, void *out) { return crypto_shash_alg(desc->tfm)->export(desc, out); } +/** + * crypto_shash_import() - import operational state + * @desc: reference to the operational state handle the state imported into + * @in: buffer holding the state + * + * This function imports the hash state into the operational state handle from + * the input buffer. That buffer should have been generated with the + * crypto_ahash_export function. + * + * Return: 0 if the import was successful; < 0 if an error occurred + */ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) { return crypto_shash_alg(desc->tfm)->import(desc, in); } +/** + * crypto_shash_init() - (re)initialize message digest + * @desc: operational state handle that is already filled + * + * The call (re-)initializes the message digest referenced by the + * operational state handle. Any potentially existing state created by + * previous operations is discarded. + * + * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred + */ static inline int crypto_shash_init(struct shash_desc *desc) { return crypto_shash_alg(desc->tfm)->init(desc); } +/** + * crypto_shash_update() - add data to message digest for processing + * @desc: operational state handle that is already initialized + * @data: input data to be added to the message digest + * @len: length of the input data + * + * Updates the message digest state of the operational state handle. + * + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred + */ int crypto_shash_update(struct shash_desc *desc, const u8 *data, unsigned int len); + +/** + * crypto_shash_final() - calculate message digest + * @desc: operational state handle that is already filled with data + * @out: output buffer filled with the message digest + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer. The caller must ensure that the output buffer is + * large enough by using crypto_shash_digestsize. 
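The synchronous variant of the same operation needs far less plumbing. A minimal sketch, assuming a "sha256" shash, the SHASH_DESC_ON_STACK() helper defined earlier in this header, and the shash_desc flags field of this kernel generation; the function name is illustrative.

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

static int example_sha256_shash(const u8 *data, unsigned int len,
                                u8 out[SHA256_DIGEST_SIZE])
{
        struct crypto_shash *tfm;
        int ret;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;        /* not asking for CRYPTO_TFM_REQ_MAY_SLEEP */

                ret = crypto_shash_digest(desc, data, len, out);
        }

        crypto_free_shash(tfm);
        return ret;
}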
+ * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_shash_final(struct shash_desc *desc, u8 *out); + +/** + * crypto_shash_finup() - calculate message digest of buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out); diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index d61c11170213..cd62bf4289e9 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -42,6 +42,7 @@ struct af_alg_completion { struct af_alg_control { struct af_alg_iv *iv; int op; + unsigned int aead_assoclen; }; struct af_alg_type { diff --git a/include/crypto/rng.h b/include/crypto/rng.h index c93f9b917925..a16fb10142bf 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -20,11 +20,38 @@ extern struct crypto_rng *crypto_default_rng; int crypto_get_default_rng(void); void crypto_put_default_rng(void); +/** + * DOC: Random number generator API + * + * The random number generator API is used with the ciphers of type + * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto) + */ + static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) { return (struct crypto_rng *)tfm; } +/** + * crypto_alloc_rng() -- allocate RNG handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a random number generator. The returned struct + * crypto_rng is the cipher handle that is required for any subsequent + * API invocation for that random number generator. + * + * For all random number generators, this call creates a new private copy of + * the random number generator that does not share a state with other + * instances. The only exception is the "krng" random number generator which + * is a kernel crypto API use case for the get_random_bytes() function of the + * /dev/random driver. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
+ */ static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask) { @@ -40,6 +67,14 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) return &tfm->base; } +/** + * crypto_rng_alg - obtain name of RNG + * @tfm: cipher handle + * + * Return the generic name (cra_name) of the initialized random number generator + * + * Return: generic name string + */ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) { return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng; @@ -50,23 +85,68 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm) return &crypto_rng_tfm(tfm)->crt_rng; } +/** + * crypto_free_rng() - zeroize and free RNG handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_rng(struct crypto_rng *tfm) { crypto_free_tfm(crypto_rng_tfm(tfm)); } +/** + * crypto_rng_get_bytes() - get random number + * @tfm: cipher handle + * @rdata: output buffer holding the random numbers + * @dlen: length of the output buffer + * + * This function fills the caller-allocated buffer with random numbers using the + * random number generator referenced by the cipher handle. + * + * Return: > 0 function was successful and returns the number of generated + * bytes; < 0 if an error occurred + */ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen) { return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen); } +/** + * crypto_rng_reset() - re-initialize the RNG + * @tfm: cipher handle + * @seed: seed input data + * @slen: length of the seed input data + * + * The reset function completely re-initializes the random number generator + * referenced by the cipher handle by clearing the current state. The new state + * is initialized with the caller provided seed or automatically, depending + * on the random number generator type (the ANSI X9.31 RNG requires + * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding). + * The seed is provided as a parameter to this function call. The provided seed + * should have the length of the seed size defined for the random number + * generator as defined by crypto_rng_seedsize. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_rng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) { return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen); } +/** + * crypto_rng_seedsize() - obtain seed size of RNG + * @tfm: cipher handle + * + * The function returns the seed size for the random number generator + * referenced by the cipher handle. This value may be zero if the random + * number generator does not implement or require a reseeding. For example, + * the SP800-90A DRBGs implement an automated reseeding after reaching a + * pre-defined threshold. 
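A short consumer of the RNG API documented above. The sketch assumes the generic "stdrng" name resolves to a registered generator and seeds it explicitly only when crypto_rng_seedsize() asks for it; the function name and the choice to seed from get_random_bytes() are illustrative.

#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/slab.h>

static int example_fill_random(u8 *buf, unsigned int len)
{
        struct crypto_rng *rng;
        u8 *seed = NULL;
        int slen, ret;

        rng = crypto_alloc_rng("stdrng", 0, 0);
        if (IS_ERR(rng))
                return PTR_ERR(rng);

        /* Seed explicitly only when the generator asks for it. */
        slen = crypto_rng_seedsize(rng);
        if (slen) {
                seed = kmalloc(slen, GFP_KERNEL);
                if (!seed) {
                        ret = -ENOMEM;
                        goto out;
                }
                get_random_bytes(seed, slen);
        }

        ret = crypto_rng_reset(rng, seed, slen);
        if (ret)
                goto out;

        ret = crypto_rng_get_bytes(rng, buf, len);
        if (ret > 0)            /* per the documentation above, > 0 means success */
                ret = 0;
out:
        kfree(seed);
        crypto_free_rng(rng);
        return ret;
}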
+ * + * Return: seed size for the random number generator + */ static inline int crypto_rng_seedsize(struct crypto_rng *tfm) { return crypto_rng_alg(tfm)->seedsize; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 53ed87698a74..8ba35c622e22 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -125,8 +125,8 @@ struct dma_buf_attachment; extern __printf(2, 3) void drm_ut_debug_printk(const char *function_name, const char *format, ...); -extern __printf(2, 3) -void drm_err(const char *func, const char *format, ...); +extern __printf(1, 2) +void drm_err(const char *format, ...); /***********************************************************************/ /** \name DRM template customization defaults */ @@ -155,7 +155,7 @@ void drm_err(const char *func, const char *format, ...); * \param arg arguments */ #define DRM_ERROR(fmt, ...) \ - drm_err(__func__, fmt, ##__VA_ARGS__) + drm_err(fmt, ##__VA_ARGS__) /** * Rate limited error output. Like DRM_ERROR() but won't flood the log. @@ -170,7 +170,7 @@ void drm_err(const char *func, const char *format, ...); DEFAULT_RATELIMIT_BURST); \ \ if (__ratelimit(&_rs)) \ - drm_err(__func__, fmt, ##__VA_ARGS__); \ + drm_err(fmt, ##__VA_ARGS__); \ }) #define DRM_INFO(fmt, ...) \ @@ -809,7 +809,7 @@ struct drm_device { struct drm_local_map *agp_buffer_map; unsigned int agp_buffer_token; - struct drm_mode_config mode_config; /**< Current mode config */ + struct drm_mode_config mode_config; /**< Current mode config */ /** \name GEM information */ /*@{ */ @@ -986,7 +986,7 @@ extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf); extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, dma_addr_t *addrs, int max_pages); -extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages); +extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); @@ -1028,10 +1028,25 @@ void drm_pci_agp_destroy(struct drm_device *dev); extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); +#ifdef CONFIG_PCI extern int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master); +#else +static inline int drm_get_pci_dev(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct drm_driver *driver) +{ + return -ENOSYS; +} + +static inline int drm_pci_set_busid(struct drm_device *dev, + struct drm_master *master) +{ + return -ENOSYS; +} +#endif #define DRM_PCIE_SPEED_25 1 #define DRM_PCIE_SPEED_50 2 diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h new file mode 100644 index 000000000000..ad2229574dd9 --- /dev/null +++ b/include/drm/drm_atomic.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#ifndef DRM_ATOMIC_H_ +#define DRM_ATOMIC_H_ + +#include <drm/drm_crtc.h> + +struct drm_atomic_state * __must_check +drm_atomic_state_alloc(struct drm_device *dev); +void drm_atomic_state_clear(struct drm_atomic_state *state); +void drm_atomic_state_free(struct drm_atomic_state *state); + +struct drm_crtc_state * __must_check +drm_atomic_get_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc); +struct drm_plane_state * __must_check +drm_atomic_get_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane); +struct drm_connector_state * __must_check +drm_atomic_get_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector); + +int __must_check +drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state, + struct drm_plane *plane, struct drm_crtc *crtc); +void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, + struct drm_framebuffer *fb); +int __must_check +drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, + struct drm_crtc *crtc); +int __must_check +drm_atomic_add_affected_connectors(struct drm_atomic_state *state, + struct drm_crtc *crtc); +int +drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, + struct drm_crtc *crtc); + +void drm_atomic_legacy_backoff(struct drm_atomic_state *state); + +int __must_check drm_atomic_check_only(struct drm_atomic_state *state); +int __must_check drm_atomic_commit(struct drm_atomic_state *state); +int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); + +#endif /* DRM_ATOMIC_H_ */ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h new file mode 100644 index 000000000000..f956b413311e --- /dev/null +++ b/include/drm/drm_atomic_helper.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2014 Red Hat + * Copyright (C) 2014 Intel Corp. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#ifndef DRM_ATOMIC_HELPER_H_ +#define DRM_ATOMIC_HELPER_H_ + +#include <drm/drm_crtc.h> + +int drm_atomic_helper_check(struct drm_device *dev, + struct drm_atomic_state *state); +int drm_atomic_helper_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool async); + +void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void drm_atomic_helper_commit_pre_planes(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_post_planes(struct drm_device *dev, + struct drm_atomic_state *old_state); + +int drm_atomic_helper_prepare_planes(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_planes(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_cleanup_planes(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void drm_atomic_helper_swap_state(struct drm_device *dev, + struct drm_atomic_state *state); + +/* implementations for legacy interfaces */ +int drm_atomic_helper_update_plane(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); +int drm_atomic_helper_disable_plane(struct drm_plane *plane); +int drm_atomic_helper_set_config(struct drm_mode_set *set); + +int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, + struct drm_property *property, + uint64_t val); +int drm_atomic_helper_plane_set_property(struct drm_plane *plane, + struct drm_property *property, + uint64_t val); +int drm_atomic_helper_connector_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val); +int drm_atomic_helper_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags); + +/* default implementations for state handling */ +void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); +struct drm_crtc_state * +drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc); +void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); + +void drm_atomic_helper_plane_reset(struct drm_plane *plane); +struct drm_plane_state * 
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane); +void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state); + +void drm_atomic_helper_connector_reset(struct drm_connector *connector); +struct drm_connector_state * +drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector); +void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state); + +/** + * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC + * @plane: the loop cursor + * @crtc: the crtc whose planes are iterated + * + * This iterates over the current state, useful (for example) when applying + * atomic state after it has been checked and swapped. To iterate over the + * planes which *will* be attached (for ->atomic_check()) see + * drm_crtc_for_each_pending_plane() + */ +#define drm_atomic_crtc_for_each_plane(plane, crtc) \ + drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask) + +/** + * drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state + * @plane: the loop cursor + * @crtc_state: the incoming crtc-state + * + * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be + * attached if the specified state is applied. Useful during (for example) + * ->atomic_check() operations, to validate the incoming state + */ +#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ + drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) + +#endif /* DRM_ATOMIC_HELPER_H_ */ diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index c40070a92d6b..b86329813ad3 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -42,6 +42,7 @@ struct drm_object_properties; struct drm_file; struct drm_clip_rect; struct device_node; +struct fence; #define DRM_MODE_OBJECT_CRTC 0xcccccccc #define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 @@ -136,14 +137,22 @@ struct drm_display_info { u8 cea_rev; }; +/* data corresponds to displayid vend/prod/serial */ +struct drm_tile_group { + struct kref refcount; + struct drm_device *dev; + int id; + u8 group_data[8]; +}; + struct drm_framebuffer_funcs { /* note: use drm_framebuffer_remove() */ void (*destroy)(struct drm_framebuffer *framebuffer); int (*create_handle)(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle); - /** - * Optinal callback for the dirty fb ioctl. + /* + * Optional callback for the dirty fb ioctl. 
* * Userspace can notify the driver via this callback * that a area of the framebuffer has changed and should @@ -196,7 +205,7 @@ struct drm_framebuffer { struct drm_property_blob { struct drm_mode_object base; struct list_head head; - unsigned int length; + size_t length; unsigned char data[]; }; @@ -215,7 +224,7 @@ struct drm_property { uint64_t *values; struct drm_device *dev; - struct list_head enum_blob_list; + struct list_head enum_list; }; struct drm_crtc; @@ -224,19 +233,65 @@ struct drm_encoder; struct drm_pending_vblank_event; struct drm_plane; struct drm_bridge; +struct drm_atomic_state; /** - * drm_crtc_funcs - control CRTCs for a given device + * struct drm_crtc_state - mutable CRTC state + * @enable: whether the CRTC should be enabled, gates all other state + * @mode_changed: for use by helpers and drivers when computing state updates + * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes + * @last_vblank_count: for helpers and drivers to capture the vblank of the + * update to ensure framebuffer cleanup isn't done too early + * @planes_changed: for use by helpers and drivers when computing state updates + * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings + * @mode: current mode timings + * @event: optional pointer to a DRM event to signal upon completion of the + * state update + * @state: backpointer to global drm_atomic_state + */ +struct drm_crtc_state { + bool enable; + + /* computed state bits used by helpers and drivers */ + bool planes_changed : 1; + bool mode_changed : 1; + + /* attached planes bitmask: + * WARNING: transitional helpers do not maintain plane_mask so + * drivers not converted over to atomic helpers should not rely + * on plane_mask being accurate! + */ + u32 plane_mask; + + /* last_vblank_count: for vblank waits before cleanup */ + u32 last_vblank_count; + + /* adjusted_mode: for use by helpers and drivers */ + struct drm_display_mode adjusted_mode; + + struct drm_display_mode mode; + + struct drm_pending_vblank_event *event; + + struct drm_atomic_state *state; +}; + +/** + * struct drm_crtc_funcs - control CRTCs for a given device * @save: save CRTC state * @restore: restore CRTC state * @reset: reset CRTC after state has been invalidated (e.g. resume) * @cursor_set: setup the cursor + * @cursor_set2: setup the cursor with hotspot, superseeds @cursor_set if set * @cursor_move: move the cursor * @gamma_set: specify color ramp for CRTC * @destroy: deinit and free object * @set_property: called when a property is changed * @set_config: apply a new CRTC configuration * @page_flip: initiate a page flip + * @atomic_duplicate_state: duplicate the atomic state for this CRTC + * @atomic_destroy_state: destroy an atomic state for this CRTC + * @atomic_set_property: set a property on an atomic state for this CRTC * * The drm_crtc_funcs structure is the central CRTC management structure * in the DRM. 
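For drivers converted to the new atomic hooks, the drm_crtc_funcs entries described above are usually filled straight from the drm_atomic_helper.h defaults declared earlier in this diff. A hedged sketch of such a vtable follows; "foo" is a placeholder driver, and a real driver will normally also provide gamma, cursor and property handling.

#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>

static const struct drm_crtc_funcs foo_crtc_funcs = {
        /* legacy entry points routed through the atomic helpers */
        .set_config = drm_atomic_helper_set_config,
        .page_flip  = drm_atomic_helper_page_flip,
        .destroy    = drm_crtc_cleanup,

        /* default atomic state housekeeping */
        .reset                  = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
};

The device-level entry points, drm_atomic_helper_check() and drm_atomic_helper_commit(), are wired up analogously in the driver's mode_config callbacks.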
Each CRTC controls one or more connectors (note that the name @@ -287,16 +342,28 @@ struct drm_crtc_funcs { int (*set_property)(struct drm_crtc *crtc, struct drm_property *property, uint64_t val); + + /* atomic update handling */ + struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); + void (*atomic_destroy_state)(struct drm_crtc *crtc, + struct drm_crtc_state *state); + int (*atomic_set_property)(struct drm_crtc *crtc, + struct drm_crtc_state *state, + struct drm_property *property, + uint64_t val); }; /** - * drm_crtc - central CRTC control structure + * struct drm_crtc - central CRTC control structure * @dev: parent DRM device + * @port: OF node used by drm_of_find_possible_crtcs() * @head: list management * @mutex: per-CRTC locking * @base: base KMS object for ID tracking etc. * @primary: primary plane for this CRTC * @cursor: cursor plane for this CRTC + * @cursor_x: current x position of the cursor, used for universal cursor planes + * @cursor_y: current y position of the cursor, used for universal cursor planes * @enabled: is this CRTC enabled? * @mode: current mode timings * @hwmode: mode timings as programmed to hw regs @@ -309,10 +376,13 @@ struct drm_crtc_funcs { * @gamma_size: size of gamma ramp * @gamma_store: gamma ramp values * @framedur_ns: precise frame timing - * @framedur_ns: precise line timing + * @linedur_ns: precise line timing * @pixeldur_ns: precise pixel timing * @helper_private: mid-layer private data * @properties: property tracking for this CRTC + * @state: current atomic state for this CRTC + * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for + * legacy ioctls * * Each CRTC may have one or more connectors associated with it. This structure * allows the CRTC to be controlled. @@ -322,7 +392,7 @@ struct drm_crtc { struct device_node *port; struct list_head head; - /** + /* * crtc mutex * * This provides a read lock for the overall crtc state (mode, dpms @@ -368,6 +438,8 @@ struct drm_crtc { struct drm_object_properties properties; + struct drm_crtc_state *state; + /* * For legacy crtc ioctls so that atomic drivers can get at the locking * acquire context. @@ -375,9 +447,22 @@ struct drm_crtc { struct drm_modeset_acquire_ctx *acquire_ctx; }; +/** + * struct drm_connector_state - mutable connector state + * @crtc: CRTC to connect connector to, NULL if disabled + * @best_encoder: can be used by helpers and drivers to select the encoder + * @state: backpointer to global drm_atomic_state + */ +struct drm_connector_state { + struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */ + + struct drm_encoder *best_encoder; + + struct drm_atomic_state *state; +}; /** - * drm_connector_funcs - control connectors on a given device + * struct drm_connector_funcs - control connectors on a given device * @dpms: set power state (see drm_crtc_funcs above) * @save: save connector state * @restore: restore connector state @@ -387,6 +472,9 @@ struct drm_crtc { * @set_property: property for this connector may need an update * @destroy: make object go away * @force: notify the driver that the connector is forced on + * @atomic_duplicate_state: duplicate the atomic state for this connector + * @atomic_destroy_state: destroy an atomic state for this connector + * @atomic_set_property: set a property on an atomic state for this connector * * Each CRTC may have one or more connectors attached to it. 
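Connectors gain the same trio of atomic callbacks, again with ready-made defaults. A sketch under the same caveats as the CRTC example above; the DPMS and probe helpers named here are the long-standing ones from drm_crtc_helper.h, and "foo" remains a placeholder.

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>

static const struct drm_connector_funcs foo_connector_funcs = {
        .dpms         = drm_helper_connector_dpms,
        .fill_modes   = drm_helper_probe_single_connector_modes,
        .set_property = drm_atomic_helper_connector_set_property,
        .destroy      = drm_connector_cleanup,

        .reset                  = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_connector_destroy_state,
};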
The functions * below allow the core DRM code to control connectors, enumerate available modes, @@ -411,10 +499,19 @@ struct drm_connector_funcs { uint64_t val); void (*destroy)(struct drm_connector *connector); void (*force)(struct drm_connector *connector); + + /* atomic update handling */ + struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); + void (*atomic_destroy_state)(struct drm_connector *connector, + struct drm_connector_state *state); + int (*atomic_set_property)(struct drm_connector *connector, + struct drm_connector_state *state, + struct drm_property *property, + uint64_t val); }; /** - * drm_encoder_funcs - encoder controls + * struct drm_encoder_funcs - encoder controls * @reset: reset state (e.g. at init or resume time) * @destroy: cleanup and free associated data * @@ -428,7 +525,7 @@ struct drm_encoder_funcs { #define DRM_CONNECTOR_MAX_ENCODER 3 /** - * drm_encoder - central DRM encoder structure + * struct drm_encoder - central DRM encoder structure * @dev: parent DRM device * @head: list management * @base: base KMS object @@ -472,7 +569,7 @@ struct drm_encoder { #define MAX_ELD_BYTES 128 /** - * drm_connector - central DRM connector control structure + * struct drm_connector - central DRM connector control structure * @dev: parent DRM device * @kdev: kernel device for sysfs attributes * @attr: sysfs attributes @@ -483,6 +580,7 @@ struct drm_encoder { * @connector_type_id: index into connector type enum * @interlace_allowed: can this connector handle interlaced modes? * @doublescan_allowed: can this connector handle doublescan? + * @stereo_allowed: can this connector handle stereo modes? * @modes: modes available on this connector (from fill_modes() + user) * @status: one of the drm_connector_status enums (connected, not, or unknown) * @probed_modes: list of modes derived directly from the display @@ -490,10 +588,13 @@ struct drm_encoder { * @funcs: connector control functions * @edid_blob_ptr: DRM property containing EDID if present * @properties: property tracking for this connector + * @path_blob_ptr: DRM blob property data for the DP MST path property * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling * @dpms: current dpms state * @helper_private: mid-layer private data + * @cmdline_mode: mode line parsed from the kernel cmdline for this connector * @force: a %DRM_FORCE_<foo> state for forced mode sets + * @override_edid: has the EDID been overwritten through debugfs for testing? * @encoder_ids: valid encoders for this connector * @encoder: encoder driving this connector, if any * @eld: EDID-like data, if present @@ -503,6 +604,18 @@ struct drm_encoder { * @video_latency: video latency info from ELD, if found * @audio_latency: audio latency info from ELD, if found * @null_edid_counter: track sinks that give us all zeros for the EDID + * @bad_edid_counter: track sinks that give us an EDID with invalid checksum + * @debugfs_entry: debugfs directory for this connector + * @state: current atomic state for this connector + * @has_tile: is this connector connected to a tiled monitor + * @tile_group: tile group for the connected monitor + * @tile_is_single_monitor: whether the tile is one monitor housing + * @num_h_tile: number of horizontal tiles in the tile group + * @num_v_tile: number of vertical tiles in the tile group + * @tile_h_loc: horizontal location of this tile + * @tile_v_loc: vertical location of this tile + * @tile_h_size: horizontal size of this tile. + * @tile_v_size: vertical size of this tile. 
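/*
 * Illustrative sketch, not taken from the patch above: the
 * atomic_set_property() hook added to struct drm_connector_funcs decodes a
 * driver-private property into the connector state being built up, instead
 * of poking hardware directly; the hardware is only touched later when the
 * state is committed.  foo_connector, foo_connector_state and the margin
 * property are all hypothetical.
 */
#include <drm/drm_crtc.h>

struct foo_connector {
	struct drm_connector base;
	struct drm_property *margin_property;	/* created by the driver at init */
};

struct foo_connector_state {
	struct drm_connector_state base;
	unsigned int margin;
};

static inline struct foo_connector *to_foo_connector(struct drm_connector *connector)
{
	return container_of(connector, struct foo_connector, base);
}

static inline struct foo_connector_state *
to_foo_connector_state(struct drm_connector_state *state)
{
	return container_of(state, struct foo_connector_state, base);
}

static int foo_connector_atomic_set_property(struct drm_connector *connector,
					     struct drm_connector_state *state,
					     struct drm_property *property,
					     uint64_t val)
{
	struct foo_connector *foo = to_foo_connector(connector);
	struct foo_connector_state *fstate = to_foo_connector_state(state);

	if (property == foo->margin_property) {
		fstate->margin = val;	/* applied during the commit phase */
		return 0;
	}

	/* Unknown property: reject so the core can fail the ioctl. */
	return -EINVAL;
}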
* * Each connector may be connected to one or more CRTCs, or may be clonable by * another connector if they can share a CRTC. Each connector also has a specific @@ -538,6 +651,8 @@ struct drm_connector { struct drm_property_blob *path_blob_ptr; + struct drm_property_blob *tile_blob_ptr; + uint8_t polled; /* DRM_CONNECTOR_POLL_* */ /* requested DPMS state */ @@ -563,14 +678,63 @@ struct drm_connector { unsigned bad_edid_counter; struct dentry *debugfs_entry; + + struct drm_connector_state *state; + + /* DisplayID bits */ + bool has_tile; + struct drm_tile_group *tile_group; + bool tile_is_single_monitor; + + uint8_t num_h_tile, num_v_tile; + uint8_t tile_h_loc, tile_v_loc; + uint16_t tile_h_size, tile_v_size; +}; + +/** + * struct drm_plane_state - mutable plane state + * @crtc: currently bound CRTC, NULL if disabled + * @fb: currently bound framebuffer + * @fence: optional fence to wait for before scanning out @fb + * @crtc_x: left position of visible portion of plane on crtc + * @crtc_y: upper position of visible portion of plane on crtc + * @crtc_w: width of visible portion of plane on crtc + * @crtc_h: height of visible portion of plane on crtc + * @src_x: left position of visible portion of plane within + * plane (in 16.16) + * @src_y: upper position of visible portion of plane within + * plane (in 16.16) + * @src_w: width of visible portion of plane (in 16.16) + * @src_h: height of visible portion of plane (in 16.16) + * @state: backpointer to global drm_atomic_state + */ +struct drm_plane_state { + struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */ + struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */ + struct fence *fence; + + /* Signed dest location allows it to be partially off screen */ + int32_t crtc_x, crtc_y; + uint32_t crtc_w, crtc_h; + + /* Source values are 16.16 fixed point */ + uint32_t src_x, src_y; + uint32_t src_h, src_w; + + struct drm_atomic_state *state; }; + /** - * drm_plane_funcs - driver plane control functions + * struct drm_plane_funcs - driver plane control functions * @update_plane: update the plane configuration * @disable_plane: shut down the plane * @destroy: clean up plane resources + * @reset: reset plane after state has been invalidated (e.g. 
resume) * @set_property: called when a property is changed + * @atomic_duplicate_state: duplicate the atomic state for this plane + * @atomic_destroy_state: destroy an atomic state for this plane + * @atomic_set_property: set a property on an atomic state for this plane */ struct drm_plane_funcs { int (*update_plane)(struct drm_plane *plane, @@ -585,6 +749,15 @@ struct drm_plane_funcs { int (*set_property)(struct drm_plane *plane, struct drm_property *property, uint64_t val); + + /* atomic update handling */ + struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); + void (*atomic_destroy_state)(struct drm_plane *plane, + struct drm_plane_state *state); + int (*atomic_set_property)(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val); }; enum drm_plane_type { @@ -594,7 +767,7 @@ enum drm_plane_type { }; /** - * drm_plane - central DRM plane control structure + * struct drm_plane - central DRM plane control structure * @dev: DRM device this plane belongs to * @head: for list management * @base: base mode object @@ -603,14 +776,19 @@ enum drm_plane_type { * @format_count: number of formats supported * @crtc: currently bound CRTC * @fb: currently bound fb + * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by + * drm_mode_set_config_internal() to implement correct refcounting. * @funcs: helper functions * @properties: property tracking for this plane * @type: type of plane (overlay, primary, cursor) + * @state: current atomic state for this plane */ struct drm_plane { struct drm_device *dev; struct list_head head; + struct drm_modeset_lock mutex; + struct drm_mode_object base; uint32_t possible_crtcs; @@ -620,8 +798,6 @@ struct drm_plane { struct drm_crtc *crtc; struct drm_framebuffer *fb; - /* Temporary tracking of the old fb while a modeset is ongoing. Used - * by drm_mode_set_config_internal to implement correct refcounting. 
*/ struct drm_framebuffer *old_fb; const struct drm_plane_funcs *funcs; @@ -629,10 +805,14 @@ struct drm_plane { struct drm_object_properties properties; enum drm_plane_type type; + + void *helper_private; + + struct drm_plane_state *state; }; /** - * drm_bridge_funcs - drm_bridge control functions + * struct drm_bridge_funcs - drm_bridge control functions * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge * @disable: Called right before encoder prepare, disables the bridge * @post_disable: Called right after encoder prepare, for lockstepped disable @@ -656,7 +836,7 @@ struct drm_bridge_funcs { }; /** - * drm_bridge - central DRM bridge control structure + * struct drm_bridge - central DRM bridge control structure * @dev: DRM device this bridge belongs to * @head: list management * @base: base mode object @@ -674,8 +854,35 @@ struct drm_bridge { }; /** - * drm_mode_set - new values for a CRTC config change - * @head: list management + * struct struct drm_atomic_state - the global state object for atomic updates + * @dev: parent DRM device + * @flags: state flags like async update + * @planes: pointer to array of plane pointers + * @plane_states: pointer to array of plane states pointers + * @crtcs: pointer to array of CRTC pointers + * @crtc_states: pointer to array of CRTC states pointers + * @num_connector: size of the @connectors and @connector_states arrays + * @connectors: pointer to array of connector pointers + * @connector_states: pointer to array of connector states pointers + * @acquire_ctx: acquire context for this atomic modeset state update + */ +struct drm_atomic_state { + struct drm_device *dev; + uint32_t flags; + struct drm_plane **planes; + struct drm_plane_state **plane_states; + struct drm_crtc **crtcs; + struct drm_crtc_state **crtc_states; + int num_connector; + struct drm_connector **connectors; + struct drm_connector_state **connector_states; + + struct drm_modeset_acquire_ctx *acquire_ctx; +}; + + +/** + * struct drm_mode_set - new values for a CRTC config change * @fb: framebuffer to use for new config * @crtc: CRTC whose configuration we're about to change * @mode: mode timings to use @@ -705,6 +912,9 @@ struct drm_mode_set { * struct drm_mode_config_funcs - basic driver provided mode setting functions * @fb_create: create a new framebuffer object * @output_poll_changed: function to handle output configuration changes + * @atomic_check: check whether a give atomic state update is possible + * @atomic_commit: commit an atomic state update previously verified with + * atomic_check() * * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that * involve drivers. @@ -714,13 +924,20 @@ struct drm_mode_config_funcs { struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd); void (*output_poll_changed)(struct drm_device *dev); + + int (*atomic_check)(struct drm_device *dev, + struct drm_atomic_state *a); + int (*atomic_commit)(struct drm_device *dev, + struct drm_atomic_state *a, + bool async); }; /** - * drm_mode_group - group of mode setting resources for potential sub-grouping + * struct drm_mode_group - group of mode setting resources for potential sub-grouping * @num_crtcs: CRTC count * @num_encoders: encoder count * @num_connectors: connector count + * @num_bridges: bridge count * @id_list: list of KMS object IDs in this group * * Currently this simply tracks the global mode setting state. 
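/*
 * Illustrative sketch, not taken from the patch above: a driver converted
 * to atomic would typically route the new atomic_check()/atomic_commit()
 * hooks of struct drm_mode_config_funcs to the atomic helper library added
 * alongside this interface.  drm_atomic_helper_check() and
 * drm_atomic_helper_commit() are assumed here from drm_atomic_helper.h,
 * which is not part of this header excerpt; drivers with special
 * requirements implement their own check/commit instead.
 */
#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	/* .fb_create and .output_poll_changed stay as in the legacy driver */
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};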
But in the @@ -740,10 +957,14 @@ struct drm_mode_group { }; /** - * drm_mode_config - Mode configuration control structure + * struct drm_mode_config - Mode configuration control structure * @mutex: mutex protecting KMS related lists and structures + * @connection_mutex: ww mutex protecting connector state and routing + * @acquire_ctx: global implicit acquire context used by atomic drivers for + * legacy ioctls * @idr_mutex: mutex for KMS ID allocation and management * @crtc_idr: main KMS ID tracking object + * @fb_lock: mutex to protect fb state and lists * @num_fb: number of fbs available * @fb_list: list of framebuffers available * @num_connector: number of connectors on this device @@ -752,17 +973,28 @@ struct drm_mode_group { * @bridge_list: list of bridge objects * @num_encoder: number of encoders on this device * @encoder_list: list of encoder objects + * @num_overlay_plane: number of overlay planes on this device + * @num_total_plane: number of universal (i.e. with primary/curso) planes on this device + * @plane_list: list of plane objects * @num_crtc: number of CRTCs on this device * @crtc_list: list of CRTC objects + * @property_list: list of property objects * @min_width: minimum pixel width on this device * @min_height: minimum pixel height on this device * @max_width: maximum pixel width on this device * @max_height: maximum pixel height on this device * @funcs: core driver provided mode setting functions * @fb_base: base address of the framebuffer - * @poll_enabled: track polling status for this device + * @poll_enabled: track polling support for this device + * @poll_running: track polling status for this device * @output_poll_work: delayed work for polling in process context + * @property_blob_list: list of all the blob property objects * @*_property: core property tracking + * @preferred_depth: preferred RBG pixel depth, used by fb helpers + * @prefer_shadow: hint to userspace to prefer shadow-fb rendering + * @async_page_flip: does this device support async flips on the primary plane? + * @cursor_width: hint to userspace for max cursor width + * @cursor_height: hint to userspace for max cursor height * * Core mode resource tracking structure. All CRTC, encoders, and connectors * enumerated by the driver are added here, as are global properties. 
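/*
 * Illustrative sketch, not taken from the patch above: the limits and
 * userspace hints documented for struct drm_mode_config are filled in by
 * the driver right after drm_mode_config_init().  The foo_* names and the
 * particular values are hypothetical.
 */
#include <drm/drm_crtc.h>

static void foo_mode_config_setup(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	/* hints exported to userspace */
	dev->mode_config.cursor_width = 64;
	dev->mode_config.cursor_height = 64;
	dev->mode_config.preferred_depth = 24;

	/*
	 * dev->mode_config.funcs would then point at the driver's
	 * drm_mode_config_funcs, e.g. the atomic sketch shown earlier.
	 */
}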
Some @@ -774,16 +1006,10 @@ struct drm_mode_config { struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ struct mutex idr_mutex; /* for IDR management */ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ + struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ /* this is limited to one for now */ - - /** - * fb_lock - mutex to protect fb state - * - * Besides the global fb list his also protects the fbs list in the - * file_priv - */ - struct mutex fb_lock; + struct mutex fb_lock; /* proctects global and per-file fb lists */ int num_fb; struct list_head fb_list; @@ -824,6 +1050,7 @@ struct drm_mode_config { struct drm_property *edid_property; struct drm_property *dpms_property; struct drm_property *path_property; + struct drm_property *tile_property; struct drm_property *plane_type_property; struct drm_property *rotation_property; @@ -851,6 +1078,10 @@ struct drm_mode_config { struct drm_property *aspect_ratio_property; struct drm_property *dirty_info_property; + /* properties for virtual machine layout */ + struct drm_property *suggested_x_property; + struct drm_property *suggested_y_property; + /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; @@ -861,6 +1092,19 @@ struct drm_mode_config { uint32_t cursor_width, cursor_height; }; +/** + * drm_for_each_plane_mask - iterate over planes specified by bitmask + * @plane: the loop cursor + * @dev: the DRM device + * @plane_mask: bitmask of plane indices + * + * Iterate over all planes specified by bitmask. + */ +#define drm_for_each_plane_mask(plane, dev, plane_mask) \ + list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ + if ((plane_mask) & (1 << drm_plane_index(plane))) + + #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) #define obj_to_connector(x) container_of(x, struct drm_connector, base) #define obj_to_encoder(x) container_of(x, struct drm_encoder, base) @@ -880,9 +1124,6 @@ extern int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_plane *primary, struct drm_plane *cursor, const struct drm_crtc_funcs *funcs); -extern int drm_crtc_init(struct drm_device *dev, - struct drm_crtc *crtc, - const struct drm_crtc_funcs *funcs); extern void drm_crtc_cleanup(struct drm_crtc *crtc); extern unsigned int drm_crtc_index(struct drm_crtc *crtc); @@ -978,9 +1219,10 @@ extern void drm_mode_config_reset(struct drm_device *dev); extern void drm_mode_config_cleanup(struct drm_device *dev); extern int drm_mode_connector_set_path_property(struct drm_connector *connector, - char *path); + const char *path); +int drm_mode_connector_set_tile_property(struct drm_connector *connector); extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, - struct edid *edid); + const struct edid *edid); static inline bool drm_property_type_is(struct drm_property *property, uint32_t type) @@ -1041,11 +1283,13 @@ extern void drm_property_destroy(struct drm_device *dev, struct drm_property *pr extern int drm_property_add_enum(struct drm_property *property, int index, uint64_t value, const char *name); extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); -extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, - char *formats[]); +extern int drm_mode_create_tv_properties(struct drm_device *dev, + unsigned int num_modes, + char *modes[]); extern int drm_mode_create_scaling_mode_property(struct drm_device 
*dev); extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev); extern int drm_mode_create_dirty_info_property(struct drm_device *dev); +extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev); extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder); @@ -1113,6 +1357,13 @@ extern void drm_set_preferred_mode(struct drm_connector *connector, extern int drm_edid_header_is_valid(const u8 *raw_edid); extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid); extern bool drm_edid_is_valid(struct edid *edid); + +extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, + char topology[8]); +extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, + char topology[8]); +extern void drm_mode_put_tile_group(struct drm_device *dev, + struct drm_tile_group *tg); struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, int hsize, int vsize, int fresh, bool rb); diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index a3d75fefd010..7adbb65ea8ae 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -68,6 +68,7 @@ struct drm_crtc_helper_funcs { int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb); + void (*mode_set_nofb)(struct drm_crtc *crtc); /* Move the crtc on the current fb to the given position *optional* */ int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, @@ -81,6 +82,12 @@ struct drm_crtc_helper_funcs { /* disable crtc when not in use - more explicit than dpms off */ void (*disable)(struct drm_crtc *crtc); + + /* atomic helpers */ + int (*atomic_check)(struct drm_crtc *crtc, + struct drm_crtc_state *state); + void (*atomic_begin)(struct drm_crtc *crtc); + void (*atomic_flush)(struct drm_crtc *crtc); }; /** @@ -161,6 +168,12 @@ static inline void drm_connector_helper_add(struct drm_connector *connector, extern void drm_helper_resume_force_mode(struct drm_device *dev); +int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb); +int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); + /* drm_probe_helper.c */ extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h new file mode 100644 index 000000000000..623b4e98e748 --- /dev/null +++ b/include/drm/drm_displayid.h @@ -0,0 +1,76 @@ +/* + * Copyright © 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef DRM_DISPLAYID_H +#define DRM_DISPLAYID_H + +#define DATA_BLOCK_PRODUCT_ID 0x00 +#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01 +#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02 +#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03 +#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04 +#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05 +#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06 +#define DATA_BLOCK_VESA_TIMING 0x07 +#define DATA_BLOCK_CEA_TIMING 0x08 +#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09 +#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a +#define DATA_BLOCK_GP_ASCII_STRING 0x0b +#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c +#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d +#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e +#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f +#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10 +#define DATA_BLOCK_TILED_DISPLAY 0x12 + +#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f + +#define PRODUCT_TYPE_EXTENSION 0 +#define PRODUCT_TYPE_TEST 1 +#define PRODUCT_TYPE_PANEL 2 +#define PRODUCT_TYPE_MONITOR 3 +#define PRODUCT_TYPE_TV 4 +#define PRODUCT_TYPE_REPEATER 5 +#define PRODUCT_TYPE_DIRECT_DRIVE 6 + +struct displayid_hdr { + u8 rev; + u8 bytes; + u8 prod_id; + u8 ext_count; +} __packed; + +struct displayid_block { + u8 tag; + u8 rev; + u8 num_bytes; +} __packed; + +struct displayid_tiled_block { + struct displayid_block base; + u8 tile_cap; + u8 topo[3]; + u8 tile_size[4]; + u8 tile_pixel_bezel[5]; + u8 topology_id[8]; +} __packed; + +#endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 9305c718d789..11f8c84f98ce 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -303,7 +303,8 @@ #define DP_TEST_CRC_B_CB 0x244 #define DP_TEST_SINK_MISC 0x246 -#define DP_TEST_CRC_SUPPORTED (1 << 5) +# define DP_TEST_CRC_SUPPORTED (1 << 5) +# define DP_TEST_COUNT_MASK 0x7 #define DP_TEST_RESPONSE 0x260 # define DP_TEST_ACK (1 << 0) @@ -313,7 +314,7 @@ #define DP_TEST_EDID_CHECKSUM 0x261 #define DP_TEST_SINK 0x270 -#define DP_TEST_SINK_START (1 << 0) +# define DP_TEST_SINK_START (1 << 0) #define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */ # define DP_PAYLOAD_TABLE_UPDATED (1 << 0) @@ -404,26 +405,6 @@ #define MODE_I2C_READ 4 #define MODE_I2C_STOP 8 -/** - * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp - * aux algorithm - * @running: set by the algo indicating whether an i2c is ongoing or whether - * the i2c bus is quiescent - * @address: i2c target address for the currently ongoing transfer - * @aux_ch: driver callback to transfer a single byte of the i2c payload - */ -struct i2c_algo_dp_aux_data { - bool running; - u16 address; - int (*aux_ch) (struct i2c_adapter *adapter, - int mode, uint8_t write_byte, - uint8_t *read_byte); -}; - -int -i2c_dp_aux_add_bus(struct i2c_adapter *adapter); - - #define DP_LINK_STATUS_SIZE 6 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); @@ -550,6 +531,7 @@ struct drm_dp_aux { struct mutex hw_mutex; ssize_t (*transfer)(struct drm_dp_aux *aux, struct 
drm_dp_aux_msg *msg); + unsigned i2c_nack_count, i2c_defer_count; }; ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 338fc1053835..00c1da927245 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -28,7 +28,7 @@ struct drm_dp_mst_branch; /** - * struct drm_dp_vcpi - Virtual Channel Payload Identifer + * struct drm_dp_vcpi - Virtual Channel Payload Identifier * @vcpi: Virtual channel ID. * @pbn: Payload Bandwidth Number for this channel * @aligned_pbn: PBN aligned with slot size @@ -92,6 +92,8 @@ struct drm_dp_mst_port { struct drm_dp_vcpi vcpi; struct drm_connector *connector; struct drm_dp_mst_topology_mgr *mgr; + + struct edid *cached_edid; /* for DP logical ports - make tiling work */ }; /** @@ -371,7 +373,7 @@ struct drm_dp_sideband_msg_tx { struct drm_dp_mst_topology_mgr; struct drm_dp_mst_topology_cbs { /* create a connector for a port */ - struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path); + struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector); void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); @@ -474,7 +476,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); -enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); +enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index b96031d947a0..87d85e81d3a7 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -27,12 +27,14 @@ #define EDID_LENGTH 128 #define DDC_ADDR 0x50 +#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */ #define CEA_EXT 0x02 #define VTB_EXT 0x10 #define DI_EXT 0x40 #define LS_EXT 0x50 #define MI_EXT 0x60 +#define DISPLAYID_EXT 0x70 struct est_timings { u8 t1; @@ -207,6 +209,61 @@ struct detailed_timing { #define DRM_EDID_HDMI_DC_30 (1 << 4) #define DRM_EDID_HDMI_DC_Y444 (1 << 3) +/* ELD Header Block */ +#define DRM_ELD_HEADER_BLOCK_SIZE 4 + +#define DRM_ELD_VER 0 +# define DRM_ELD_VER_SHIFT 3 +# define DRM_ELD_VER_MASK (0x1f << 3) + +#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! 
*/ + +/* ELD Baseline Block for ELD_Ver == 2 */ +#define DRM_ELD_CEA_EDID_VER_MNL 4 +# define DRM_ELD_CEA_EDID_VER_SHIFT 5 +# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5) +# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5) +# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5) +# define DRM_ELD_MNL_SHIFT 0 +# define DRM_ELD_MNL_MASK (0x1f << 0) + +#define DRM_ELD_SAD_COUNT_CONN_TYPE 5 +# define DRM_ELD_SAD_COUNT_SHIFT 4 +# define DRM_ELD_SAD_COUNT_MASK (0xf << 4) +# define DRM_ELD_CONN_TYPE_SHIFT 2 +# define DRM_ELD_CONN_TYPE_MASK (3 << 2) +# define DRM_ELD_CONN_TYPE_HDMI (0 << 2) +# define DRM_ELD_CONN_TYPE_DP (1 << 2) +# define DRM_ELD_SUPPORTS_AI (1 << 1) +# define DRM_ELD_SUPPORTS_HDCP (1 << 0) + +#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */ +# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */ + +#define DRM_ELD_SPEAKER 7 +# define DRM_ELD_SPEAKER_RLRC (1 << 6) +# define DRM_ELD_SPEAKER_FLRC (1 << 5) +# define DRM_ELD_SPEAKER_RC (1 << 4) +# define DRM_ELD_SPEAKER_RLR (1 << 3) +# define DRM_ELD_SPEAKER_FC (1 << 2) +# define DRM_ELD_SPEAKER_LFE (1 << 1) +# define DRM_ELD_SPEAKER_FLR (1 << 0) + +#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */ +# define DRM_ELD_PORT_ID_LEN 8 + +#define DRM_ELD_MANUFACTURER_NAME0 16 +#define DRM_ELD_MANUFACTURER_NAME1 17 + +#define DRM_ELD_PRODUCT_CODE0 18 +#define DRM_ELD_PRODUCT_CODE1 19 + +#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */ + +#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad)) + struct edid { u8 header[8]; /* Vendor & product info */ @@ -279,4 +336,56 @@ int drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, const struct drm_display_mode *mode); +/** + * drm_eld_mnl - Get ELD monitor name length in bytes. + * @eld: pointer to an eld memory structure with mnl set + */ +static inline int drm_eld_mnl(const uint8_t *eld) +{ + return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; +} + +/** + * drm_eld_sad_count - Get ELD SAD count. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline int drm_eld_sad_count(const uint8_t *eld) +{ + return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >> + DRM_ELD_SAD_COUNT_SHIFT; +} + +/** + * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes + * @eld: pointer to an eld memory structure with mnl and sad_count set + * + * This is a helper for determining the payload size of the baseline block, in + * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block. + */ +static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld) +{ + return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE + + drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3; +} + +/** + * drm_eld_size - Get ELD size in bytes + * @eld: pointer to a complete eld memory structure + * + * The returned value does not include the vendor block. It's vendor specific, + * and comprises of the remaining bytes in the ELD memory buffer after + * drm_eld_size() bytes of header and baseline block. + * + * The returned value is guaranteed to be a multiple of 4. 
+ */ +static inline int drm_eld_size(const uint8_t *eld) +{ + return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4; +} + +struct edid *drm_do_get_edid(struct drm_connector *connector, + int (*get_edid_block)(void *data, u8 *buf, unsigned int block, + size_t len), + void *data); + #endif /* __DRM_EDID_H__ */ diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index f4ad254e3488..b597068103aa 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -34,9 +34,14 @@ struct drm_fb_helper; #include <linux/kgdb.h> +struct drm_fb_offset { + int x, y; +}; + struct drm_fb_helper_crtc { struct drm_mode_set mode_set; struct drm_display_mode *desired_mode; + int x, y; }; struct drm_fb_helper_surface_size { @@ -72,6 +77,7 @@ struct drm_fb_helper_funcs { bool (*initial_config)(struct drm_fb_helper *fb_helper, struct drm_fb_helper_crtc **crtcs, struct drm_display_mode **modes, + struct drm_fb_offset *offsets, bool *enabled, int width, int height); }; diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h index 9eed34dcd6af..d387cf06ae05 100644 --- a/include/drm/drm_flip_work.h +++ b/include/drm/drm_flip_work.h @@ -25,6 +25,7 @@ #define DRM_FLIP_WORK_H #include <linux/kfifo.h> +#include <linux/spinlock.h> #include <linux/workqueue.h> /** @@ -32,9 +33,9 @@ * * Util to queue up work to run from work-queue context after flip/vblank. * Typically this can be used to defer unref of framebuffer's, cursor - * bo's, etc until after vblank. The APIs are all safe (and lockless) - * for up to one producer and once consumer at a time. The single-consumer - * aspect is ensured by committing the queued work to a single work-queue. + * bo's, etc until after vblank. The APIs are all thread-safe. + * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called + * in atomic context. 
*/ struct drm_flip_work; @@ -51,26 +52,40 @@ struct drm_flip_work; typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); /** + * struct drm_flip_task - flip work task + * @node: list entry element + * @data: data to pass to work->func + */ +struct drm_flip_task { + struct list_head node; + void *data; +}; + +/** * struct drm_flip_work - flip work queue * @name: debug name - * @pending: number of queued but not committed items - * @count: number of committed items * @func: callback fxn called for each committed item * @worker: worker which calls @func - * @fifo: queue of committed items + * @queued: queued tasks + * @commited: commited tasks + * @lock: lock to access queued and commited lists */ struct drm_flip_work { const char *name; - atomic_t pending, count; drm_flip_func_t func; struct work_struct worker; - DECLARE_KFIFO_PTR(fifo, void *); + struct list_head queued; + struct list_head commited; + spinlock_t lock; }; +struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags); +void drm_flip_work_queue_task(struct drm_flip_work *work, + struct drm_flip_task *task); void drm_flip_work_queue(struct drm_flip_work *work, void *val); void drm_flip_work_commit(struct drm_flip_work *work, struct workqueue_struct *wq); -int drm_flip_work_init(struct drm_flip_work *work, int size, +void drm_flip_work_init(struct drm_flip_work *work, const char *name, drm_flip_func_t func); void drm_flip_work_cleanup(struct drm_flip_work *work); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 1e6ae1458f7a..780511a459c0 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -119,6 +119,13 @@ struct drm_gem_object { * simply leave it as NULL. */ struct dma_buf_attachment *import_attach; + + /** + * dumb - created as dumb buffer + * Whether the gem object was created using the dumb buffer interface + * as such it may not be used for GPU rendering. + */ + bool dumb; }; void drm_gem_object_release(struct drm_gem_object *obj); diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 2ff35f3de9c5..acd6af8a8e67 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -4,6 +4,13 @@ #include <drm/drmP.h> #include <drm/drm_gem.h> +/** + * struct drm_gem_cma_object - GEM object backed by CMA memory allocations + * @base: base GEM object + * @paddr: physical address of the backing memory + * @sgt: scatter/gather table for imported PRIME buffers + * @vaddr: kernel virtual address of the backing memory + */ struct drm_gem_cma_object { struct drm_gem_object base; dma_addr_t paddr; @@ -19,23 +26,30 @@ to_drm_gem_cma_obj(struct drm_gem_object *gem_obj) return container_of(gem_obj, struct drm_gem_cma_object, base); } -/* free gem object. */ +/* free GEM object */ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj); -/* create memory region for drm framebuffer. */ +/* create memory region for DRM framebuffer */ +int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv, + struct drm_device *drm, + struct drm_mode_create_dumb *args); + +/* create memory region for DRM framebuffer */ int drm_gem_cma_dumb_create(struct drm_file *file_priv, - struct drm_device *drm, struct drm_mode_create_dumb *args); + struct drm_device *drm, + struct drm_mode_create_dumb *args); -/* map memory region for drm framebuffer to user space. 
*/ +/* map memory region for DRM framebuffer to user space */ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *drm, uint32_t handle, uint64_t *offset); + struct drm_device *drm, u32 handle, + u64 *offset); -/* set vm_flags and we can change the vm attribute to other one at here. */ +/* set vm_flags and we can change the VM attribute to other one at here */ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); -/* allocate physical memory. */ +/* allocate physical memory */ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, - unsigned int size); + size_t size); extern const struct vm_operations_struct drm_gem_cma_vm_ops; diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 8569dc5a1026..f1d8d0dbb4f1 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -26,6 +26,7 @@ struct mipi_dsi_device; * struct mipi_dsi_msg - read/write DSI buffer * @channel: virtual channel id * @type: payload data type + * @flags: flags controlling this message transmission * @tx_len: length of @tx_buf * @tx_buf: data to be written * @rx_len: length of @rx_buf @@ -43,12 +44,44 @@ struct mipi_dsi_msg { void *rx_buf; }; +bool mipi_dsi_packet_format_is_short(u8 type); +bool mipi_dsi_packet_format_is_long(u8 type); + +/** + * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format + * @size: size (in bytes) of the packet + * @header: the four bytes that make up the header (Data ID, Word Count or + * Packet Data, and ECC) + * @payload_length: number of bytes in the payload + * @payload: a pointer to a buffer containing the payload, if any + */ +struct mipi_dsi_packet { + size_t size; + u8 header[4]; + size_t payload_length; + const u8 *payload; +}; + +int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, + const struct mipi_dsi_msg *msg); + /** * struct mipi_dsi_host_ops - DSI bus operations * @attach: attach DSI device to DSI host * @detach: detach DSI device from DSI host - * @transfer: send and/or receive DSI packet, return number of received bytes, - * or error + * @transfer: transmit a DSI packet + * + * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg + * structures. This structure contains information about the type of packet + * being transmitted as well as the transmit and receive buffers. When an + * error is encountered during transmission, this function will return a + * negative error code. On success it shall return the number of bytes + * transmitted for write packets or the number of bytes received for read + * packets. + * + * Note that typically DSI packet transmission is atomic, so the .transfer() + * function will seldomly return anything other than the number of bytes + * contained in the transmit buffer on success. 
*/ struct mipi_dsi_host_ops { int (*attach)(struct mipi_dsi_host *host, @@ -56,7 +89,7 @@ struct mipi_dsi_host_ops { int (*detach)(struct mipi_dsi_host *host, struct mipi_dsi_device *dsi); ssize_t (*transfer)(struct mipi_dsi_host *host, - struct mipi_dsi_msg *msg); + const struct mipi_dsi_msg *msg); }; /** @@ -130,12 +163,57 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) return container_of(dev, struct mipi_dsi_device, dev); } +struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np); int mipi_dsi_attach(struct mipi_dsi_device *dsi); int mipi_dsi_detach(struct mipi_dsi_device *dsi); -ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data, - size_t len); +int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, + u16 value); + +ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload, + size_t size); +ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params, + size_t num_params, void *data, size_t size); + +/** + * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode + * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking + * information only + * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK : the TE output line consists of both + * V-Blanking and H-Blanking information + */ +enum mipi_dsi_dcs_tear_mode { + MIPI_DSI_DCS_TEAR_MODE_VBLANK, + MIPI_DSI_DCS_TEAR_MODE_VHBLANK, +}; + +#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2) +#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3) +#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4) +#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5) +#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6) + +ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi, + const void *data, size_t len); +ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd, + const void *data, size_t len); ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, size_t len); +int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode); +int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format); +int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start, + u16 end); +int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, + u16 end); +int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, + enum mipi_dsi_dcs_tear_mode mode); +int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format); /** * struct mipi_dsi_driver - DSI driver @@ -167,9 +245,13 @@ static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data) dev_set_drvdata(&dsi->dev, data); } -int mipi_dsi_driver_register(struct mipi_dsi_driver *driver); +int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver, + struct module *owner); void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver); +#define mipi_dsi_driver_register(driver) \ + mipi_dsi_driver_register_full(driver, THIS_MODULE) + #define module_mipi_dsi_driver(__mipi_dsi_driver) \ module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \ mipi_dsi_driver_unregister) diff --git 
a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index 75a5c45e21c7..70595ff565ba 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -33,6 +33,7 @@ struct drm_modeset_lock; * @ww_ctx: base acquire ctx * @contended: used internally for -EDEADLK handling * @locked: list of held locks + * @trylock_only: trylock mode used in atomic contexts/panic notifiers * * Each thread competing for a set of locks must use one acquire * ctx. And if any lock fxn returns -EDEADLK, it must backoff and @@ -126,11 +127,13 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock); struct drm_device; struct drm_crtc; +struct drm_plane; void drm_modeset_lock_all(struct drm_device *dev); int __drm_modeset_lock_all(struct drm_device *dev, bool trylock); void drm_modeset_unlock_all(struct drm_device *dev); -void drm_modeset_lock_crtc(struct drm_crtc *crtc); +void drm_modeset_lock_crtc(struct drm_crtc *crtc, + struct drm_plane *plane); void drm_modeset_unlock_crtc(struct drm_crtc *crtc); void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); struct drm_modeset_acquire_ctx * diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 52e6870534b2..a185392cafeb 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -25,6 +25,7 @@ #define DRM_PLANE_HELPER_H #include <drm/drm_rect.h> +#include <drm/drm_crtc.h> /* * Drivers that don't allow primary plane scaling may pass this macro in place @@ -42,6 +43,37 @@ * planes. */ +extern int drm_crtc_init(struct drm_device *dev, + struct drm_crtc *crtc, + const struct drm_crtc_funcs *funcs); + +/** + * drm_plane_helper_funcs - helper operations for CRTCs + * @prepare_fb: prepare a framebuffer for use by the plane + * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane + * @atomic_check: check that a given atomic state is valid and can be applied + * @atomic_update: apply an atomic state to the plane + * + * The helper operations are called by the mid-layer CRTC helper. 
+ */ +struct drm_plane_helper_funcs { + int (*prepare_fb)(struct drm_plane *plane, + struct drm_framebuffer *fb); + void (*cleanup_fb)(struct drm_plane *plane, + struct drm_framebuffer *fb); + + int (*atomic_check)(struct drm_plane *plane, + struct drm_plane_state *state); + void (*atomic_update)(struct drm_plane *plane, + struct drm_plane_state *old_state); +}; + +static inline void drm_plane_helper_add(struct drm_plane *plane, + const struct drm_plane_helper_funcs *funcs) +{ + plane->helper_private = (void *)funcs; +} + extern int drm_plane_helper_check_update(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -68,4 +100,16 @@ extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev, int num_formats); +int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); +int drm_plane_helper_disable(struct drm_plane *plane); + +/* For use by drm_crtc_helper.c */ +int drm_plane_helper_commit(struct drm_plane *plane, + struct drm_plane_state *plane_state, + struct drm_framebuffer *old_fb); #endif diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index a70d45647898..180ad0e6de21 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -259,4 +259,21 @@ INTEL_VGA_DEVICE(0x22b2, info), \ INTEL_VGA_DEVICE(0x22b3, info) +#define INTEL_SKL_IDS(info) \ + INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \ + INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \ + INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ + INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \ + INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \ + INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ + #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index 460441714413..b620c317c772 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -68,6 +68,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, * non-blocking reserves should be tried. * @list: thread private list of ttm_validate_buffer structs. * @intr: should the wait be interruptible + * @dups: [out] optional list of duplicates. * * Tries to reserve bos pointed to by the list entries for validation. * If the function returns 0, all buffers are marked as "unfenced", @@ -83,6 +84,11 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, * calling process receives a signal while waiting. In that case, no * buffers on the list will be reserved upon return. * + * If dups is non NULL all buffers already reserved by the current thread + * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned + * on the first already reserved buffer and all buffers from the list are + * unreserved again. 
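/*
 * Illustrative sketch, not taken from the patch above: using the new @dups
 * argument of ttm_eu_reserve_buffers() so that buffers listed twice in a
 * submission are collected on a separate list instead of failing the whole
 * reservation with -EALREADY.  How the validation list is built, and the
 * foo_* name, are hypothetical.
 */
#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

static int foo_reserve_bos(struct ww_acquire_ctx *ticket,
			   struct list_head *validate_list)
{
	struct list_head duplicates;
	int ret;

	INIT_LIST_HEAD(&duplicates);

	/* Interruptible reserve; duplicate entries end up on &duplicates. */
	ret = ttm_eu_reserve_buffers(ticket, validate_list, true, &duplicates);
	if (ret)
		return ret;

	/*
	 * Entries on &duplicates are already reserved by this thread; they
	 * only need to be fenced/unreserved together with the main list once
	 * command submission completes.
	 */
	return 0;
}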
+ * * Buffers reserved by this function should be unreserved by * a call to either ttm_eu_backoff_reservation() or * ttm_eu_fence_buffer_objects() when command submission is complete or @@ -90,7 +96,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, */ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, - struct list_head *list, bool intr); + struct list_head *list, bool intr, + struct list_head *dups); /** * function ttm_eu_fence_buffer_objects. diff --git a/include/dt-bindings/dma/at91.h b/include/dt-bindings/dma/at91.h index e835037a77b4..ab6cbba45401 100644 --- a/include/dt-bindings/dma/at91.h +++ b/include/dt-bindings/dma/at91.h @@ -9,6 +9,8 @@ #ifndef __DT_BINDINGS_AT91_DMA_H__ #define __DT_BINDINGS_AT91_DMA_H__ +/* ---------- HDMAC ---------- */ + /* * Source and/or destination peripheral ID */ @@ -24,4 +26,27 @@ #define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */ #define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */ + +/* ---------- XDMAC ---------- */ +#define AT91_XDMAC_DT_MEM_IF_MASK (0x1) +#define AT91_XDMAC_DT_MEM_IF_OFFSET (13) +#define AT91_XDMAC_DT_MEM_IF(mem_if) (((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \ + << AT91_XDMAC_DT_MEM_IF_OFFSET) +#define AT91_XDMAC_DT_GET_MEM_IF(cfg) (((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \ + & AT91_XDMAC_DT_MEM_IF_MASK) + +#define AT91_XDMAC_DT_PER_IF_MASK (0x1) +#define AT91_XDMAC_DT_PER_IF_OFFSET (14) +#define AT91_XDMAC_DT_PER_IF(per_if) (((per_if) & AT91_XDMAC_DT_PER_IF_MASK) \ + << AT91_XDMAC_DT_PER_IF_OFFSET) +#define AT91_XDMAC_DT_GET_PER_IF(cfg) (((cfg) >> AT91_XDMAC_DT_PER_IF_OFFSET) \ + & AT91_XDMAC_DT_PER_IF_MASK) + +#define AT91_XDMAC_DT_PERID_MASK (0x7f) +#define AT91_XDMAC_DT_PERID_OFFSET (24) +#define AT91_XDMAC_DT_PERID(perid) (((perid) & AT91_XDMAC_DT_PERID_MASK) \ + << AT91_XDMAC_DT_PERID_OFFSET) +#define AT91_XDMAC_DT_GET_PERID(cfg) (((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \ + & AT91_XDMAC_DT_PERID_MASK) + #endif /* __DT_BINDINGS_AT91_DMA_H__ */ diff --git a/include/dt-bindings/interrupt-controller/mips-gic.h b/include/dt-bindings/interrupt-controller/mips-gic.h new file mode 100644 index 000000000000..cf35a577e371 --- /dev/null +++ b/include/dt-bindings/interrupt-controller/mips-gic.h @@ -0,0 +1,9 @@ +#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H +#define _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H + +#include <dt-bindings/interrupt-controller/irq.h> + +#define GIC_SHARED 0 +#define GIC_LOCAL 1 + +#endif diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h new file mode 100644 index 000000000000..6c901930eb3e --- /dev/null +++ b/include/dt-bindings/phy/phy.h @@ -0,0 +1,19 @@ +/* + * + * This header provides constants for the phy framework + * + * Copyright (C) 2014 STMicroelectronics + * Author: Gabriel Fernandez <gabriel.fernandez@st.com> + * License terms: GNU General Public License (GPL), version 2 + */ + +#ifndef _DT_BINDINGS_PHY +#define _DT_BINDINGS_PHY + +#define PHY_NONE 0 +#define PHY_TYPE_SATA 1 +#define PHY_TYPE_PCIE 2 +#define PHY_TYPE_USB2 3 +#define PHY_TYPE_USB3 4 + +#endif /* _DT_BINDINGS_PHY */ diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h new file mode 100644 index 000000000000..fa74d7cc960c --- /dev/null +++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h @@ -0,0 +1,142 @@ +/* + * This header provides constants for the Qualcomm PMIC GPIO binding. 
+ */ + +#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H +#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H + +#define PMIC_GPIO_PULL_UP_30 0 +#define PMIC_GPIO_PULL_UP_1P5 1 +#define PMIC_GPIO_PULL_UP_31P5 2 +#define PMIC_GPIO_PULL_UP_1P5_30 3 + +#define PMIC_GPIO_STRENGTH_NO 0 +#define PMIC_GPIO_STRENGTH_HIGH 1 +#define PMIC_GPIO_STRENGTH_MED 2 +#define PMIC_GPIO_STRENGTH_LOW 3 + +/* + * Note: PM8018 GPIO3 and GPIO4 are supporting + * only S3 and L2 options (1.8V) + */ +#define PM8018_GPIO_L6 0 +#define PM8018_GPIO_L5 1 +#define PM8018_GPIO_S3 2 +#define PM8018_GPIO_L14 3 +#define PM8018_GPIO_L2 4 +#define PM8018_GPIO_L4 5 +#define PM8018_GPIO_VDD 6 + +/* + * Note: PM8038 GPIO7 and GPIO8 are supporting + * only L11 and L4 options (1.8V) + */ +#define PM8038_GPIO_VPH 0 +#define PM8038_GPIO_BB 1 +#define PM8038_GPIO_L11 2 +#define PM8038_GPIO_L15 3 +#define PM8038_GPIO_L4 4 +#define PM8038_GPIO_L3 5 +#define PM8038_GPIO_L17 6 + +#define PM8058_GPIO_VPH 0 +#define PM8058_GPIO_BB 1 +#define PM8058_GPIO_S3 2 +#define PM8058_GPIO_L3 3 +#define PM8058_GPIO_L7 4 +#define PM8058_GPIO_L6 5 +#define PM8058_GPIO_L5 6 +#define PM8058_GPIO_L2 7 + +#define PM8917_GPIO_VPH 0 +#define PM8917_GPIO_S4 2 +#define PM8917_GPIO_L15 3 +#define PM8917_GPIO_L4 4 +#define PM8917_GPIO_L3 5 +#define PM8917_GPIO_L17 6 + +#define PM8921_GPIO_VPH 0 +#define PM8921_GPIO_BB 1 +#define PM8921_GPIO_S4 2 +#define PM8921_GPIO_L15 3 +#define PM8921_GPIO_L4 4 +#define PM8921_GPIO_L3 5 +#define PM8921_GPIO_L17 6 + +/* + * Note: PM8941 gpios from 15 to 18 are supporting + * only S3 and L6 options (1.8V) + */ +#define PM8941_GPIO_VPH 0 +#define PM8941_GPIO_L1 1 +#define PM8941_GPIO_S3 2 +#define PM8941_GPIO_L6 3 + +/* + * Note: PMA8084 gpios from 15 to 18 are supporting + * only S4 and L6 options (1.8V) + */ +#define PMA8084_GPIO_VPH 0 +#define PMA8084_GPIO_L1 1 +#define PMA8084_GPIO_S4 2 +#define PMA8084_GPIO_L6 3 + +/* To be used with "function" */ +#define PMIC_GPIO_FUNC_NORMAL "normal" +#define PMIC_GPIO_FUNC_PAIRED "paired" +#define PMIC_GPIO_FUNC_FUNC1 "func1" +#define PMIC_GPIO_FUNC_FUNC2 "func2" +#define PMIC_GPIO_FUNC_DTEST1 "dtest1" +#define PMIC_GPIO_FUNC_DTEST2 "dtest2" +#define PMIC_GPIO_FUNC_DTEST3 "dtest3" +#define PMIC_GPIO_FUNC_DTEST4 "dtest4" + +#define PM8038_GPIO1_2_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO3_5V_BOOST_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO4_SSBI_ALT_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO5_6_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO10_11_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO6_7_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO9_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO6_12_KYPD_DRV PMIC_GPIO_FUNC_FUNC2 + +#define PM8058_GPIO7_8_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO7_8_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO9_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO24_26_LPG_DRV PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO33_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO34_35_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO36_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO37_UPL_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO37_UART_M_RX PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO38_39_CLK_32KHZ PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO39_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO40_EXT_BB_EN PMIC_GPIO_FUNC_FUNC1 + +#define PM8917_GPIO9_18_KEYP_DRV PMIC_GPIO_FUNC_FUNC1 +#define 
PM8917_GPIO20_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2 +#define PM8917_GPIO25_26_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO37_38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO37_38_MP3_CLK PMIC_GPIO_FUNC_FUNC2 + +#define PM8941_GPIO9_14_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2 +#define PM8941_GPIO23_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO23_26_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2 +#define PM8941_GPIO31_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO33_36_LPG_DRV_3D PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO33_36_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2 + +#define PMA8084_GPIO4_5_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO7_10_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO5_14_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO19_21_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO22_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 + +#endif diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h new file mode 100644 index 000000000000..d2c7dabe3223 --- /dev/null +++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h @@ -0,0 +1,44 @@ +/* + * This header provides constants for the Qualcomm PMIC's + * Multi-Purpose Pin binding. + */ + +#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H +#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H + +/* power-source */ +#define PM8841_MPP_VPH 0 +#define PM8841_MPP_S3 2 + +#define PM8941_MPP_VPH 0 +#define PM8941_MPP_L1 1 +#define PM8941_MPP_S3 2 +#define PM8941_MPP_L6 3 + +#define PMA8084_MPP_VPH 0 +#define PMA8084_MPP_L1 1 +#define PMA8084_MPP_S4 2 +#define PMA8084_MPP_L6 3 + +/* + * Analog Input - Set the source for analog input. + * To be used with "qcom,amux-route" property + */ +#define PMIC_MPP_AMUX_ROUTE_CH5 0 +#define PMIC_MPP_AMUX_ROUTE_CH6 1 +#define PMIC_MPP_AMUX_ROUTE_CH7 2 +#define PMIC_MPP_AMUX_ROUTE_CH8 3 +#define PMIC_MPP_AMUX_ROUTE_ABUS1 4 +#define PMIC_MPP_AMUX_ROUTE_ABUS2 5 +#define PMIC_MPP_AMUX_ROUTE_ABUS3 6 +#define PMIC_MPP_AMUX_ROUTE_ABUS4 7 + +/* To be used with "function" */ +#define PMIC_MPP_FUNC_NORMAL "normal" +#define PMIC_MPP_FUNC_PAIRED "paired" +#define PMIC_MPP_FUNC_DTEST1 "dtest1" +#define PMIC_MPP_FUNC_DTEST2 "dtest2" +#define PMIC_MPP_FUNC_DTEST3 "dtest3" +#define PMIC_MPP_FUNC_DTEST4 "dtest4" + +#endif diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h new file mode 100644 index 000000000000..85aaf66690f9 --- /dev/null +++ b/include/dt-bindings/thermal/tegra124-soctherm.h @@ -0,0 +1,13 @@ +/* + * This header provides constants for binding nvidia,tegra124-soctherm. 
+ */ + +#ifndef _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H +#define _DT_BINDINGS_THERMAL_TEGRA124_SOCTHERM_H + +#define TEGRA124_SOCTHERM_SENSOR_CPU 0 +#define TEGRA124_SOCTHERM_SENSOR_MEM 1 +#define TEGRA124_SOCTHERM_SENSOR_GPU 2 +#define TEGRA124_SOCTHERM_SENSOR_PLLX 3 + +#endif diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index ad9db6045b2f..b3f45a578344 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -60,7 +60,8 @@ struct arch_timer_cpu { #ifdef CONFIG_KVM_ARM_TIMER int kvm_timer_hyp_init(void); -int kvm_timer_init(struct kvm *kvm); +void kvm_timer_enable(struct kvm *kvm); +void kvm_timer_init(struct kvm *kvm); void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, const struct kvm_irq_level *irq); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); @@ -77,11 +78,8 @@ static inline int kvm_timer_hyp_init(void) return 0; }; -static inline int kvm_timer_init(struct kvm *kvm) -{ - return 0; -} - +static inline void kvm_timer_enable(struct kvm *kvm) {} +static inline void kvm_timer_init(struct kvm *kvm) {} static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu, const struct kvm_irq_level *irq) {} static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {} diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 206dcc3b3f7a..ac4888dc86bc 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -274,7 +274,7 @@ struct kvm_exit_mmio; #ifdef CONFIG_KVM_ARM_VGIC int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); int kvm_vgic_hyp_init(void); -int kvm_vgic_init(struct kvm *kvm); +int kvm_vgic_map_resources(struct kvm *kvm); int kvm_vgic_create(struct kvm *kvm); void kvm_vgic_destroy(struct kvm *kvm); void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -287,7 +287,8 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_exit_mmio *mmio); #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) -#define vgic_initialized(k) ((k)->arch.vgic.ready) +#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) +#define vgic_ready(k) ((k)->arch.vgic.ready) int vgic_v2_probe(struct device_node *vgic_node, const struct vgic_ops **ops, @@ -321,7 +322,7 @@ static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, return -ENXIO; } -static inline int kvm_vgic_init(struct kvm *kvm) +static inline int kvm_vgic_map_resources(struct kvm *kvm) { return 0; } @@ -373,6 +374,11 @@ static inline bool vgic_initialized(struct kvm *kvm) { return true; } + +static inline bool vgic_ready(struct kvm *kvm) +{ + return true; +} #endif #endif diff --git a/include/linux/acpi.h b/include/linux/acpi.h index a81dd437a53c..856d381b1d5b 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -28,6 +28,7 @@ #include <linux/errno.h> #include <linux/ioport.h> /* for struct resource */ #include <linux/device.h> +#include <linux/property.h> #ifndef _LINUX #define _LINUX @@ -123,6 +124,10 @@ int acpi_numa_init (void); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); +int __init acpi_parse_entries(char *id, unsigned long table_size, + acpi_tbl_entry_handler handler, + struct acpi_table_header *table_header, + int entry_id, unsigned int max_entries); int __init acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, acpi_tbl_entry_handler handler, @@ -424,14 +429,11 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), const struct acpi_device_id *acpi_match_device(const struct acpi_device_id 
*ids, const struct device *dev); -static inline bool acpi_driver_match_device(struct device *dev, - const struct device_driver *drv) -{ - return !!acpi_match_device(drv->acpi_match_table, dev); -} - +extern bool acpi_driver_match_device(struct device *dev, + const struct device_driver *drv); int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); +void acpi_walk_dep_device_list(acpi_handle handle); struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_PTR(_ptr) (_ptr) @@ -444,6 +446,23 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define ACPI_HANDLE(dev) (NULL) +struct fwnode_handle; + +static inline bool is_acpi_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) +{ + return NULL; +} + static inline const char *acpi_dev_name(struct acpi_device *adev) { return NULL; @@ -554,16 +573,26 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr, #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) #endif -#if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) +#if defined(CONFIG_ACPI) && defined(CONFIG_PM) int acpi_dev_runtime_suspend(struct device *dev); int acpi_dev_runtime_resume(struct device *dev); int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); +struct acpi_device *acpi_dev_pm_get_node(struct device *dev); +int acpi_dev_pm_attach(struct device *dev, bool power_on); #else static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } +static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) +{ + return NULL; +} +static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) +{ + return -ENODEV; +} #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) @@ -586,20 +615,6 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } #endif -#if defined(CONFIG_ACPI) && defined(CONFIG_PM) -struct acpi_device *acpi_dev_pm_get_node(struct device *dev); -int acpi_dev_pm_attach(struct device *dev, bool power_on); -#else -static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) -{ - return NULL; -} -static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) -{ - return -ENODEV; -} -#endif - #ifdef CONFIG_ACPI __printf(3, 4) void acpi_handle_printk(const char *level, acpi_handle handle, @@ -660,4 +675,114 @@ do { \ #endif #endif +struct acpi_gpio_params { + unsigned int crs_entry_index; + unsigned int line_index; + bool active_low; +}; + +struct acpi_gpio_mapping { + const char *name; + const struct acpi_gpio_params *data; + unsigned int size; +}; + +#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) +int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios); + +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) +{ + if (adev) + adev->driver_gpios = NULL; +} +#else +static 
inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios) +{ + return -ENXIO; +} +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} +#endif + +/* Device properties */ + +#define MAX_ACPI_REFERENCE_ARGS 8 +struct acpi_reference_args { + struct acpi_device *adev; + size_t nargs; + u64 args[MAX_ACPI_REFERENCE_ARGS]; +}; + +#ifdef CONFIG_ACPI +int acpi_dev_get_property(struct acpi_device *adev, const char *name, + acpi_object_type type, const union acpi_object **obj); +int acpi_dev_get_property_array(struct acpi_device *adev, const char *name, + acpi_object_type type, + const union acpi_object **obj); +int acpi_dev_get_property_reference(struct acpi_device *adev, + const char *name, size_t index, + struct acpi_reference_args *args); + +int acpi_dev_prop_get(struct acpi_device *adev, const char *propname, + void **valptr); +int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, + enum dev_prop_type proptype, void *val); +int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, + enum dev_prop_type proptype, void *val, size_t nval); + +struct acpi_device *acpi_get_next_child(struct device *dev, + struct acpi_device *child); +#else +static inline int acpi_dev_get_property(struct acpi_device *adev, + const char *name, acpi_object_type type, + const union acpi_object **obj) +{ + return -ENXIO; +} +static inline int acpi_dev_get_property_array(struct acpi_device *adev, + const char *name, + acpi_object_type type, + const union acpi_object **obj) +{ + return -ENXIO; +} +static inline int acpi_dev_get_property_reference(struct acpi_device *adev, + const char *name, const char *cells_name, + size_t index, struct acpi_reference_args *args) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_get(struct acpi_device *adev, + const char *propname, + void **valptr) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read_single(struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read(struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val, size_t nval) +{ + return -ENXIO; +} + +static inline struct acpi_device *acpi_get_next_child(struct device *dev, + struct acpi_device *child) +{ + return NULL; +} + +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index c324f5700d1a..2afc618b15ce 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -23,6 +23,7 @@ #define AMBA_NR_IRQS 9 #define AMBA_CID 0xb105f00d +#define CORESIGHT_CID 0xb105900d struct clk; @@ -97,6 +98,16 @@ void amba_release_regions(struct amba_device *); #define amba_pclk_disable(d) \ do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) +static inline int amba_pclk_prepare(struct amba_device *dev) +{ + return clk_prepare(dev->pclk); +} + +static inline void amba_pclk_unprepare(struct amba_device *dev) +{ + clk_unprepare(dev->pclk); +} + /* Some drivers don't use the struct amba_device */ #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h index a495a959e8a7..33eb274cd0e6 100644 --- a/include/linux/ath9k_platform.h +++ b/include/linux/ath9k_platform.h @@ -31,8 +31,11 @@ struct ath9k_platform_data { u32 gpio_mask; u32 gpio_val; + bool endian_check; bool is_clk_25mhz; bool tx_gain_buffalo; + bool 
disable_2ghz; + bool disable_5ghz; int (*get_mac_revision)(void); int (*external_reset)(void); diff --git a/include/linux/audit.h b/include/linux/audit.h index e58fe7df8b9c..0c04917c2f12 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -130,6 +130,7 @@ extern void audit_putname(struct filename *name); #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); +extern void __audit_file(const struct file *); extern void __audit_inode_child(const struct inode *parent, const struct dentry *dentry, const unsigned char type); @@ -183,6 +184,11 @@ static inline void audit_inode(struct filename *name, __audit_inode(name, dentry, flags); } } +static inline void audit_file(struct file *file) +{ + if (unlikely(!audit_dummy_context())) + __audit_file(file); +} static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { @@ -357,6 +363,9 @@ static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int parent) { } +static inline void audit_file(struct file *file) +{ +} static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { } diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 729f48e6b20b..eb1c6a47b67f 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -447,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset); #define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ extern u32 bcma_core_dma_translation(struct bcma_device *core); +extern unsigned int bcma_core_irq(struct bcma_device *core, int num); + #endif /* LINUX_BCMA_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h index fb61f3fb4ddb..0b3b32aeeb8a 100644 --- a/include/linux/bcma/bcma_driver_mips.h +++ b/include/linux/bcma/bcma_driver_mips.h @@ -43,12 +43,12 @@ struct bcma_drv_mips { extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); -extern unsigned int bcma_core_irq(struct bcma_device *core); +extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); #else static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } -static inline unsigned int bcma_core_irq(struct bcma_device *core) +static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev) { return 0; } diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 61f29e5ea840..576e4639ca60 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -53,6 +53,10 @@ struct linux_binprm { #define BINPRM_FLAGS_EXECFD_BIT 1 #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) +/* filename of the binary will be inaccessible after exec */ +#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 +#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) + /* Function parameter for binfmt->coredump */ struct coredump_params { const siginfo_t *siginfo; diff --git a/include/linux/bio.h b/include/linux/bio.h index 7347f486ceca..efead0b532c4 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -443,6 +443,11 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); +void 
generic_start_io_acct(int rw, unsigned long sectors, + struct hd_struct *part); +void generic_end_io_acct(int rw, struct hd_struct *part, + unsigned long start_time); + #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" #endif diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index e1c8d080c427..202e4034fe26 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -45,6 +45,7 @@ * bitmap_set(dst, pos, nbits) Set specified bit area * bitmap_clear(dst, pos, nbits) Clear specified bit area * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) @@ -60,6 +61,7 @@ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region * bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region + * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex */ /* @@ -114,11 +116,36 @@ extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); extern void bitmap_set(unsigned long *map, unsigned int start, int len); extern void bitmap_clear(unsigned long *map, unsigned int start, int len); -extern unsigned long bitmap_find_next_zero_area(unsigned long *map, - unsigned long size, - unsigned long start, - unsigned int nr, - unsigned long align_mask); + +extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset); + +/** + * bitmap_find_next_zero_area - find a contiguous aligned zero area + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @align_mask: Alignment mask for zero area + * + * The @align_mask should be one less than a power of 2; the effect is that + * the bit offset of all zero areas this function finds is multiples of that + * power of 2. A @align_mask of 0 means no alignment is required. 
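 *
 * A usage sketch (the map, its size and the error handling are illustrative
 * only, not taken from any particular caller): with an @align_mask of 3,
 * only positions that are multiples of 4 are returned:
 *
 *	unsigned long pos;
 *
 *	pos = bitmap_find_next_zero_area(map, 128, 0, 4, 3);
 *	if (pos >= 128)
 *		return -ENOSPC;
 *	bitmap_set(map, pos, 4);
 *
 * bitmap_find_next_zero_area_off() additionally takes an align_offset, so
 * that the returned position satisfies
 * ((pos + align_offset) & align_mask) == 0 rather than
 * (pos & align_mask) == 0.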
+ */ +static inline unsigned long +bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask) +{ + return bitmap_find_next_zero_area_off(map, size, start, nr, + align_mask, 0); +} extern int bitmap_scnprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); @@ -145,6 +172,8 @@ extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int o extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); +extern int bitmap_print_to_pagebuf(bool list, char *buf, + const unsigned long *maskp, int nmaskbits); #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) #define BITMAP_LAST_WORD_MASK(nbits) \ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 15f7034aa377..8aded9ab2e4e 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -79,7 +79,13 @@ struct blk_mq_tag_set { struct list_head tag_list; }; -typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool); +struct blk_mq_queue_data { + struct request *rq; + struct list_head *list; + bool last; +}; + +typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); @@ -140,6 +146,7 @@ enum { BLK_MQ_F_TAG_SHARED = 1 << 1, BLK_MQ_F_SG_MERGE = 1 << 2, BLK_MQ_F_SYSFS_UP = 1 << 3, + BLK_MQ_F_DEFER_ISSUE = 1 << 4, BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, @@ -162,6 +169,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); void blk_mq_insert_request(struct request *, bool, bool, bool); void blk_mq_run_queues(struct request_queue *q, bool async); void blk_mq_free_request(struct request *rq); +void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6d76b8b4aa2b..92f4b4b288dd 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -398,7 +398,7 @@ struct request_queue { */ struct kobject mq_kobj; -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM struct device *dev; int rpm_status; unsigned int nr_pending; @@ -1057,7 +1057,7 @@ extern void blk_put_queue(struct request_queue *); /* * block layer runtime pm functions */ -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); extern int blk_pre_runtime_suspend(struct request_queue *q); extern void blk_post_runtime_suspend(struct request_queue *q, int err); @@ -1184,7 +1184,6 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); enum blk_default_limits { BLK_MAX_SEGMENTS = 128, BLK_SAFE_MAX_SECTORS = 255, - BLK_DEF_MAX_SECTORS = 1024, BLK_MAX_SEGMENT_SIZE = 65536, BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, }; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3cf91754a957..bbfceb756452 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -22,7 +22,7 @@ struct bpf_map_ops { /* funcs callable from userspace and from eBPF programs */ void 
*(*map_lookup_elem)(struct bpf_map *map, void *key); - int (*map_update_elem)(struct bpf_map *map, void *key, void *value); + int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); int (*map_delete_elem)(struct bpf_map *map, void *key); }; @@ -128,9 +128,18 @@ struct bpf_prog_aux { struct work_struct work; }; +#ifdef CONFIG_BPF_SYSCALL void bpf_prog_put(struct bpf_prog *prog); +#else +static inline void bpf_prog_put(struct bpf_prog *prog) {} +#endif struct bpf_prog *bpf_prog_get(u32 ufd); /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); +/* verifier prototypes for helper functions called from eBPF programs */ +extern struct bpf_func_proto bpf_map_lookup_elem_proto; +extern struct bpf_func_proto bpf_map_update_elem_proto; +extern struct bpf_func_proto bpf_map_delete_elem_proto; + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h new file mode 100644 index 000000000000..3daf5ed392c9 --- /dev/null +++ b/include/linux/cacheinfo.h @@ -0,0 +1,100 @@ +#ifndef _LINUX_CACHEINFO_H +#define _LINUX_CACHEINFO_H + +#include <linux/bitops.h> +#include <linux/cpumask.h> +#include <linux/smp.h> + +struct device_node; +struct attribute; + +enum cache_type { + CACHE_TYPE_NOCACHE = 0, + CACHE_TYPE_INST = BIT(0), + CACHE_TYPE_DATA = BIT(1), + CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA, + CACHE_TYPE_UNIFIED = BIT(2), +}; + +/** + * struct cacheinfo - represent a cache leaf node + * @type: type of the cache - data, inst or unified + * @level: represents the hierarcy in the multi-level cache + * @coherency_line_size: size of each cache line usually representing + * the minimum amount of data that gets transferred from memory + * @number_of_sets: total number of sets, a set is a collection of cache + * lines sharing the same index + * @ways_of_associativity: number of ways in which a particular memory + * block can be placed in the cache + * @physical_line_partition: number of physical cache lines sharing the + * same cachetag + * @size: Total size of the cache + * @shared_cpu_map: logical cpumask representing all the cpus sharing + * this cache node + * @attributes: bitfield representing various cache attributes + * @of_node: if devicetree is used, this represents either the cpu node in + * case there's no explicit cache node or the cache node itself in the + * device tree + * @disable_sysfs: indicates whether this node is visible to the user via + * sysfs or not + * @priv: pointer to any private data structure specific to particular + * cache design + * + * While @of_node, @disable_sysfs and @priv are used for internal book + * keeping, the remaining members form the core properties of the cache + */ +struct cacheinfo { + enum cache_type type; + unsigned int level; + unsigned int coherency_line_size; + unsigned int number_of_sets; + unsigned int ways_of_associativity; + unsigned int physical_line_partition; + unsigned int size; + cpumask_t shared_cpu_map; + unsigned int attributes; +#define CACHE_WRITE_THROUGH BIT(0) +#define CACHE_WRITE_BACK BIT(1) +#define CACHE_WRITE_POLICY_MASK \ + (CACHE_WRITE_THROUGH | CACHE_WRITE_BACK) +#define CACHE_READ_ALLOCATE BIT(2) +#define CACHE_WRITE_ALLOCATE BIT(3) +#define CACHE_ALLOCATE_POLICY_MASK \ + (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE) + + struct device_node *of_node; + bool disable_sysfs; + void *priv; +}; + +struct cpu_cacheinfo { + struct cacheinfo *info_list; + unsigned int num_levels; + unsigned int num_leaves; +}; + +/* + * Helpers 
to make sure "func" is executed on the cpu whose cache + * attributes are being detected + */ +#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \ +static inline void _##func(void *ret) \ +{ \ + int cpu = smp_processor_id(); \ + *(int *)ret = __##func(cpu); \ +} \ + \ +int func(unsigned int cpu) \ +{ \ + int ret; \ + smp_call_function_single(cpu, _##func, &ret, true); \ + return ret; \ +} + +struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); +int init_cache_level(unsigned int cpu); +int populate_cache_leaves(unsigned int cpu); + +const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); + +#endif /* _LINUX_CACHEINFO_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index b37ea95bc348..c05ff0f9f9a5 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -127,6 +127,9 @@ void unregister_candev(struct net_device *dev); int can_restart_now(struct net_device *dev); void can_bus_off(struct net_device *dev); +void can_change_state(struct net_device *dev, struct can_frame *cf, + enum can_state tx_state, enum can_state rx_state); + void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int idx); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index 5f3386844134..260d78b587c4 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -13,6 +13,7 @@ struct ceph_auth_client; struct ceph_authorizer; +struct ceph_msg; struct ceph_auth_handshake { struct ceph_authorizer *authorizer; @@ -20,6 +21,10 @@ struct ceph_auth_handshake { size_t authorizer_buf_len; void *authorizer_reply_buf; size_t authorizer_reply_buf_len; + int (*sign_message)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); + int (*check_message_signature)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); }; struct ceph_auth_client_ops { @@ -66,6 +71,11 @@ struct ceph_auth_client_ops { void (*reset)(struct ceph_auth_client *ac); void (*destroy)(struct ceph_auth_client *ac); + + int (*sign_message)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); + int (*check_message_signature)(struct ceph_auth_handshake *auth, + struct ceph_msg *msg); }; struct ceph_auth_client { @@ -113,4 +123,20 @@ extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type); +static inline int ceph_auth_sign_message(struct ceph_auth_handshake *auth, + struct ceph_msg *msg) +{ + if (auth->sign_message) + return auth->sign_message(auth, msg); + return 0; +} + +static inline +int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth, + struct ceph_msg *msg) +{ + if (auth->check_message_signature) + return auth->check_message_signature(auth, msg); + return 0; +} #endif diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h index 07ad423cc37f..07ca15e76100 100644 --- a/include/linux/ceph/buffer.h +++ b/include/linux/ceph/buffer.h @@ -10,8 +10,7 @@ /* * a simple reference counted buffer. * - * use kmalloc for small sizes (<= one page), vmalloc for larger - * sizes. + * use kmalloc for smaller sizes, vmalloc for larger sizes. 
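 * (the size-based choice is expected to be made by the allocation helper,
 * ceph_kvmalloc())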
*/ struct ceph_buffer { struct kref kref; diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index d12659ce550d..71e05bbf8ceb 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -84,6 +84,7 @@ static inline u64 ceph_sanitize_features(u64 features) CEPH_FEATURE_PGPOOL3 | \ CEPH_FEATURE_OSDENC | \ CEPH_FEATURE_CRUSH_TUNABLES | \ + CEPH_FEATURE_MSG_AUTH | \ CEPH_FEATURE_CRUSH_TUNABLES2 | \ CEPH_FEATURE_REPLY_CREATE_INODE | \ CEPH_FEATURE_OSDHASHPSPOOL | \ diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 3c97d5e9b951..c0dadaac26e3 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -522,8 +522,11 @@ struct ceph_mds_reply_dirfrag { __le32 dist[]; } __attribute__ ((packed)); -#define CEPH_LOCK_FCNTL 1 -#define CEPH_LOCK_FLOCK 2 +#define CEPH_LOCK_FCNTL 1 +#define CEPH_LOCK_FLOCK 2 +#define CEPH_LOCK_FCNTL_INTR 3 +#define CEPH_LOCK_FLOCK_INTR 4 + #define CEPH_LOCK_SHARED 1 #define CEPH_LOCK_EXCL 2 @@ -549,6 +552,7 @@ struct ceph_filelock { int ceph_flags_to_mode(int flags); +#define CEPH_INLINE_NONE ((__u64)-1) /* capability bits */ #define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */ @@ -613,6 +617,8 @@ int ceph_flags_to_mode(int flags); CEPH_CAP_LINK_SHARED | \ CEPH_CAP_FILE_SHARED | \ CEPH_CAP_XATTR_SHARED) +#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \ + CEPH_CAP_FILE_RD) #define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \ CEPH_CAP_LINK_SHARED | \ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 07bc359b88ac..8b11a79ca1cb 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -29,6 +29,7 @@ #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ #define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ +#define CEPH_OPT_NOMSGAUTH (1<<4) /* not require cephx message signature */ #define CEPH_OPT_DEFAULT (0) @@ -184,7 +185,6 @@ extern bool libceph_compatible(void *data); extern const char *ceph_msg_type_name(int type); extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); extern void *ceph_kvmalloc(size_t size, gfp_t flags); -extern void ceph_kvfree(const void *ptr); extern struct ceph_options *ceph_parse_options(char *options, const char *dev_name, const char *dev_name_end, diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 40ae58e3e9db..d9d396c16503 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -42,6 +42,10 @@ struct ceph_connection_operations { struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip); + int (*sign_message) (struct ceph_connection *con, struct ceph_msg *msg); + + int (*check_message_signature) (struct ceph_connection *con, + struct ceph_msg *msg); }; /* use format string %s%d */ @@ -142,7 +146,10 @@ struct ceph_msg_data_cursor { */ struct ceph_msg { struct ceph_msg_header hdr; /* header */ - struct ceph_msg_footer footer; /* footer */ + union { + struct ceph_msg_footer footer; /* footer */ + struct ceph_msg_footer_old old_footer; /* old format footer */ + }; struct kvec front; /* unaligned blobs of message */ struct ceph_buffer *middle; diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h index 3d94a73b5f30..1c1887206ffa 100644 --- a/include/linux/ceph/msgr.h +++ b/include/linux/ceph/msgr.h @@ -152,7 +152,8 @@ struct ceph_msg_header { receiver: 
mask against ~PAGE_MASK */ struct ceph_entity_name src; - __le32 reserved; + __le16 compat_version; + __le16 reserved; __le32 crc; /* header crc32c */ } __attribute__ ((packed)); @@ -164,13 +165,21 @@ struct ceph_msg_header { /* * follows data payload */ +struct ceph_msg_footer_old { + __le32 front_crc, middle_crc, data_crc; + __u8 flags; +} __attribute__ ((packed)); + struct ceph_msg_footer { __le32 front_crc, middle_crc, data_crc; + // sig holds the 64 bits of the digital signature for the message PLR + __le64 sig; __u8 flags; } __attribute__ ((packed)); #define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */ #define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */ +#define CEPH_MSG_FOOTER_SIGNED (1<<2) /* msg was signed */ #endif diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 03aeb27fcc69..5d86416d35f2 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -87,6 +87,13 @@ struct ceph_osd_req_op { struct ceph_osd_data osd_data; } extent; struct { + __le32 name_len; + __le32 value_len; + __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ + __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ + struct ceph_osd_data osd_data; + } xattr; + struct { const char *class_name; const char *method_name; struct ceph_osd_data request_info; @@ -295,6 +302,9 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *class, const char *method); +extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, + u16 opcode, const char *name, const void *value, + size_t size, u8 cmp_op, u8 cmp_mode); extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 cookie, u64 version, int flag); @@ -318,7 +328,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, struct ceph_file_layout *layout, struct ceph_vino vino, u64 offset, u64 *len, - int num_ops, int opcode, int flags, + unsigned int which, int num_ops, + int opcode, int flags, struct ceph_snap_context *snapc, u32 truncate_seq, u64 truncate_size, bool use_mempool); diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h index 5f871d84ddce..13d71fe18b0c 100644 --- a/include/linux/ceph/pagelist.h +++ b/include/linux/ceph/pagelist.h @@ -1,8 +1,10 @@ #ifndef __FS_CEPH_PAGELIST_H #define __FS_CEPH_PAGELIST_H -#include <linux/list.h> +#include <asm/byteorder.h> #include <linux/atomic.h> +#include <linux/list.h> +#include <linux/types.h> struct ceph_pagelist { struct list_head head; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1d5196889048..da0dae0600e6 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css) } /** + * css_get_many - obtain references on the specified css + * @css: target css + * @n: number of references to get + * + * The caller must already have a reference. 
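 *
 * A usage sketch (nr is an arbitrary caller-side count); every reference
 * taken this way must later be dropped with css_put() or css_put_many():
 *
 *	css_get_many(css, nr);
 *	... hand out nr references ...
 *	css_put_many(css, nr);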
+ */ +static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_get_many(&css->refcnt, n); +} + +/** * css_tryget - try to obtain a reference on the specified css * @css: target css * @@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css) percpu_ref_put(&css->refcnt); } +/** + * css_put_many - put css references + * @css: target css + * @n: number of references to put + * + * Put references obtained via css_get() and css_tryget_online(). + */ +static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_put_many(&css->refcnt, n); +} + /* bits in struct cgroup flags field */ enum { /* Control Group requires release notifications to userspace */ @@ -367,8 +393,8 @@ struct css_set { * struct cftype: handler definitions for cgroup control files * * When reading/writing to a file: - * - the cgroup to use is file->f_dentry->d_parent->d_fsdata - * - the 'cftype' of the file is file->f_dentry->d_fsdata + * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata + * - the 'cftype' of the file is file->f_path.dentry->d_fsdata */ /* cftype->flags */ @@ -612,8 +638,10 @@ struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); int (*css_online)(struct cgroup_subsys_state *css); void (*css_offline)(struct cgroup_subsys_state *css); + void (*css_released)(struct cgroup_subsys_state *css); void (*css_free)(struct cgroup_subsys_state *css); void (*css_reset)(struct cgroup_subsys_state *css); + void (*css_e_css_changed)(struct cgroup_subsys_state *css); int (*can_attach)(struct cgroup_subsys_state *css, struct cgroup_taskset *tset); @@ -908,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it); int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); +struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, + struct cgroup_subsys *ss); struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, struct cgroup_subsys *ss); diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h new file mode 100644 index 000000000000..4d1019d56f7f --- /dev/null +++ b/include/linux/clock_cooling.h @@ -0,0 +1,65 @@ +/* + * linux/include/linux/clock_cooling.h + * + * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com> + * + * Copyright (C) 2013 Texas Instruments Inc. + * Contact: Eduardo Valentin <eduardo.valentin@ti.com> + * + * Highly based on cpu_cooling.c. + * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) + * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef __CPU_COOLING_H__ +#define __CPU_COOLING_H__ + +#include <linux/of.h> +#include <linux/thermal.h> +#include <linux/cpumask.h> + +#ifdef CONFIG_CLOCK_THERMAL +/** + * clock_cooling_register - function to create clock cooling device. 
+ * @dev: struct device pointer to the device used as clock cooling device. + * @clock_name: string containing the clock used as cooling mechanism. + */ +struct thermal_cooling_device * +clock_cooling_register(struct device *dev, const char *clock_name); + +/** + * clock_cooling_unregister - function to remove clock cooling device. + * @cdev: thermal cooling device pointer. + */ +void clock_cooling_unregister(struct thermal_cooling_device *cdev); + +unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, + unsigned long freq); +#else /* !CONFIG_CLOCK_THERMAL */ +static inline struct thermal_cooling_device * +clock_cooling_register(struct device *dev, const char *clock_name) +{ + return NULL; +} +static inline +void clock_cooling_unregister(struct thermal_cooling_device *cdev) +{ +} +static inline +unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, + unsigned long freq) +{ + return THERMAL_CSTATE_INVALID; +} +#endif /* CONFIG_CLOCK_THERMAL */ + +#endif /* __CPU_COOLING_H__ */ diff --git a/include/linux/cma.h b/include/linux/cma.h index a93438beb33c..9384ba66e975 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -15,6 +15,7 @@ struct cma; +extern unsigned long totalcma_pages; extern phys_addr_t cma_get_base(struct cma *cma); extern unsigned long cma_get_size(struct cma *cma); diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 60bdf8dc02a3..3238ffa33f68 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -33,10 +33,11 @@ extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, enum migrate_mode mode, int *contended, - struct zone **candidate_zone); + int alloc_flags, int classzone_idx); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); -extern unsigned long compaction_suitable(struct zone *zone, int order); +extern unsigned long compaction_suitable(struct zone *zone, int order, + int alloc_flags, int classzone_idx); /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 @@ -103,7 +104,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, enum migrate_mode mode, int *contended, - struct zone **candidate_zone) + int alloc_flags, int classzone_idx) { return COMPACT_CONTINUE; } @@ -116,7 +117,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) { } -static inline unsigned long compaction_suitable(struct zone *zone, int order) +static inline unsigned long compaction_suitable(struct zone *zone, int order, + int alloc_flags, int classzone_idx) { return COMPACT_SKIPPED; } diff --git a/include/linux/compat.h b/include/linux/compat.h index e6494261eaff..7450ca2ac1fc 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -357,6 +357,9 @@ asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, const compat_uptr_t __user *envp); +asmlinkage long compat_sys_execveat(int dfd, const char __user *filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp, int flags); asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user 
*exp, diff --git a/include/linux/coresight.h b/include/linux/coresight.h new file mode 100644 index 000000000000..5d3c54311f7a --- /dev/null +++ b/include/linux/coresight.h @@ -0,0 +1,263 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_CORESIGHT_H +#define _LINUX_CORESIGHT_H + +#include <linux/device.h> + +/* Peripheral id registers (0xFD0-0xFEC) */ +#define CORESIGHT_PERIPHIDR4 0xfd0 +#define CORESIGHT_PERIPHIDR5 0xfd4 +#define CORESIGHT_PERIPHIDR6 0xfd8 +#define CORESIGHT_PERIPHIDR7 0xfdC +#define CORESIGHT_PERIPHIDR0 0xfe0 +#define CORESIGHT_PERIPHIDR1 0xfe4 +#define CORESIGHT_PERIPHIDR2 0xfe8 +#define CORESIGHT_PERIPHIDR3 0xfeC +/* Component id registers (0xFF0-0xFFC) */ +#define CORESIGHT_COMPIDR0 0xff0 +#define CORESIGHT_COMPIDR1 0xff4 +#define CORESIGHT_COMPIDR2 0xff8 +#define CORESIGHT_COMPIDR3 0xffC + +#define ETM_ARCH_V3_3 0x23 +#define ETM_ARCH_V3_5 0x25 +#define PFT_ARCH_V1_0 0x30 +#define PFT_ARCH_V1_1 0x31 + +#define CORESIGHT_UNLOCK 0xc5acce55 + +extern struct bus_type coresight_bustype; + +enum coresight_dev_type { + CORESIGHT_DEV_TYPE_NONE, + CORESIGHT_DEV_TYPE_SINK, + CORESIGHT_DEV_TYPE_LINK, + CORESIGHT_DEV_TYPE_LINKSINK, + CORESIGHT_DEV_TYPE_SOURCE, +}; + +enum coresight_dev_subtype_sink { + CORESIGHT_DEV_SUBTYPE_SINK_NONE, + CORESIGHT_DEV_SUBTYPE_SINK_PORT, + CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, +}; + +enum coresight_dev_subtype_link { + CORESIGHT_DEV_SUBTYPE_LINK_NONE, + CORESIGHT_DEV_SUBTYPE_LINK_MERG, + CORESIGHT_DEV_SUBTYPE_LINK_SPLIT, + CORESIGHT_DEV_SUBTYPE_LINK_FIFO, +}; + +enum coresight_dev_subtype_source { + CORESIGHT_DEV_SUBTYPE_SOURCE_NONE, + CORESIGHT_DEV_SUBTYPE_SOURCE_PROC, + CORESIGHT_DEV_SUBTYPE_SOURCE_BUS, + CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE, +}; + +/** + * struct coresight_dev_subtype - further characterisation of a type + * @sink_subtype: type of sink this component is, as defined + by @coresight_dev_subtype_sink. + * @link_subtype: type of link this component is, as defined + by @coresight_dev_subtype_link. + * @source_subtype: type of source this component is, as defined + by @coresight_dev_subtype_source. + */ +struct coresight_dev_subtype { + enum coresight_dev_subtype_sink sink_subtype; + enum coresight_dev_subtype_link link_subtype; + enum coresight_dev_subtype_source source_subtype; +}; + +/** + * struct coresight_platform_data - data harvested from the DT specification + * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs. + * @name: name of the component as shown under sysfs. + * @nr_inport: number of input ports for this component. + * @outports: list of remote endpoint port number. + * @child_names:name of all child components connected to this device. + * @child_ports:child component port number the current component is + connected to. + * @nr_outport: number of output ports for this component. + * @clk: The clock this component is associated to. 
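 *
 * On DT based systems this structure is normally not filled in by hand; it
 * is obtained through of_get_coresight_platform_data(), declared at the
 * end of this header.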
+ */ +struct coresight_platform_data { + int cpu; + const char *name; + int nr_inport; + int *outports; + const char **child_names; + int *child_ports; + int nr_outport; + struct clk *clk; +}; + +/** + * struct coresight_desc - description of a component required from drivers + * @type: as defined by @coresight_dev_type. + * @subtype: as defined by @coresight_dev_subtype. + * @ops: generic operations for this component, as defined + by @coresight_ops. + * @pdata: platform data collected from DT. + * @dev: The device entity associated to this component. + * @groups: operations specific to this component. These will end up + in the component's sysfs sub-directory. + */ +struct coresight_desc { + enum coresight_dev_type type; + struct coresight_dev_subtype subtype; + const struct coresight_ops *ops; + struct coresight_platform_data *pdata; + struct device *dev; + const struct attribute_group **groups; +}; + +/** + * struct coresight_connection - representation of a single connection + * @outport: a connection's output port number. + * @chid_name: remote component's name. + * @child_port: remote component's port number @output is connected to. + * @child_dev: a @coresight_device representation of the component + connected to @outport. + */ +struct coresight_connection { + int outport; + const char *child_name; + int child_port; + struct coresight_device *child_dev; +}; + +/** + * struct coresight_device - representation of a device as used by the framework + * @conns: array of coresight_connections associated to this component. + * @nr_inport: number of input port associated to this component. + * @nr_outport: number of output port associated to this component. + * @type: as defined by @coresight_dev_type. + * @subtype: as defined by @coresight_dev_subtype. + * @ops: generic operations for this component, as defined + by @coresight_ops. + * @dev: The device entity associated to this component. + * @refcnt: keep track of what is in use. + * @path_link: link of current component into the path being enabled. + * @orphan: true if the component has connections that haven't been linked. + * @enable: 'true' if component is currently part of an active path. + * @activated: 'true' only if a _sink_ has been activated. A sink can be + activated but not yet enabled. Enabling for a _sink_ + happens when a source has been selected for that it. + */ +struct coresight_device { + struct coresight_connection *conns; + int nr_inport; + int nr_outport; + enum coresight_dev_type type; + struct coresight_dev_subtype subtype; + const struct coresight_ops *ops; + struct device dev; + atomic_t *refcnt; + struct list_head path_link; + bool orphan; + bool enable; /* true only if configured as part of a path */ + bool activated; /* true only if a sink is part of a path */ +}; + +#define to_coresight_device(d) container_of(d, struct coresight_device, dev) + +#define source_ops(csdev) csdev->ops->source_ops +#define sink_ops(csdev) csdev->ops->sink_ops +#define link_ops(csdev) csdev->ops->link_ops + +#define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name, \ + __mode, __get, __set, __fmt) \ +DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt); \ +static const struct coresight_ops_entry __name ## _entry = { \ + .name = __entry_name, \ + .mode = __mode, \ + .ops = &__name ## _ops \ +} + +/** + * struct coresight_ops_sink - basic operations for a sink + * Operations available for sinks + * @enable: enables the sink. + * @disable: disables the sink. 
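 *
 * A driver would typically supply these through a constant ops table
 * (sketch, the example_* names are hypothetical):
 *
 *	static const struct coresight_ops_sink example_sink_ops = {
 *		.enable		= example_sink_enable,
 *		.disable	= example_sink_disable,
 *	};
 *
 * and point the sink_ops member of its struct coresight_ops at it before
 * passing that via coresight_desc.ops to coresight_register().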
+ */ +struct coresight_ops_sink { + int (*enable)(struct coresight_device *csdev); + void (*disable)(struct coresight_device *csdev); +}; + +/** + * struct coresight_ops_link - basic operations for a link + * Operations available for links. + * @enable: enables flow between iport and oport. + * @disable: disables flow between iport and oport. + */ +struct coresight_ops_link { + int (*enable)(struct coresight_device *csdev, int iport, int oport); + void (*disable)(struct coresight_device *csdev, int iport, int oport); +}; + +/** + * struct coresight_ops_source - basic operations for a source + * Operations available for sources. + * @trace_id: returns the value of the component's trace ID as known + to the HW. + * @enable: enables tracing from a source. + * @disable: disables tracing for a source. + */ +struct coresight_ops_source { + int (*trace_id)(struct coresight_device *csdev); + int (*enable)(struct coresight_device *csdev); + void (*disable)(struct coresight_device *csdev); +}; + +struct coresight_ops { + const struct coresight_ops_sink *sink_ops; + const struct coresight_ops_link *link_ops; + const struct coresight_ops_source *source_ops; +}; + +#ifdef CONFIG_CORESIGHT +extern struct coresight_device * +coresight_register(struct coresight_desc *desc); +extern void coresight_unregister(struct coresight_device *csdev); +extern int coresight_enable(struct coresight_device *csdev); +extern void coresight_disable(struct coresight_device *csdev); +extern int coresight_is_bit_set(u32 val, int position, int value); +extern int coresight_timeout(void __iomem *addr, u32 offset, + int position, int value); +#ifdef CONFIG_OF +extern struct coresight_platform_data *of_get_coresight_platform_data( + struct device *dev, struct device_node *node); +#endif +#else +static inline struct coresight_device * +coresight_register(struct coresight_desc *desc) { return NULL; } +static inline void coresight_unregister(struct coresight_device *csdev) {} +static inline int +coresight_enable(struct coresight_device *csdev) { return -ENOSYS; } +static inline void coresight_disable(struct coresight_device *csdev) {} +static inline int coresight_is_bit_set(u32 val, int position, int value) + { return 0; } +static inline int coresight_timeout(void __iomem *addr, u32 offset, + int position, int value) { return 1; } +#ifdef CONFIG_OF +static inline struct coresight_platform_data *of_get_coresight_platform_data( + struct device *dev, struct device_node *node) { return NULL; } +#endif +#endif + +#endif diff --git a/include/linux/cpu.h b/include/linux/cpu.h index b2d9a43012b2..4260e8594bd7 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -19,6 +19,7 @@ struct device; struct device_node; +struct attribute_group; struct cpu { int node_id; /* The node which contains the CPU */ @@ -39,6 +40,9 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr); extern int cpu_add_dev_attr_group(struct attribute_group *attrs); extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); +extern struct device *cpu_device_create(struct device *parent, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...); #ifdef CONFIG_HOTPLUG_CPU extern void unregister_cpu(struct cpu *cpu); extern ssize_t arch_cpu_probe(const char *, size_t); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 503b085b7832..4d078cebafd2 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -217,26 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name) struct cpufreq_driver { - 
char name[CPUFREQ_NAME_LEN]; - u8 flags; - void *driver_data; + char name[CPUFREQ_NAME_LEN]; + u8 flags; + void *driver_data; /* needed by all drivers */ - int (*init) (struct cpufreq_policy *policy); - int (*verify) (struct cpufreq_policy *policy); + int (*init)(struct cpufreq_policy *policy); + int (*verify)(struct cpufreq_policy *policy); /* define one out of two */ - int (*setpolicy) (struct cpufreq_policy *policy); + int (*setpolicy)(struct cpufreq_policy *policy); /* * On failure, should always restore frequency to policy->restore_freq * (i.e. old freq). */ - int (*target) (struct cpufreq_policy *policy, /* Deprecated */ - unsigned int target_freq, - unsigned int relation); - int (*target_index) (struct cpufreq_policy *policy, - unsigned int index); + int (*target)(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); /* Deprecated */ + int (*target_index)(struct cpufreq_policy *policy, + unsigned int index); /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset. @@ -252,27 +252,31 @@ struct cpufreq_driver { * wish to switch to intermediate frequency for some target frequency. * In that case core will directly call ->target_index(). */ - unsigned int (*get_intermediate)(struct cpufreq_policy *policy, - unsigned int index); - int (*target_intermediate)(struct cpufreq_policy *policy, - unsigned int index); + unsigned int (*get_intermediate)(struct cpufreq_policy *policy, + unsigned int index); + int (*target_intermediate)(struct cpufreq_policy *policy, + unsigned int index); /* should be defined, if possible */ - unsigned int (*get) (unsigned int cpu); + unsigned int (*get)(unsigned int cpu); /* optional */ - int (*bios_limit) (int cpu, unsigned int *limit); + int (*bios_limit)(int cpu, unsigned int *limit); + + int (*exit)(struct cpufreq_policy *policy); + void (*stop_cpu)(struct cpufreq_policy *policy); + int (*suspend)(struct cpufreq_policy *policy); + int (*resume)(struct cpufreq_policy *policy); + + /* Will be called after the driver is fully initialized */ + void (*ready)(struct cpufreq_policy *policy); - int (*exit) (struct cpufreq_policy *policy); - void (*stop_cpu) (struct cpufreq_policy *policy); - int (*suspend) (struct cpufreq_policy *policy); - int (*resume) (struct cpufreq_policy *policy); - struct freq_attr **attr; + struct freq_attr **attr; /* platform specific boost support code */ - bool boost_supported; - bool boost_enabled; - int (*set_boost) (int state); + bool boost_supported; + bool boost_enabled; + int (*set_boost)(int state); }; /* flags */ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 25e0df6155a4..a07e087f54b2 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -53,7 +53,7 @@ struct cpuidle_state { }; /* Idle State Flags */ -#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ +#define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? 
*/ #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ @@ -90,7 +90,7 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); * cpuidle_get_last_residency - retrieves the last state's residency time * @dev: the target CPU * - * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set + * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set */ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) { diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 0a9a6da21e74..b950e9d6008b 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -803,6 +803,23 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) } #endif /* NR_CPUS > BITS_PER_LONG */ +/** + * cpumap_print_to_pagebuf - copies the cpumask into the buffer either + * as comma-separated list of cpus or hex values of cpumask + * @list: indicates whether the cpumap must be list + * @mask: the cpumask to copy + * @buf: the buffer to copy into + * + * Returns the length of the (null-terminated) @buf string, zero if + * nothing is copied. + */ +static inline ssize_t +cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) +{ + return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask), + nr_cpumask_bits); +} + /* * * From here down, all obsolete. Use cpumask_ variants! diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 2f073db7392e..1b357997cac5 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p); void cpuset_init_current_mems_allowed(void); int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); -extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); -extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); +extern int __cpuset_node_allowed(int node, gfp_t gfp_mask); -static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) +static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) { - return nr_cpusets() <= 1 || - __cpuset_node_allowed_softwall(node, gfp_mask); + return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask); } -static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) +static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { - return nr_cpusets() <= 1 || - __cpuset_node_allowed_hardwall(node, gfp_mask); -} - -static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) -{ - return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); -} - -static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) -{ - return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); + return cpuset_node_allowed(zone_to_nid(z), gfp_mask); } extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, @@ -179,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) return 1; } -static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) -{ - return 1; -} - -static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) -{ - return 1; -} - -static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) +static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) { return 1; } -static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) +static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { return 
1; } diff --git a/include/linux/cred.h b/include/linux/cred.h index b2d0820837c4..2fb2ca2127ed 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -68,6 +68,7 @@ extern void groups_free(struct group_info *); extern int set_current_groups(struct group_info *); extern void set_groups(struct cred *, struct group_info *); extern int groups_search(const struct group_info *, kgid_t); +extern bool may_setgroups(void); /* access the groups "array" with this macro */ #define GROUP_AT(gi, i) \ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index d45e949699ea..9c8776d0ada8 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -26,6 +26,19 @@ #include <linux/uaccess.h> /* + * Autoloaded crypto modules should only use a prefixed name to avoid allowing + * arbitrary modules to be loaded. Loading from userspace may still need the + * unprefixed names, so retains those aliases as well. + * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3 + * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro + * expands twice on the same line. Instead, use a separate base name for the + * alias. + */ +#define MODULE_ALIAS_CRYPTO(name) \ + __MODULE_INFO(alias, alias_userspace, name); \ + __MODULE_INFO(alias, alias_crypto, "crypto-" name) + +/* * Algorithm masks and types. */ #define CRYPTO_ALG_TYPE_MASK 0x0000000f @@ -127,6 +140,13 @@ struct skcipher_givcrypt_request; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); +/** + * DOC: Block Cipher Context Data Structures + * + * These data structures define the operating context for each block cipher + * type. + */ + struct crypto_async_request { struct list_head list; crypto_completion_t complete; @@ -194,9 +214,63 @@ struct hash_desc { u32 flags; }; -/* - * Algorithms: modular crypto algorithm implementations, managed - * via crypto_register_alg() and crypto_unregister_alg(). +/** + * DOC: Block Cipher Algorithm Definitions + * + * These data structures define modular crypto algorithm implementations, + * managed via crypto_register_alg() and crypto_unregister_alg(). + */ + +/** + * struct ablkcipher_alg - asynchronous block cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. 
In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. In case the + * key was stored in transformation context, the key might need to be + * re-programmed into the hardware in this function. This function + * shall not modify the transformation context, as this function may + * be called in parallel with the same transformation object. + * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt + * and the conditions are exactly the same. + * @givencrypt: Update the IV for encryption. With this function, a cipher + * implementation may provide the function on how to update the IV + * for encryption. + * @givdecrypt: Update the IV for decryption. This is the reverse of + * @givencrypt . + * @geniv: The transformation implementation may use an "IV generator" provided + * by the kernel crypto API. Several use cases have a predefined + * approach how IVs are to be updated. For such use cases, the kernel + * crypto API provides ready-to-use implementations that can be + * referenced with this variable. + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * + * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are + * mandatory and must be filled. */ struct ablkcipher_alg { int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, @@ -213,6 +287,32 @@ struct ablkcipher_alg { unsigned int ivsize; }; +/** + * struct aead_alg - AEAD cipher definition + * @maxauthsize: Set the maximum authentication tag size supported by the + * transformation. A transformation may support smaller tag sizes. + * As the authentication tag is a message digest to ensure the + * integrity of the encrypted data, a consumer typically wants the + * largest authentication tag possible as defined by this + * variable. + * @setauthsize: Set authentication size for the AEAD transformation. This + * function is used to specify the consumer requested size of the + * authentication tag to be either generated by the transformation + * during encryption or the size of the authentication tag to be + * supplied during the decryption operation. This function is also + * responsible for checking the authentication tag size for + * validity. + * @setkey: see struct ablkcipher_alg + * @encrypt: see struct ablkcipher_alg + * @decrypt: see struct ablkcipher_alg + * @givencrypt: see struct ablkcipher_alg + * @givdecrypt: see struct ablkcipher_alg + * @geniv: see struct ablkcipher_alg + * @ivsize: see struct ablkcipher_alg + * + * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are + * mandatory and must be filled. 
+ */ struct aead_alg { int (*setkey)(struct crypto_aead *tfm, const u8 *key, unsigned int keylen); @@ -228,6 +328,18 @@ struct aead_alg { unsigned int maxauthsize; }; +/** + * struct blkcipher_alg - synchronous block cipher definition + * @min_keysize: see struct ablkcipher_alg + * @max_keysize: see struct ablkcipher_alg + * @setkey: see struct ablkcipher_alg + * @encrypt: see struct ablkcipher_alg + * @decrypt: see struct ablkcipher_alg + * @geniv: see struct ablkcipher_alg + * @ivsize: see struct ablkcipher_alg + * + * All fields except @geniv and @ivsize are mandatory and must be filled. + */ struct blkcipher_alg { int (*setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); @@ -245,6 +357,53 @@ struct blkcipher_alg { unsigned int ivsize; }; +/** + * struct cipher_alg - single-block symmetric ciphers definition + * @cia_min_keysize: Minimum key size supported by the transformation. This is + * the smallest key length supported by this transformation + * algorithm. This must be set to one of the pre-defined + * values as this is not hardware specific. Possible values + * for this field can be found via git grep "_MIN_KEY_SIZE" + * include/crypto/ + * @cia_max_keysize: Maximum key size supported by the transformation. This is + * the largest key length supported by this transformation + * algorithm. This must be set to one of the pre-defined values + * as this is not hardware specific. Possible values for this + * field can be found via git grep "_MAX_KEY_SIZE" + * include/crypto/ + * @cia_setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function + * can be called multiple times during the existence of the + * transformation object, so one must make sure the key is properly + * reprogrammed into the hardware. This function is also + * responsible for checking the key length for validity. + * @cia_encrypt: Encrypt a single block. This function is used to encrypt a + * single block of data, which must be @cra_blocksize big. This + * always operates on a full @cra_blocksize and it is not possible + * to encrypt a block of smaller size. The supplied buffers must + * therefore also be at least of @cra_blocksize size. Both the + * input and output buffers are always aligned to @cra_alignmask. + * In case either of the input or output buffer supplied by user + * of the crypto API is not aligned to @cra_alignmask, the crypto + * API will re-align the buffers. The re-alignment means that a + * new buffer will be allocated, the data will be copied into the + * new buffer, then the processing will happen on the new buffer, + * then the data will be copied back into the original buffer and + * finally the new buffer will be freed. In case a software + * fallback was put in place in the @cra_init call, this function + * might need to use the fallback if the algorithm doesn't support + * all of the key sizes. In case the key was stored in + * transformation context, the key might need to be re-programmed + * into the hardware in this function. This function shall not + * modify the transformation context, as this function may be + * called in parallel with the same transformation object. + * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to + * @cia_encrypt, and the conditions are exactly the same. + * + * All fields are mandatory and must be filled. 
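[Editor's note: the following is an illustrative sketch, not part of this patch. It shows how a hypothetical provider might fill the struct cipher_alg fields described above inside a struct crypto_alg and register it with crypto_register_alg(). All xyz_* names, the context structure and the callback bodies are placeholders; a real driver would implement the actual block transform and key handling.]

/* Hypothetical provider-side example only -- not part of this patch. */
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <crypto/aes.h>

struct xyz_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	unsigned int keylen;
};

static int xyz_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct xyz_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	/* the setkey callback is responsible for key length validation */
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

static void xyz_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	/* transform exactly one cra_blocksize sized block from src to dst */
}

static void xyz_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	/* reverse counterpart of xyz_aes_encrypt() */
}

static struct crypto_alg xyz_aes_alg = {
	.cra_name	 = "aes",
	.cra_driver_name = "aes-xyz",
	.cra_priority	 = 300,
	.cra_flags	 = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize	 = AES_BLOCK_SIZE,
	.cra_ctxsize	 = sizeof(struct xyz_aes_ctx),
	.cra_module	 = THIS_MODULE,
	.cra_u.cipher	 = {
		.cia_min_keysize = AES_MIN_KEY_SIZE,
		.cia_max_keysize = AES_MAX_KEY_SIZE,
		.cia_setkey	 = xyz_aes_setkey,
		.cia_encrypt	 = xyz_aes_encrypt,
		.cia_decrypt	 = xyz_aes_decrypt,
	},
};

/* registration, typically from the driver's module_init()/probe path */
static int __init xyz_aes_init(void)
{
	return crypto_register_alg(&xyz_aes_alg);
}

As the crypto_alg documentation that follows notes, a "cipher" type transformation leaves @cra_type empty; only the @cra_u.cipher callbacks need to be provided.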
+ */ struct cipher_alg { unsigned int cia_min_keysize; unsigned int cia_max_keysize; @@ -261,6 +420,25 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; +/** + * struct rng_alg - random number generator definition + * @rng_make_random: The function defined by this variable obtains a random + * number. The random number generator transform must generate + * the random number out of the context provided with this + * call. + * @rng_reset: Reset of the random number generator by clearing the entire state. + * With the invocation of this function call, the random number + * generator shall completely reinitialize its state. If the random + * number generator requires a seed for setting up a new state, + * the seed must be provided by the consumer while invoking this + * function. The required size of the seed is defined with + * @seedsize . + * @seedsize: The seed size required for a random number generator + * initialization defined with this variable. Some random number + * generators, like the SP800-90A DRBG, do not require a seed as the + * seeding is implemented internally without the need of support by + * the consumer. In this case, the seed size is set to zero. + */ struct rng_alg { int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen); @@ -277,6 +455,81 @@ struct rng_alg { #define cra_compress cra_u.compress #define cra_rng cra_u.rng +/** + * struct crypto_alg - definition of a cryptographic cipher algorithm + * @cra_flags: Flags describing this transformation. See include/linux/crypto.h + * CRYPTO_ALG_* flags for the flags which go in here. Those are + * used for fine-tuning the description of the transformation + * algorithm. + * @cra_blocksize: Minimum block size of this transformation. The size in bytes + * of the smallest possible unit which can be transformed with + * this algorithm. The users must respect this value. + * In case of a HASH transformation, it is possible for a smaller + * block than @cra_blocksize to be passed to the crypto API for + * transformation; in case of any other transformation type, an + * error will be returned upon any attempt to transform smaller + * than @cra_blocksize chunks. + * @cra_ctxsize: Size of the operational context of the transformation. This + * value informs the kernel crypto API about the memory size + * needed to be allocated for the transformation context. + * @cra_alignmask: Alignment mask for the input and output data buffer. The data + * buffer containing the input data for the algorithm must be + * aligned to this alignment mask. The data buffer for the + * output data must be aligned to this alignment mask. Note that + * the Crypto API will do the re-alignment in software, but + * only under special conditions and there is a performance hit. + * The re-alignment happens at these occasions for different + * @cra_u types: cipher -- For both input data and output data + * buffer; ahash -- For output hash destination buf; shash -- + * For output hash destination buf. + * This is needed on hardware which is flawed by design and + * cannot pick data from arbitrary addresses. + * @cra_priority: Priority of this transformation implementation. In case + * multiple transformations with the same @cra_name are available to + * the Crypto API, the kernel will use the one with highest + * @cra_priority. + * @cra_name: Generic name (usable by multiple implementations) of the + * transformation algorithm. This is the name of the transformation + * itself.
This field is used by the kernel when looking up the + * providers of a particular transformation. + * @cra_driver_name: Unique name of the transformation provider. This is the + * name of the provider of the transformation. This can be any + * arbitrary value, but in the usual case, this contains the + * name of the chip or provider and the name of the + * transformation algorithm. + * @cra_type: Type of the cryptographic transformation. This is a pointer to + * struct crypto_type, which implements callbacks common for all + * transformation types. There are multiple options: + * &crypto_blkcipher_type, &crypto_ablkcipher_type, + * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. + * This field might be empty. In that case, there are no common + * callbacks. This is the case for: cipher, compress, shash. + * @cra_u: Callbacks implementing the transformation. This is a union of + * multiple structures. Depending on the type of transformation selected + * by @cra_type and @cra_flags above, the associated structure must be + * filled with callbacks. This field might be empty. This is the case + * for ahash, shash. + * @cra_init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @cra_exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @cra_init, used to remove various changes set in + * @cra_init. + * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE + * @cra_list: internally used + * @cra_users: internally used + * @cra_refcnt: internally used + * @cra_destroy: internally used + * + * The struct crypto_alg describes a generic Crypto API algorithm and is common + * for all of the transformations. Any variable not documented here shall not + * be used by a cipher implementation as it is internal to the Crypto API. + */ struct crypto_alg { struct list_head cra_list; struct list_head cra_users; @@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask) return mask; } +/** + * DOC: Asynchronous Block Cipher API + * + * Asynchronous block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto). + * + * Asynchronous cipher operations imply that the function invocation for a + * cipher request returns immediately, before the completion of the operation. + * The cipher request is scheduled as a separate kernel thread and therefore + * load-balanced on the different CPUs via the process scheduler. To allow + * the kernel crypto API to inform the caller about the completion of a cipher + * request, the caller must provide a callback function. That function is + * invoked with the cipher handle when the request completes. + * + * To support the asynchronous operation, more information than just the + * cipher handle must be supplied to the kernel crypto API. That additional + * information is given by filling in the ablkcipher_request data structure. + * + * For the asynchronous block cipher API, the state is maintained with the tfm + * cipher handle. A single tfm can be used across multiple calls and in + * parallel.
For asynchronous block cipher calls, context data supplied and + * only used by the caller can be referenced the request data structure in + * addition to the IV used for the cipher request. The maintenance of such + * state information would be important for a crypto driver implementer to + * have, because when calling the callback function upon completion of the + * cipher operation, that callback function may need some information about + * which operation just finished if it invoked multiple in parallel. This + * state information is unused by the kernel crypto API. + */ + +/** + * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ablkcipher cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an ablkcipher. The returned struct + * crypto_ablkcipher is the cipher handle that is required for any subsequent + * API invocation for that ablkcipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask); @@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm( return &tfm->base; } +/** + * crypto_free_ablkcipher() - zeroize and free cipher handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) { crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); } +/** + * crypto_has_ablkcipher() - Search for the availability of an ablkcipher. + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ablkcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the ablkcipher is known to the kernel crypto API; false + * otherwise + */ static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, u32 mask) { @@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; } +/** + * crypto_ablkcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the ablkcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ static inline unsigned int crypto_ablkcipher_ivsize( struct crypto_ablkcipher *tfm) { return crypto_ablkcipher_crt(tfm)->ivsize; } +/** + * crypto_ablkcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the ablkcipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ static inline unsigned int crypto_ablkcipher_blocksize( struct crypto_ablkcipher *tfm) { @@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); } +/** + * crypto_ablkcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the ablkcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. 
Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { @@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, return crt->setkey(crt->base, key, keylen); } +/** + * crypto_ablkcipher_reqtfm() - obtain cipher handle from request + * @req: ablkcipher_request out of which the cipher handle is to be obtained + * + * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request + * data structure. + * + * Return: crypto_ablkcipher handle + */ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( struct ablkcipher_request *req) { return __crypto_ablkcipher_cast(req->base.tfm); } +/** + * crypto_ablkcipher_encrypt() - encrypt plaintext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the ablkcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * ablkcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = @@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) return crt->encrypt(req); } +/** + * crypto_ablkcipher_decrypt() - decrypt ciphertext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the ablkcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * ablkcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = @@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) return crt->decrypt(req); } +/** + * DOC: Asynchronous Cipher Request Handle + * + * The ablkcipher_request data structure contains all pointers to data + * required for the asynchronous cipher operation. This includes the cipher + * handle (which can be used by multiple ablkcipher_request instances), pointer + * to plaintext and ciphertext, asynchronous callback function, etc. It acts + * as a handle to the ablkcipher_request_* API calls in a similar way as + * ablkcipher handle to the crypto_ablkcipher_* API calls. + */ + +/** + * crypto_ablkcipher_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ static inline unsigned int crypto_ablkcipher_reqsize( struct crypto_ablkcipher *tfm) { return crypto_ablkcipher_crt(tfm)->reqsize; } +/** + * ablkcipher_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing ablkcipher handle in the request + * data structure with a different one. 
+ */ static inline void ablkcipher_request_set_tfm( struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) { @@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast( return container_of(req, struct ablkcipher_request, base); } +/** + * ablkcipher_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the ablkcipher + * encrypt and decrypt API calls. During the allocation, the provided ablkcipher + * handle is registered in the request data structure. + * + * Return: allocated request handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ static inline struct ablkcipher_request *ablkcipher_request_alloc( struct crypto_ablkcipher *tfm, gfp_t gfp) { @@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc( return req; } +/** + * ablkcipher_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ static inline void ablkcipher_request_free(struct ablkcipher_request *req) { kzfree(req); } +/** + * ablkcipher_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once the + * cipher operation completes. + * + * The callback function is registered with the ablkcipher_request handle and + * must comply with the following template: + * + * void callback_function(struct crypto_async_request *req, int error) + */ static inline void ablkcipher_request_set_callback( struct ablkcipher_request *req, u32 flags, crypto_completion_t compl, void *data) @@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback( req->base.flags = flags; } +/** + * ablkcipher_request_set_crypt() - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @nbytes: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_ablkcipher_ivsize + * + * This function allows setting of the source data and destination data + * scatter / gather lists. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed: the source is the ciphertext and the destination is the plaintext. 
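[Editor's note: a hedged consumer-side sketch, not part of this patch, tying together the ablkcipher calls documented in this section. It encrypts one buffer in place with a "cbc(aes)" transformation (name chosen only for illustration); the completion-based waiting shown here is one common way to consume the asynchronous API, and error handling is kept minimal.]

/* Hypothetical consumer example only -- not part of this patch. */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/err.h>

struct xyz_wait {
	struct completion done;
	int err;
};

static void xyz_ablk_done(struct crypto_async_request *req, int err)
{
	struct xyz_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;		/* a backlogged request was just started */
	wait->err = err;
	complete(&wait->done);
}

/* @len must be a multiple of the cipher block size for CBC,
 * @iv must be crypto_ablkcipher_ivsize(tfm) bytes long */
static int xyz_encrypt_buffer(u8 *buf, unsigned int len,
			      const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct xyz_wait wait;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&wait.done);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					xyz_ablk_done, &wait);

	sg_init_one(&sg, buf, len);		/* in-place: src == dst */
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&wait.done);
		ret = wait.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}

The -EINPROGRESS/-EBUSY handling mirrors the behaviour described in the DOC section above: the encrypt call may return before the operation has finished, and the registered callback delivers the final status through the caller-provided context pointer.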
+ */ static inline void ablkcipher_request_set_crypt( struct ablkcipher_request *req, struct scatterlist *src, struct scatterlist *dst, @@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt( req->info = iv; } +/** + * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API + * + * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD + * (listed as type "aead" in /proc/crypto) + * + * The most prominent examples of this type of encryption are GCM and CCM. + * However, the kernel supports other types of AEAD ciphers which are defined + * with the following cipher string: + * + * authenc(keyed message digest, block cipher) + * + * For example: authenc(hmac(sha256), cbc(aes)) + * + * The example code provided for the asynchronous block cipher operation + * applies here as well. Naturally all *ablkcipher* symbols must be exchanged + * for the *aead* counterparts discussed in the following. In addition, for the AEAD + * operation, the aead_request_set_assoc function must be used to set the + * pointer to the associated data memory location before performing the + * encryption or decryption operation. In case of an encryption, the associated + * data memory is filled during the encryption operation. For decryption, the + * associated data memory must contain data that is used to verify the integrity + * of the decrypted data. Another deviation from the asynchronous block cipher + * operation is that the caller should explicitly check for -EBADMSG of the + * crypto_aead_decrypt. That error indicates an authentication error, i.e. + * a breach in the integrity of the message. In essence, that -EBADMSG error + * code is the key bonus an AEAD cipher has over "standard" block chaining + * modes. + */ + static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) { return (struct crypto_aead *)tfm; } +/** + * crypto_alloc_aead() - allocate AEAD cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * AEAD cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an AEAD. The returned struct + * crypto_aead is the cipher handle that is required for any subsequent + * API invocation for that AEAD. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) @@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) return &tfm->base; } +/** + * crypto_free_aead() - zeroize and free aead handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_aead(struct crypto_aead *tfm) { crypto_free_tfm(crypto_aead_tfm(tfm)); @@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm) { return &crypto_aead_tfm(tfm)->crt_aead; } +/** + * crypto_aead_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the aead referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV.
+ * + * Return: IV size in bytes + */ static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) { return crypto_aead_crt(tfm)->ivsize; } +/** + * crypto_aead_authsize() - obtain maximum authentication data size + * @tfm: cipher handle + * + * The maximum size of the authentication data for the AEAD cipher referenced + * by the AEAD cipher handle is returned. The authentication data size may be + * zero if the cipher implements a hard-coded maximum. + * + * The authentication data may also be known as "tag value". + * + * Return: authentication data size / tag size in bytes + */ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) { return crypto_aead_crt(tfm)->authsize; } +/** + * crypto_aead_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the AEAD referenced with the cipher handle is returned. + * The caller may use that information to allocate appropriate memory for the + * data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) { return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); @@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); } +/** + * crypto_aead_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the AEAD referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { @@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, return crt->setkey(crt->base, key, keylen); } +/** + * crypto_aead_setauthsize() - set authentication data size + * @tfm: cipher handle + * @authsize: size of the authentication data / tag in bytes + * + * Set the authentication data size / tag size. AEAD requires an authentication + * tag (or MAC) in addition to the associated data. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) @@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) return __crypto_aead_cast(req->base.tfm); } +/** + * crypto_aead_encrypt() - encrypt plaintext + * @req: reference to the aead_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The encryption operation creates the authentication data / + * tag. That data is concatenated with the created ciphertext. + * The ciphertext memory size is therefore the given number of + * block cipher blocks + the size defined by the + * crypto_aead_setauthsize invocation. 
The caller must ensure + * that sufficient memory is available for the ciphertext and + * the authentication tag. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_aead_encrypt(struct aead_request *req) { return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); } +/** + * crypto_aead_decrypt() - decrypt ciphertext + * @req: reference to the aead_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the + * authentication data / tag. That authentication data / tag + * must have the size defined by the crypto_aead_setauthsize + * invocation. + * + * + * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD + * cipher operation performs the authentication of the data during the + * decryption operation. Therefore, the function returns this error if + * the authentication of the ciphertext was unsuccessful (i.e. the + * integrity of the ciphertext or the associated data was violated); + * < 0 if an error occurred. + */ static inline int crypto_aead_decrypt(struct aead_request *req) { return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); } +/** + * DOC: Asynchronous AEAD Request Handle + * + * The aead_request data structure contains all pointers to data required for + * the AEAD cipher operation. This includes the cipher handle (which can be + * used by multiple aead_request instances), pointer to plaintext and + * ciphertext, asynchronous callback function, etc. It acts as a handle to the + * aead_request_* API calls in a similar way as AEAD handle to the + * crypto_aead_* API calls. + */ + +/** + * crypto_aead_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) { return crypto_aead_crt(tfm)->reqsize; } +/** + * aead_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing aead handle in the request + * data structure with a different one. + */ static inline void aead_request_set_tfm(struct aead_request *req, struct crypto_aead *tfm) { req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); } +/** + * aead_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the AEAD + * encrypt and decrypt API calls. During the allocation, the provided aead + * handle is registered in the request data structure. + * + * Return: allocated request handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code.
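[Editor's note: a hedged AEAD sketch, not part of this patch, using the request calls documented around this point together with aead_request_set_crypt() and aead_request_set_assoc() described below. "gcm(aes)" and the buffer layout are illustrative only; the completion helper follows the same pattern as in the ablkcipher sketch earlier.]

/* Hypothetical consumer example only -- not part of this patch. */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/err.h>

struct xyz_aead_wait {
	struct completion done;
	int err;
};

static void xyz_aead_done(struct crypto_async_request *req, int err)
{
	struct xyz_aead_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;
	wait->err = err;
	complete(&wait->done);
}

/*
 * Encrypt @ptlen bytes of @buf in place; @buf must provide room for the
 * authentication tag behind the plaintext, as the documentation above and
 * the snippet in aead_request_set_crypt() describe.  @iv must be
 * crypto_aead_ivsize(tfm) bytes long.
 */
static int xyz_aead_seal(u8 *buf, unsigned int ptlen, unsigned int authsize,
			 struct scatterlist *assoc, unsigned int assoclen,
			 const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	struct xyz_aead_wait wait;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (ret)
		goto out;
	ret = crypto_aead_setauthsize(tfm, authsize);
	if (ret)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	init_completion(&wait.done);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  xyz_aead_done, &wait);

	/* ciphertext plus tag are written behind the plaintext */
	sg_init_one(&sg, buf, ptlen + authsize);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	aead_request_set_assoc(req, assoc, assoclen);

	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&wait.done);
		ret = wait.err;
	}

	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return ret;
}

For the matching decryption, the scatterlist covers ciphertext plus tag, and a return value of -EBADMSG from crypto_aead_decrypt() signals an authentication failure, as described in the crypto_aead_decrypt() documentation above.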
+ */ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, gfp_t gfp) { @@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, return req; } +/** + * aead_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ static inline void aead_request_free(struct aead_request *req) { kzfree(req); } +/** + * aead_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * Setting the callback function that is triggered once the cipher operation + * completes + * + * The callback function is registered with the aead_request handle and + * must comply with the following template: + * + * void callback_function(struct crypto_async_request *req, int error) + */ static inline void aead_request_set_callback(struct aead_request *req, u32 flags, crypto_completion_t compl, @@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req, req->base.flags = flags; } +/** + * aead_request_set_crypt - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @cryptlen: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_aead_ivsize() + * + * Setting the source data and destination data scatter / gather lists. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed: the source is the ciphertext and the destination is the plaintext. + * + * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, + * the caller must concatenate the ciphertext followed by the + * authentication tag and provide the entire data stream to the + * decryption operation (i.e. the data length used for the + * initialization of the scatterlist and the data length for the + * decryption operation is identical). For encryption, however, + * the authentication tag is created while encrypting the data. + * The destination buffer must hold sufficient space for the + * ciphertext and the authentication tag while the encryption + * invocation must only point to the plaintext data size. The + * following code snippet illustrates the memory usage + * buffer = kmalloc(ptbuflen + (enc ? authsize : 0)); + * sg_init_one(&sg, buffer, ptbuflen + (enc ? 
authsize : 0)); + * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv); + */ static inline void aead_request_set_crypt(struct aead_request *req, struct scatterlist *src, struct scatterlist *dst, @@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req, req->iv = iv; } +/** + * aead_request_set_assoc() - set the associated data scatter / gather list + * @req: request handle + * @assoc: associated data scatter / gather list + * @assoclen: number of bytes to process from @assoc + * + * For encryption, the memory is filled with the associated data. For + * decryption, the memory must point to the associated data. + */ static inline void aead_request_set_assoc(struct aead_request *req, struct scatterlist *assoc, unsigned int assoclen) @@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req, req->assoclen = assoclen; } +/** + * DOC: Synchronous Block Cipher API + * + * The synchronous block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto) + * + * Synchronous calls have a context in the tfm. But since a single tfm can be + * used in multiple calls and in parallel, this info should not be changeable + * (unless a lock is used). This applies, for example, to the symmetric key. + * However, the IV is changeable, so there is an iv field in the blkcipher_tfm + * structure for the synchronous blkcipher API. So, it's the only state info that can + * be kept for synchronous calls without using a big lock across a tfm. + * + * The block cipher API allows the use of a complete cipher, i.e. a cipher + * consisting of a template (a block chaining mode) and a single block cipher + * primitive (e.g. AES). + * + * The plaintext data buffer and the ciphertext data buffer are pointed to + * by using scatter/gather lists. The cipher operation is performed + * on all segments of the provided scatter/gather lists. + * + * The kernel crypto API supports a cipher operation "in-place" which means that + * the caller may provide the same scatter/gather list for the plaintext and + * ciphertext. After the completion of the cipher operation, the plaintext + * data is replaced with the ciphertext data in case of an encryption and vice + * versa for a decryption. The caller must ensure that the scatter/gather lists + * for the output data point to sufficiently large buffers, i.e. multiples of + * the block size of the cipher. + */ + static inline struct crypto_blkcipher *__crypto_blkcipher_cast( struct crypto_tfm *tfm) { return (struct crypto_blkcipher *)tfm; } @@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast( return __crypto_blkcipher_cast(tfm); } +/** + * crypto_alloc_blkcipher() - allocate synchronous block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * blkcipher cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a block cipher. The returned struct + * crypto_blkcipher is the cipher handle that is required for any subsequent + * API invocation for that block cipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code.
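[Editor's note: a brief, hypothetical usage sketch for the synchronous block cipher interface just described, not part of this patch. "cbc(aes)" is only an example; the buffer is assumed to be a whole multiple of the block size and is processed in place.]

/* Hypothetical consumer example only -- not part of this patch. */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int xyz_cbc_encrypt(u8 *buf, unsigned int len,
			   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_blkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out;

	desc.tfm = tfm;
	desc.info = iv;		/* crypto_blkcipher_ivsize(tfm) bytes */
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	sg_init_one(&sg, buf, len);	/* in-place operation */
	ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
out:
	crypto_free_blkcipher(tfm);
	return ret;
}

Alternatively, the IV can be installed once with crypto_blkcipher_set_iv() and the plain crypto_blkcipher_encrypt()/crypto_blkcipher_decrypt() calls used, as documented below.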
+ */ static inline struct crypto_blkcipher *crypto_alloc_blkcipher( const char *alg_name, u32 type, u32 mask) { @@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm( return &tfm->base; } +/** + * crypto_free_blkcipher() - zeroize and free the block cipher handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) { crypto_free_tfm(crypto_blkcipher_tfm(tfm)); } +/** + * crypto_has_blkcipher() - Search for the availability of a block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the block cipher is known to the kernel crypto API; false + * otherwise + */ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; @@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) return crypto_has_alg(alg_name, type, mask); } +/** + * crypto_blkcipher_name() - return the name / cra_name from the cipher handle + * @tfm: cipher handle + * + * Return: The character string holding the name of the cipher + */ static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) { return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); @@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg( return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; } +/** + * crypto_blkcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the block cipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) { return crypto_blkcipher_alg(tfm)->ivsize; } +/** + * crypto_blkcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the block cipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation. + * + * Return: block size of cipher + */ static inline unsigned int crypto_blkcipher_blocksize( struct crypto_blkcipher *tfm) { @@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); } +/** + * crypto_blkcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the block cipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. 
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, const u8 *key, unsigned int keylen) { @@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, key, keylen); } +/** + * crypto_blkcipher_encrypt() - encrypt plaintext + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * ciphertext + * @src: scatter/gather list that holds the plaintext + * @nbytes: number of bytes of the plaintext to encrypt. + * + * Encrypt plaintext data using the IV set by the caller with a preceding + * call of crypto_blkcipher_set_iv. + * + * The blkcipher_desc data structure must be filled by the caller and can + * reside on the stack. The caller must fill desc as follows: desc.tfm is filled + * with the block cipher handle; desc.flags is filled with either + * CRYPTO_TFM_REQ_MAY_SLEEP or 0. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, @@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); } +/** + * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * ciphertext + * @src: scatter/gather list that holds the plaintext + * @nbytes: number of bytes of the plaintext to encrypt. + * + * Encrypt plaintext data with the use of an IV that is solely used for this + * cipher operation. Any previously set IV is not used. + * + * The blkcipher_desc data structure must be filled by the caller and can + * reside on the stack. The caller must fill desc as follows: desc.tfm is filled + * with the block cipher handle; desc.info is filled with the IV to be used for + * the current operation; desc.flags is filled with either + * CRYPTO_TFM_REQ_MAY_SLEEP or 0. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, @@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); } +/** + * crypto_blkcipher_decrypt() - decrypt ciphertext + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * plaintext + * @src: scatter/gather list that holds the ciphertext + * @nbytes: number of bytes of the ciphertext to decrypt. + * + * Decrypt ciphertext data using the IV set by the caller with a preceding + * call of crypto_blkcipher_set_iv. + * + * The blkcipher_desc data structure must be filled by the caller as documented + * for the crypto_blkcipher_encrypt call above. 
+ * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + * + */ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, @@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); } +/** + * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * plaintext + * @src: scatter/gather list that holds the ciphertext + * @nbytes: number of bytes of the ciphertext to decrypt. + * + * Decrypt ciphertext data with the use of an IV that is solely used for this + * cipher operation. Any previously set IV is not used. + * + * The blkcipher_desc data structure must be filled by the caller as documented + * for the crypto_blkcipher_encrypt_iv call above. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, @@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); } +/** + * crypto_blkcipher_set_iv() - set IV for cipher + * @tfm: cipher handle + * @src: buffer holding the IV + * @len: length of the IV in bytes + * + * The caller provided IV is set for the block cipher referenced by the cipher + * handle. + */ static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, const u8 *src, unsigned int len) { memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); } +/** + * crypto_blkcipher_get_iv() - obtain IV from cipher + * @tfm: cipher handle + * @dst: buffer filled with the IV + * @len: length of the buffer dst + * + * The caller can obtain the IV set for the block cipher referenced by the + * cipher handle and store it into the user-provided buffer. If the buffer + * has an insufficient space, the IV is truncated to fit the buffer. + */ static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, u8 *dst, unsigned int len) { memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); } +/** + * DOC: Single Block Cipher API + * + * The single block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). + * + * Using the single block cipher API calls, operations with the basic cipher + * primitive can be implemented. These cipher primitives exclude any block + * chaining operations including IV handling. + * + * The purpose of this single block cipher API is to support the implementation + * of templates or other concepts that only need to perform the cipher operation + * on one block at a time. Templates invoke the underlying cipher primitive + * block-wise and process either the input or the output data of these cipher + * operations. 
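[Editor's note: a minimal, hypothetical sketch of the single block cipher calls documented next, not part of this patch. It encrypts exactly one block and leaves all chaining and IV handling to the caller, as the DOC text above explains; "aes" is only an example.]

/* Hypothetical example only -- not part of this patch. */
#include <linux/crypto.h>
#include <linux/err.h>

static int xyz_encrypt_one_block(u8 *dst, const u8 *src,
				 const u8 *key, unsigned int keylen)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, keylen);
	if (!ret)
		/* src and dst must each be crypto_cipher_blocksize() bytes */
		crypto_cipher_encrypt_one(tfm, dst, src);

	crypto_free_cipher(tfm);
	return ret;
}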
+ */ + static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) { return (struct crypto_cipher *)tfm; @@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) return __crypto_cipher_cast(tfm); } +/** + * crypto_alloc_cipher() - allocate single block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a single block cipher. The returned struct + * crypto_cipher is the cipher handle that is required for any subsequent API + * invocation for that single block cipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, u32 type, u32 mask) { @@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) return &tfm->base; } +/** + * crypto_free_cipher() - zeroize and free the single block cipher handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_cipher(struct crypto_cipher *tfm) { crypto_free_tfm(crypto_cipher_tfm(tfm)); } +/** + * crypto_has_cipher() - Search for the availability of a single block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the single block cipher is known to the kernel crypto API; + * false otherwise + */ static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; @@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) return &crypto_cipher_tfm(tfm)->crt_cipher; } +/** + * crypto_cipher_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the single block cipher referenced with the cipher handle + * tfm is returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) { return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); @@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); } +/** + * crypto_cipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the single block cipher referenced by the + * cipher handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. 
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, const u8 *key, unsigned int keylen) { @@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, key, keylen); } +/** + * crypto_cipher_encrypt_one() - encrypt one block of plaintext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the ciphertext + * @src: buffer holding the plaintext to be encrypted + * + * Invoke the encryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. + */ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src) { @@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, dst, src); } +/** + * crypto_cipher_decrypt_one() - decrypt one block of ciphertext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the plaintext + * @src: buffer holding the ciphertext to be decrypted + * + * Invoke the decryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. + */ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src) { @@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, dst, src); } +/** + * DOC: Synchronous Message Digest API + * + * The synchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto) + */ + static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) { return (struct crypto_hash *)tfm; @@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm) return __crypto_hash_cast(tfm); } +/** + * crypto_alloc_hash() - allocate synchronous message digest handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a message digest. The returned struct + * crypto_hash is the cipher handle that is required for any subsequent + * API invocation for that message digest. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
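[Editor's note: to illustrate the hash calls documented below, a hypothetical sketch, not part of this patch, that computes a digest over two buffers with the init/update/final sequence. "sha256" is only an example; the caller-provided output buffer is assumed to be at least crypto_hash_digestsize() bytes.]

/* Hypothetical example only -- not part of this patch. */
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int xyz_sha256_two_bufs(const u8 *a, unsigned int alen,
			       const u8 *b, unsigned int blen, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_hash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* @out must provide at least crypto_hash_digestsize(tfm) bytes */
	desc.tfm = tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	ret = crypto_hash_init(&desc);
	if (ret)
		goto out;

	sg_init_one(&sg, a, alen);
	ret = crypto_hash_update(&desc, &sg, alen);
	if (ret)
		goto out;

	sg_init_one(&sg, b, blen);
	ret = crypto_hash_update(&desc, &sg, blen);
	if (ret)
		goto out;

	ret = crypto_hash_final(&desc, out);
out:
	crypto_free_hash(tfm);
	return ret;
}

For a keyed hash such as "hmac(sha256)", crypto_hash_setkey() is called after allocation, and crypto_hash_digest() combines the init/update/final steps into a single call, as documented below.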
+ */ static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, u32 type, u32 mask) { @@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm) return &tfm->base; } +/** + * crypto_free_hash() - zeroize and free message digest handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_hash(struct crypto_hash *tfm) { crypto_free_tfm(crypto_hash_tfm(tfm)); } +/** + * crypto_has_hash() - Search for the availability of a message digest + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the message digest cipher is known to the kernel crypto + * API; false otherwise + */ static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; @@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm) return &crypto_hash_tfm(tfm)->crt_hash; } +/** + * crypto_hash_blocksize() - obtain block size for message digest + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) { return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); @@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm) return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); } +/** + * crypto_hash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * Return: message digest size + */ static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) { return crypto_hash_crt(tfm)->digestsize; @@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags) crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); } +/** + * crypto_hash_init() - (re)initialize message digest handle + * @desc: cipher request handle that to be filled by caller -- + * desc.tfm is filled with the hash cipher handle; + * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0. + * + * The call (re-)initializes the message digest referenced by the hash cipher + * request handle. Any potentially existing state created by previous + * operations is discarded. + * + * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred + */ static inline int crypto_hash_init(struct hash_desc *desc) { return crypto_hash_crt(desc->tfm)->init(desc); } +/** + * crypto_hash_update() - add data to message digest for processing + * @desc: cipher request handle + * @sg: scatter / gather list pointing to the data to be added to the message + * digest + * @nbytes: number of bytes to be processed from @sg + * + * Updates the message digest state of the cipher handle pointed to by the + * hash cipher request handle with the input data pointed to by the + * scatter/gather list. 
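The init/update/final triplet documented here (crypto_hash_final() follows just below) supports incremental digesting. A rough sketch over two caller-prepared scatterlist fragments; the helper name and parameters are illustrative, and @out is assumed to hold at least crypto_hash_digestsize(tfm) bytes:

static int example_hash_two_parts(struct crypto_hash *tfm,
                                  struct scatterlist *sg1, unsigned int len1,
                                  struct scatterlist *sg2, unsigned int len2,
                                  u8 *out)
{
        struct hash_desc desc = { .tfm = tfm, .flags = 0 };
        int err;

        err = crypto_hash_init(&desc);          /* discard any prior state */
        if (!err)
                err = crypto_hash_update(&desc, sg1, len1);
        if (!err)
                err = crypto_hash_update(&desc, sg2, len2);
        if (!err)
                err = crypto_hash_final(&desc, out);
        return err;
}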
+ * + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred + */ static inline int crypto_hash_update(struct hash_desc *desc, struct scatterlist *sg, unsigned int nbytes) @@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc, return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); } +/** + * crypto_hash_final() - calculate message digest + * @desc: cipher request handle + * @out: message digest output buffer -- The caller must ensure that the out + * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize + * function). + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) { return crypto_hash_crt(desc->tfm)->final(desc, out); } +/** + * crypto_hash_digest() - calculate message digest for a buffer + * @desc: see crypto_hash_final() + * @sg: see crypto_hash_update() + * @nbytes: see crypto_hash_update() + * @out: see crypto_hash_final() + * + * This function is a "short-hand" for the function calls of crypto_hash_init, + * crypto_hash_update and crypto_hash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ static inline int crypto_hash_digest(struct hash_desc *desc, struct scatterlist *sg, unsigned int nbytes, u8 *out) @@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc, return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); } +/** + * crypto_hash_setkey() - set key for message digest + * @hash: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the message digest cipher. The cipher + * handle must point to a keyed hash in order for this function to succeed. 
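crypto_hash_setkey() and crypto_hash_digest(), both documented in this section, combine into a one-shot keyed digest. A hedged sketch, assuming <linux/scatterlist.h> for sg_init_one(), that the "hmac(sha256)" template is available, and that @out holds at least the digest size:

static int example_keyed_digest(const u8 *key, unsigned int keylen,
                                const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_hash *tfm;
        struct hash_desc desc;
        struct scatterlist sg;
        int err;

        tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc.tfm = tfm;
        desc.flags = 0;
        sg_init_one(&sg, data, len);

        err = crypto_hash_setkey(tfm, key, keylen);
        if (!err)
                err = crypto_hash_digest(&desc, &sg, len, out);

        crypto_free_hash(tfm);
        return err;
}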
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_hash_setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen) { diff --git a/include/linux/dcache.h b/include/linux/dcache.h index b2a2a08523bf..5a813988e6d4 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -124,15 +124,15 @@ struct dentry { void *d_fsdata; /* fs-specific data */ struct list_head d_lru; /* LRU list */ + struct list_head d_child; /* child of parent list */ + struct list_head d_subdirs; /* our children */ /* - * d_child and d_rcu can share memory + * d_alias and d_rcu can share memory */ union { - struct list_head d_child; /* child of parent list */ + struct hlist_node d_alias; /* inode alias list */ struct rcu_head d_rcu; } d_u; - struct list_head d_subdirs; /* our children */ - struct hlist_node d_alias; /* inode alias list */ }; /* @@ -230,7 +230,6 @@ extern seqlock_t rename_lock; */ extern void d_instantiate(struct dentry *, struct inode *); extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); -extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); extern int d_instantiate_no_diralias(struct dentry *, struct inode *); extern void __d_drop(struct dentry *dentry); extern void d_drop(struct dentry *dentry); diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 4d0b4d1aa132..da4c4983adbe 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -20,6 +20,7 @@ #include <linux/types.h> +struct device; struct file_operations; struct debugfs_blob_wrapper { @@ -92,20 +93,25 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode, struct dentry *parent, struct debugfs_regset32 *regset); -int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, - int nregs, void __iomem *base, char *prefix); +void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, + int nregs, void __iomem *base, char *prefix); struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, struct dentry *parent, u32 *array, u32 elements); +struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, + struct dentry *parent, + int (*read_fn)(struct seq_file *s, + void *data)); + bool debugfs_initialized(void); #else #include <linux/err.h> -/* +/* * We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled * so users have a chance to detect if there was a real error or not. We don't * want to duplicate the design decision mistakes of procfs and devfs again. 
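The new debugfs_create_devm_seqfile() declared above removes the usual file_operations boilerplate for read-only, device-bound debugfs files. A small sketch of how a driver might use it; the "stats" file name, the show routine and its output are purely illustrative, and <linux/seq_file.h> is assumed:

static int foo_stats_show(struct seq_file *s, void *data)
{
        seq_puts(s, "example statistics\n");
        return 0;
}

static void foo_debugfs_init(struct device *dev, struct dentry *parent)
{
        /* file lifetime follows the device (devm); no manual cleanup needed */
        debugfs_create_devm_seqfile(dev, "stats", parent, foo_stats_show);
}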
@@ -233,10 +239,9 @@ static inline struct dentry *debugfs_create_regset32(const char *name, return ERR_PTR(-ENODEV); } -static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, +static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix) { - return 0; } static inline bool debugfs_initialized(void) @@ -251,6 +256,15 @@ static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t return ERR_PTR(-ENODEV); } +static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev, + const char *name, + struct dentry *parent, + int (*read_fn)(struct seq_file *s, + void *data)) +{ + return ERR_PTR(-ENODEV); +} + #endif #endif diff --git a/include/linux/device.h b/include/linux/device.h index ce1f21608b16..fb506738f7b7 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -911,6 +911,11 @@ static inline void device_unlock(struct device *dev) mutex_unlock(&dev->mutex); } +static inline void device_lock_assert(struct device *dev) +{ + lockdep_assert_held(&dev->mutex); +} + void driver_init(void); /* @@ -1118,6 +1123,41 @@ do { \ }) #endif +#ifdef CONFIG_PRINTK +#define dev_level_once(dev_level, dev, fmt, ...) \ +do { \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + dev_level(dev, fmt, ##__VA_ARGS__); \ + } \ +} while (0) +#else +#define dev_level_once(dev_level, dev, fmt, ...) \ +do { \ + if (0) \ + dev_level(dev, fmt, ##__VA_ARGS__); \ +} while (0) +#endif + +#define dev_emerg_once(dev, fmt, ...) \ + dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__) +#define dev_alert_once(dev, fmt, ...) \ + dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__) +#define dev_crit_once(dev, fmt, ...) \ + dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__) +#define dev_err_once(dev, fmt, ...) \ + dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__) +#define dev_warn_once(dev, fmt, ...) \ + dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__) +#define dev_notice_once(dev, fmt, ...) \ + dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__) +#define dev_info_once(dev, fmt, ...) \ + dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) +#define dev_dbg_once(dev, fmt, ...) \ + dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__) + #define dev_level_ratelimited(dev_level, dev, fmt, ...) 
\ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index d5d388160f42..c3007cb4bfa6 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) extern u64 dma_get_required_mask(struct device *dev); -#ifndef set_arch_dma_coherent_ops -static inline int set_arch_dma_coherent_ops(struct device *dev) -{ - return 0; -} +#ifndef arch_setup_dma_ops +static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, + u64 size, struct iommu_ops *iommu, + bool coherent) { } +#endif + +#ifndef arch_teardown_dma_ops +static inline void arch_teardown_dma_ops(struct device *dev) { } #endif static inline unsigned int dma_get_max_seg_size(struct device *dev) diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 653a1fd07ae8..40cd75e21ea2 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -447,7 +447,8 @@ struct dmaengine_unmap_data { * communicate status * @phys: physical address of the descriptor * @chan: target channel for this operation - * @tx_submit: set the prepared descriptor(s) to be executed by the engine + * @tx_submit: accept the descriptor, assign ordered cookie and mark the + * descriptor pending. To be pushed on .issue_pending() call * @callback: routine to call after this operation is complete * @callback_param: general parameter to pass to the callback routine * ---async_tx api specific fields--- diff --git a/include/linux/drbd.h b/include/linux/drbd.h index debb70d40547..8723f2a99e15 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -172,7 +172,7 @@ enum drbd_ret_code { ERR_RES_NOT_KNOWN = 158, ERR_RES_IN_USE = 159, ERR_MINOR_CONFIGURED = 160, - ERR_MINOR_EXISTS = 161, + ERR_MINOR_OR_VOLUME_EXISTS = 161, ERR_INVALID_REQUEST = 162, ERR_NEED_APV_100 = 163, ERR_NEED_ALLOW_TWO_PRI = 164, diff --git a/include/linux/elf.h b/include/linux/elf.h index 67a5fa7830c4..20fa8d8ae313 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -15,6 +15,11 @@ set_personality(PER_LINUX | (current->personality & (~PER_MASK))) #endif +#ifndef SET_PERSONALITY2 +#define SET_PERSONALITY2(ex, state) \ + SET_PERSONALITY(ex) +#endif + #if ELF_CLASS == ELFCLASS32 extern Elf32_Dyn _DYNAMIC []; diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 733980fce8e3..41c891d05f04 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -392,4 +392,16 @@ static inline unsigned long compare_ether_header(const void *a, const void *b) #endif } +/** + * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame + * @skb: Buffer to pad + * + * An Ethernet frame should have a minimum size of 60 bytes. This function + * takes short frames and pads them with zeros up to the 60 byte limit.
+ */ +static inline int eth_skb_pad(struct sk_buff *skb) +{ + return skb_put_padto(skb, ETH_ZLEN); +} + #endif /* _LINUX_ETHERDEVICE_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c1a2d60dfb82..653dc9c4ebac 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -59,6 +59,26 @@ enum ethtool_phys_id_state { ETHTOOL_ID_OFF }; +enum { + ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ + ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ + + /* + * Add your fresh new hash function bits above and remember to update + * rss_hash_func_strings[] in ethtool.c + */ + ETH_RSS_HASH_FUNCS_COUNT +}; + +#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) +#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT) + +#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) +#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) + +#define ETH_RSS_HASH_UNKNOWN 0 +#define ETH_RSS_HASH_NO_CHANGE 0 + struct net_device; /* Some generic methods drivers may use in their ethtool_ops */ @@ -158,17 +178,14 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * Returns zero if not supported for this specific device. * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. * Returns zero if not supported for this specific device. - * @get_rxfh: Get the contents of the RX flow hash indirection table and hash - * key. - * Will only be called if one or both of @get_rxfh_indir_size and - * @get_rxfh_key_size are implemented and return non-zero. - * Returns a negative error code or zero. - * @set_rxfh: Set the contents of the RX flow hash indirection table and/or - * hash key. In case only the indirection table or hash key is to be - * changed, the other argument will be %NULL. - * Will only be called if one or both of @get_rxfh_indir_size and - * @get_rxfh_key_size are implemented and return non-zero. + * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key + * and/or hash function. * Returns a negative error code or zero. + * @set_rxfh: Set the contents of the RX flow hash indirection table, hash + * key, and/or hash function. Arguments which are set to %NULL or zero + * will remain unchanged. + * Returns a negative error code or zero. An error code must be returned + * if at least one unsupported change was requested. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or * zero. 
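A hedged driver-side sketch of the reworked @get_rxfh/@set_rxfh pair described above (the new prototypes appear in the ethtool_ops hunk just below). struct foo_priv and its rss_indir/rss_key fields are invented for illustration, only the Toeplitz hash function is accepted, and the usual netdevice/string headers are assumed:

struct foo_priv {
        u32 rss_indir[128];
        u8 rss_key[40];
};

static int foo_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
        struct foo_priv *priv = netdev_priv(dev);

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;
        if (indir)
                memcpy(indir, priv->rss_indir, sizeof(priv->rss_indir));
        if (key)
                memcpy(key, priv->rss_key, sizeof(priv->rss_key));
        return 0;
}

static int foo_set_rxfh(struct net_device *dev, const u32 *indir,
                        const u8 *key, const u8 hfunc)
{
        /* reject unsupported hash function changes, as required above */
        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;
        /* apply indir and/or key here when they are non-NULL */
        return 0;
}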
@@ -241,9 +258,10 @@ struct ethtool_ops { int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); - int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key); + int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key, + u8 *hfunc); int (*set_rxfh)(struct net_device *, const u32 *indir, - const u8 *key); + const u8 *key, const u8 hfunc); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 860313a33a43..87f14e90e984 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -33,7 +33,8 @@ #define F2FS_META_INO(sbi) (sbi->meta_ino_num) /* This flag is used by node and meta inodes, and by recovery */ -#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) +#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) +#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) /* * For further optimization on multi-head logs, on-disk layout supports maximum @@ -170,14 +171,12 @@ struct f2fs_extent { #define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ +#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ +#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ #define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ F2FS_INLINE_XATTR_ADDRS - 1)) -#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\ - sizeof(__le32) * (DEF_ADDRS_PER_INODE + \ - DEF_NIDS_PER_INODE - 1)) - struct f2fs_inode { __le16 i_mode; /* file mode */ __u8 i_advise; /* file hints */ @@ -435,6 +434,24 @@ struct f2fs_dentry_block { __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; } __packed; +/* for inline dir */ +#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \ + ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ + BITS_PER_BYTE + 1)) +#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \ + BITS_PER_BYTE - 1) / BITS_PER_BYTE) +#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \ + ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ + NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE)) + +/* inline directory entry structure */ +struct f2fs_inline_dentry { + __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE]; + __u8 reserved[INLINE_RESERVED_SIZE]; + struct f2fs_dir_entry dentry[NR_INLINE_DENTRY]; + __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN]; +} __packed; + /* file types used in inode_info->flags */ enum { F2FS_FT_UNKNOWN, diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index c6f996f2abb6..798fad9e420d 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -5,6 +5,7 @@ #include <linux/types.h> #include <linux/debugfs.h> +#include <linux/ratelimit.h> #include <linux/atomic.h> /* @@ -25,14 +26,18 @@ struct fault_attr { unsigned long reject_end; unsigned long count; + struct ratelimit_state ratelimit_state; + struct dentry *dname; }; -#define FAULT_ATTR_INITIALIZER { \ - .interval = 1, \ - .times = ATOMIC_INIT(1), \ - .require_end = ULONG_MAX, \ - .stacktrace_depth = 32, \ - .verbose = 2, \ +#define FAULT_ATTR_INITIALIZER { \ + .interval = 1, \ + .times = ATOMIC_INIT(1), \ + .require_end = ULONG_MAX, \ + .stacktrace_depth = 32, \ + .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \ + .verbose = 2, \ + .dname = NULL, \ } #define DECLARE_FAULT_ATTR(name) struct fault_attr name = 
FAULT_ATTR_INITIALIZER diff --git a/include/linux/fence.h b/include/linux/fence.h index d174585b874b..39efee130d2b 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -128,8 +128,8 @@ struct fence_cb { * from irq context, so normal spinlocks can be used. * * A return value of false indicates the fence already passed, - * or some failure occured that made it impossible to enable - * signaling. True indicates succesful enabling. + * or some failure occurred that made it impossible to enable + * signaling. True indicates successful enabling. * * fence->status may be set in enable_signaling, but only when false is * returned. diff --git a/include/linux/file.h b/include/linux/file.h index 4d69123377a2..f87d30882a24 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -66,7 +66,6 @@ extern void set_close_on_exec(unsigned int fd, int flag); extern bool get_close_on_exec(unsigned int fd); extern void put_filp(struct file *); extern int get_unused_fd_flags(unsigned flags); -#define get_unused_fd() get_unused_fd_flags(0) extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); diff --git a/include/linux/filter.h b/include/linux/filter.h index ca95abd2bed1..caac2087a4d5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -381,6 +381,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); +int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_detach_filter(struct sock *sk); int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); diff --git a/include/linux/fs.h b/include/linux/fs.h index 9ab779e8a63c..f90c0282c114 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -18,6 +18,7 @@ #include <linux/pid.h> #include <linux/bug.h> #include <linux/mutex.h> +#include <linux/rwsem.h> #include <linux/capability.h> #include <linux/semaphore.h> #include <linux/fiemap.h> @@ -401,7 +402,7 @@ struct address_space { atomic_t i_mmap_writable;/* count VM_SHARED mappings */ struct rb_root i_mmap; /* tree of private and shared mappings */ struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ - struct mutex i_mmap_mutex; /* protect tree, count, list */ + struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ /* Protected by tree_lock together with the radix tree */ unsigned long nrpages; /* number of total pages */ unsigned long nrshadows; /* number of shadow entries */ @@ -467,6 +468,26 @@ struct block_device { int mapping_tagged(struct address_space *mapping, int tag); +static inline void i_mmap_lock_write(struct address_space *mapping) +{ + down_write(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_unlock_write(struct address_space *mapping) +{ + up_write(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_lock_read(struct address_space *mapping) +{ + down_read(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_unlock_read(struct address_space *mapping) +{ + up_read(&mapping->i_mmap_rwsem); +} + /* * Might pages of this file be mapped into userspace? 
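A rough sketch of the calling pattern the i_mmap_rwsem helpers above are meant for: walking the shared mappings of a file under the read lock. vma_interval_tree_foreach() comes from <linux/mm.h>, and the walk body is illustrative only:

static void example_walk_file_mappings(struct address_space *mapping)
{
        struct vm_area_struct *vma;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, ULONG_MAX) {
                /* inspect, but do not modify, each shared mapping here */
        }
        i_mmap_unlock_read(mapping);
}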
*/ @@ -606,9 +627,6 @@ struct inode { const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ struct file_lock *i_flock; struct address_space i_data; -#ifdef CONFIG_QUOTA - struct dquot *i_dquot[MAXQUOTAS]; -#endif struct list_head i_devices; union { struct pipe_inode_info *i_pipe; @@ -789,7 +807,6 @@ struct file { struct rcu_head fu_rcuhead; } f_u; struct path f_path; -#define f_dentry f_path.dentry struct inode *f_inode; /* cached value */ const struct file_operations *f_op; @@ -1224,6 +1241,7 @@ struct super_block { struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; + unsigned int s_quota_types; /* Bitmask of supported quota types */ struct quota_info s_dquot; /* Diskquota specific options */ struct sb_writers s_writers; @@ -1467,7 +1485,10 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); * This allows the kernel to read directories into kernel space or * to have different dirent layouts depending on the binary type. */ -typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); +struct dir_context; +typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, + unsigned); + struct dir_context { const filldir_t actor; loff_t pos; @@ -1497,6 +1518,7 @@ struct file_operations { long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); + void (*mremap)(struct file *, struct vm_area_struct *); int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); @@ -1513,7 +1535,7 @@ struct file_operations { int (*setlease)(struct file *, long, struct file_lock **, void **); long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len); - int (*show_fdinfo)(struct seq_file *m, struct file *f); + void (*show_fdinfo)(struct seq_file *m, struct file *f); }; struct inode_operations { @@ -1560,6 +1582,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, struct iovec *fast_pointer, struct iovec **ret_pointer); +extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_readv(struct file *, const struct iovec __user *, @@ -1577,7 +1600,9 @@ struct super_operations { void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); + int (*freeze_super) (struct super_block *); int (*freeze_fs) (struct super_block *); + int (*thaw_super) (struct super_block *); int (*unfreeze_fs) (struct super_block *); int (*statfs) (struct dentry *, struct kstatfs *); int (*remount_fs) (struct super_block *, int *, char *); @@ -1590,6 +1615,7 @@ struct super_operations { #ifdef CONFIG_QUOTA ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); + struct dquot **(*get_dquots)(struct inode *); #endif int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); long (*nr_cached_objects)(struct super_block *, int); @@ -2060,7 +2086,7 @@ struct filename { extern long vfs_truncate(struct path *, loff_t); extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, struct file *filp); -extern 
int do_fallocate(struct file *file, int mode, loff_t offset, +extern int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len); extern long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode); @@ -2072,6 +2098,7 @@ extern int vfs_open(const struct path *, struct file *, const struct cred *); extern struct file * dentry_open(const struct path *, int, const struct cred *); extern int filp_close(struct file *, fl_owner_t id); +extern struct filename *getname_flags(const char __user *, int, int *); extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); @@ -2149,7 +2176,6 @@ static inline int sb_is_blkdev_sb(struct super_block *sb) extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; -extern const struct file_operations bad_sock_fops; #ifdef CONFIG_BLOCK extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); @@ -2786,6 +2812,11 @@ static inline void inode_has_no_xattr(struct inode *inode) inode->i_flags |= S_NOSEC; } +static inline bool is_root_inode(struct inode *inode) +{ + return inode == inode->i_sb->s_root->d_inode; +} + static inline bool dir_emit(struct dir_context *ctx, const char *name, int namelen, u64 ino, unsigned type) diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h index 84d60cb841b1..bf0321eabbda 100644 --- a/include/linux/fsl_ifc.h +++ b/include/linux/fsl_ifc.h @@ -29,7 +29,16 @@ #include <linux/of_platform.h> #include <linux/interrupt.h> -#define FSL_IFC_BANK_COUNT 4 +/* + * The actual number of banks implemented depends on the IFC version + * - IFC version 1.0 implements 4 banks. + * - IFC version 1.1 onward implements 8 banks. 
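A hedged sketch of how a probe path might derive the bank count from the controller version, per the comment above; the version macros and the new version/banks fields appear in the hunks immediately below, and reading the revision register is elided:

static void example_set_bank_count(struct fsl_ifc_ctrl *ctrl, u32 ifc_rev)
{
        ctrl->version = ifc_rev & FSL_IFC_VERSION_MASK;
        /* IFC 1.0 implements 4 banks, 1.1 onward implements 8 */
        ctrl->banks = (ctrl->version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
}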
+ */ +#define FSL_IFC_BANK_COUNT 8 + +#define FSL_IFC_VERSION_MASK 0x0F0F0000 +#define FSL_IFC_VERSION_1_0_0 0x01000000 +#define FSL_IFC_VERSION_1_1_0 0x01010000 /* * CSPR - Chip Select Property Register @@ -776,23 +785,23 @@ struct fsl_ifc_regs { __be32 cspr; u32 res2; } cspr_cs[FSL_IFC_BANK_COUNT]; - u32 res3[0x19]; + u32 res3[0xd]; struct { __be32 amask; u32 res4[0x2]; } amask_cs[FSL_IFC_BANK_COUNT]; - u32 res5[0x18]; + u32 res5[0xc]; struct { __be32 csor; __be32 csor_ext; u32 res6; } csor_cs[FSL_IFC_BANK_COUNT]; - u32 res7[0x18]; + u32 res7[0xc]; struct { __be32 ftim[4]; u32 res8[0x8]; } ftim_cs[FSL_IFC_BANK_COUNT]; - u32 res9[0x60]; + u32 res9[0x30]; __be32 rb_stat; u32 res10[0x2]; __be32 ifc_gcr; @@ -827,6 +836,8 @@ struct fsl_ifc_ctrl { int nand_irq; spinlock_t lock; void *nand; + int version; + int banks; u32 nand_stat; wait_queue_head_t nand_wait; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index ca060d7c4fa6..0f313f93c586 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -197,24 +197,6 @@ struct fsnotify_group { #define FSNOTIFY_EVENT_INODE 2 /* - * Inode specific fields in an fsnotify_mark - */ -struct fsnotify_inode_mark { - struct inode *inode; /* inode this mark is associated with */ - struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */ - struct list_head free_i_list; /* tmp list used when freeing this mark */ -}; - -/* - * Mount point specific fields in an fsnotify_mark - */ -struct fsnotify_vfsmount_mark { - struct vfsmount *mnt; /* vfsmount this mark is associated with */ - struct hlist_node m_list; /* list of marks by inode->i_fsnotify_marks */ - struct list_head free_m_list; /* tmp list used when freeing this mark */ -}; - -/* * a mark is simply an object attached to an in core inode which allows an * fsnotify listener to indicate they are either no longer interested in events * of a type matching mask or only interested in those events. @@ -230,11 +212,17 @@ struct fsnotify_mark { * in kernel that found and may be using this mark. */ atomic_t refcnt; /* active things looking at this mark */ struct fsnotify_group *group; /* group this mark is for */ - struct list_head g_list; /* list of marks by group->i_fsnotify_marks */ + struct list_head g_list; /* list of marks by group->i_fsnotify_marks + * Also reused for queueing mark into + * destroy_list when it's waiting for + * the end of SRCU period before it can + * be freed */ spinlock_t lock; /* protect group and inode */ + struct hlist_node obj_list; /* list of marks for inode / vfsmount */ + struct list_head free_list; /* tmp list used when freeing this mark */ union { - struct fsnotify_inode_mark i; - struct fsnotify_vfsmount_mark m; + struct inode *inode; /* inode this mark is associated with */ + struct vfsmount *mnt; /* vfsmount this mark is associated with */ }; __u32 ignored_mask; /* events types to ignore */ #define FSNOTIFY_MARK_FLAG_INODE 0x01 @@ -243,7 +231,6 @@ struct fsnotify_mark { #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 #define FSNOTIFY_MARK_FLAG_ALIVE 0x10 unsigned int flags; /* vfsmount or inode mark? 
*/ - struct list_head destroy_list; void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ }; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 662697babd48..1da602982cf9 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -39,6 +39,12 @@ # define FTRACE_FORCE_LIST_FUNC 0 #endif +/* Main tracing buffer and events set up */ +#ifdef CONFIG_TRACING +void trace_init(void); +#else +static inline void trace_init(void) { } +#endif struct module; struct ftrace_hash; @@ -61,6 +67,11 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); /* * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are * set in the flags member. + * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and + * IPMODIFY are a kind of attribute flags which can be set only before + * registering the ftrace_ops, and can not be modified while registered. + * Changing those attribute flags after registering ftrace_ops will + * cause unexpected results. * * ENABLED - set/unset when ftrace_ops is registered/unregistered * DYNAMIC - set when ftrace_ops is registered to denote dynamically @@ -94,6 +105,17 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * ADDING - The ops is in the process of being added. * REMOVING - The ops is in the process of being removed. * MODIFYING - The ops is in the process of changing its filter functions. + * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code. + * The arch specific code sets this flag when it allocated a + * trampoline. This lets the arch know that it can update the + * trampoline in case the callback function changes. + * The ftrace_ops trampoline can be set by the ftrace users, and + * in such cases the arch must not modify it. Only the arch ftrace + * core code should set this flag. + * IPMODIFY - The ops can modify the IP register. This can only be set with + * SAVE_REGS. If another ops with this flag set is already registered + * for any of the functions that this ops will be registered for, then + * this ops will fail to register or set_filter_ip. */ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, @@ -108,6 +130,8 @@ enum { FTRACE_OPS_FL_ADDING = 1 << 9, FTRACE_OPS_FL_REMOVING = 1 << 10, FTRACE_OPS_FL_MODIFYING = 1 << 11, + FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, + FTRACE_OPS_FL_IPMODIFY = 1 << 13, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -142,6 +166,7 @@ struct ftrace_ops { struct ftrace_ops_hash *func_hash; struct ftrace_ops_hash old_hash; unsigned long trampoline; + unsigned long trampoline_size; #endif }; @@ -255,7 +280,9 @@ struct ftrace_func_command { int ftrace_arch_code_modify_prepare(void); int ftrace_arch_code_modify_post_process(void); -void ftrace_bug(int err, unsigned long ip); +struct dyn_ftrace; + +void ftrace_bug(int err, struct dyn_ftrace *rec); struct seq_file; @@ -287,6 +314,8 @@ extern int ftrace_text_reserved(const void *start, const void *end); extern int ftrace_nr_registered_ops(void); +bool is_ftrace_trampoline(unsigned long addr); + /* * The dyn_ftrace record's flags field is split into two parts. * the first part which is '0-FTRACE_REF_MAX' is a counter of @@ -297,6 +326,7 @@ extern int ftrace_nr_registered_ops(void); * ENABLED - the function is being traced * REGS - the record wants the function to save regs * REGS_EN - the function is set up to save regs. + * IPMODIFY - the record allows for the IP address to be changed. * * When a new ftrace_ops is registered and wants a function to save * pt_regs, the rec->flag REGS is set.
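A sketch, under stated assumptions, of an ftrace_ops using the new IPMODIFY flag documented above: per that description it must be paired with SAVE_REGS, and registration is expected to fail if another IPMODIFY user already covers the same function. The handler body is x86-flavoured (regs->ip), foo_replacement() is a hypothetical redirect target, and the registration calls are shown only as comments:

static void foo_replacement(void)
{
        /* hypothetical function that execution gets redirected to */
}

static void foo_handler(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *ops, struct pt_regs *regs)
{
        regs->ip = (unsigned long)foo_replacement;      /* x86: redirect */
}

static struct ftrace_ops foo_ops = {
        .func   = foo_handler,
        .flags  = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

/*
 * Typical use (error handling omitted):
 *      ftrace_set_filter_ip(&foo_ops, target_ip, 0, 0);
 *      register_ftrace_function(&foo_ops);
 */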
When the function has been @@ -310,10 +340,11 @@ enum { FTRACE_FL_REGS_EN = (1UL << 29), FTRACE_FL_TRAMP = (1UL << 28), FTRACE_FL_TRAMP_EN = (1UL << 27), + FTRACE_FL_IPMODIFY = (1UL << 26), }; -#define FTRACE_REF_MAX_SHIFT 27 -#define FTRACE_FL_BITS 5 +#define FTRACE_REF_MAX_SHIFT 26 +#define FTRACE_FL_BITS 6 #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) @@ -586,6 +617,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user size_t cnt, loff_t *ppos) { return -ENODEV; } static inline int ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } + +static inline bool is_ftrace_trampoline(unsigned long addr) +{ + return false; +} #endif /* CONFIG_DYNAMIC_FTRACE */ /* totally disable ftrace - can not re-enable after this */ @@ -843,6 +879,7 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk) enum ftrace_dump_mode; extern enum ftrace_dump_mode ftrace_dump_on_oops; +extern int tracepoint_printk; extern void disable_trace_on_warning(void); extern int __disable_trace_on_warning; diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 28672e87e910..0bebb5c348b8 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -138,6 +138,17 @@ enum print_line_t { TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ }; +/* + * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq + * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function + * simplifies those functions and keeps them in sync. + */ +static inline enum print_line_t trace_handle_return(struct trace_seq *s) +{ + return trace_seq_has_overflowed(s) ? 
+ TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; +} + void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 41b30fd4d041..b840e3b2770d 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -110,11 +110,8 @@ struct vm_area_struct; #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ __GFP_RECLAIMABLE) #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) -#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ - __GFP_HIGHMEM) -#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ - __GFP_HARDWALL | __GFP_HIGHMEM | \ - __GFP_MOVABLE) +#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) +#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) #define GFP_IOFS (__GFP_IO | __GFP_FS) #define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ @@ -381,8 +378,8 @@ extern void free_kmem_pages(unsigned long addr, unsigned int order); void page_alloc_init(void); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); -void drain_all_pages(void); -void drain_local_pages(void *dummy); +void drain_all_pages(struct zone *zone); +void drain_local_pages(struct zone *zone); /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 85aa5d0b9357..ab81339a8590 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -216,14 +216,15 @@ static inline int gpio_to_irq(unsigned gpio) return -EINVAL; } -static inline int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset) +static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, + unsigned int offset) { WARN_ON(1); return -EINVAL; } -static inline void gpio_unlock_as_irq(struct gpio_chip *chip, - unsigned int offset) +static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, + unsigned int offset) { WARN_ON(1); } diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 12f146fa6604..fd85cb120ee0 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -66,7 +66,7 @@ __devm_gpiod_get_index_optional(struct device *dev, const char *con_id, unsigned int index, enum gpiod_flags flags); void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); -int gpiod_get_direction(const struct gpio_desc *desc); +int gpiod_get_direction(struct gpio_desc *desc); int gpiod_direction_input(struct gpio_desc *desc); int gpiod_direction_output(struct gpio_desc *desc, int value); int gpiod_direction_output_raw(struct gpio_desc *desc, int value); @@ -74,14 +74,24 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value); /* Value get/set from non-sleeping context */ int gpiod_get_value(const struct gpio_desc *desc); void gpiod_set_value(struct gpio_desc *desc, int value); +void gpiod_set_array(unsigned int array_size, + struct gpio_desc **desc_array, int *value_array); int gpiod_get_raw_value(const struct gpio_desc *desc); void gpiod_set_raw_value(struct gpio_desc *desc, int value); +void gpiod_set_raw_array(unsigned int array_size, + struct gpio_desc **desc_array, int *value_array); /* Value get/set from sleeping context */ int gpiod_get_value_cansleep(const struct gpio_desc *desc); void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); +void gpiod_set_array_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); int 
gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); +void gpiod_set_raw_array_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); @@ -94,6 +104,13 @@ int gpiod_to_irq(const struct gpio_desc *desc); struct gpio_desc *gpio_to_desc(unsigned gpio); int desc_to_gpio(const struct gpio_desc *desc); +/* Child properties interface */ +struct fwnode_handle; + +struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, + const char *propname); +struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, + struct fwnode_handle *child); #else /* CONFIG_GPIOLIB */ static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, @@ -210,6 +227,13 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value) /* GPIO can never have been requested */ WARN_ON(1); } +static inline void gpiod_set_array(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} static inline int gpiod_get_raw_value(const struct gpio_desc *desc) { /* GPIO can never have been requested */ @@ -221,6 +245,13 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) /* GPIO can never have been requested */ WARN_ON(1); } +static inline void gpiod_set_raw_array(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) { @@ -233,6 +264,13 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) /* GPIO can never have been requested */ WARN_ON(1); } +static inline void gpiod_set_array_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) { /* GPIO can never have been requested */ @@ -245,6 +283,13 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, /* GPIO can never have been requested */ WARN_ON(1); } +static inline void gpiod_set_raw_array_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 249db3057e4d..c497c62889d1 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -32,6 +32,7 @@ struct seq_file; * @get: returns value for signal "offset"; for output signals this * returns either the value actually sensed, or zero * @set: assigns output value for signal "offset" + * @set_multiple: assigns output values for multiple signals defined by "mask" * @set_debounce: optional hook for setting debounce time for specified gpio in * interrupt triggered gpio chips * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; @@ -89,6 +90,9 @@ struct gpio_chip { unsigned offset); void (*set)(struct gpio_chip *chip, unsigned offset, int value); + void (*set_multiple)(struct gpio_chip *chip, + unsigned long *mask, + unsigned long *bits); int (*set_debounce)(struct gpio_chip *chip, unsigned offset, unsigned debounce); @@ -149,8 +153,8 @@ extern struct gpio_chip 
*gpiochip_find(void *data, int (*match)(struct gpio_chip *chip, void *data)); /* lock/unlock as IRQ */ -int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset); -void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); +int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); +void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index 8b622468952c..ee2d8c6f9130 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -2,6 +2,7 @@ #define _GPIO_KEYS_H struct device; +struct gpio_desc; /** * struct gpio_keys_button - configuration parameters @@ -17,6 +18,7 @@ struct device; * disable button via sysfs * @value: axis value for %EV_ABS * @irq: Irq number in case of interrupt keys + * @gpiod: GPIO descriptor */ struct gpio_keys_button { unsigned int code; @@ -29,6 +31,7 @@ struct gpio_keys_button { bool can_disable; int value; unsigned int irq; + struct gpio_desc *gpiod; }; /** diff --git a/include/linux/hash.h b/include/linux/hash.h index d0494c399392..1afde47e1528 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -15,7 +15,6 @@ */ #include <asm/types.h> -#include <asm/hash.h> #include <linux/compiler.h> /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ @@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr) return (u32)val; } -struct fast_hash_ops { - u32 (*hash)(const void *data, u32 len, u32 seed); - u32 (*hash2)(const u32 *data, u32 len, u32 seed); -}; - -/** - * arch_fast_hash - Caclulates a hash over a given buffer that can have - * arbitrary size. This function will eventually use an - * architecture-optimized hashing implementation if - * available, and trades off distribution for speed. - * - * @data: buffer to hash - * @len: length of buffer in bytes - * @seed: start seed - * - * Returns 32bit hash. - */ -extern u32 arch_fast_hash(const void *data, u32 len, u32 seed); - -/** - * arch_fast_hash2 - Caclulates a hash over a given buffer that has a - * size that is of a multiple of 32bit words. This - * function will eventually use an architecture- - * optimized hashing implementation if available, - * and trades off distribution for speed. - * - * @data: buffer to hash (must be 32bit padded) - * @len: number of 32bit words - * @seed: start seed - * - * Returns 32bit hash. - */ -extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed); - #endif /* _LINUX_HASH_H */ diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 11c0182a153b..cbb5790a35cd 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -1,9 +1,24 @@ /* * Copyright (C) 2012 Avionic Design GmbH * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
+ * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. */ #ifndef __LINUX_HDMI_H_ diff --git a/include/linux/hid.h b/include/linux/hid.h index 78ea9bf941cd..06c4607744f6 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -234,6 +234,33 @@ struct hid_item { #define HID_DG_BARRELSWITCH 0x000d0044 #define HID_DG_ERASER 0x000d0045 #define HID_DG_TABLETPICK 0x000d0046 + +#define HID_CP_CONSUMERCONTROL 0x000c0001 +#define HID_CP_NUMERICKEYPAD 0x000c0002 +#define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003 +#define HID_CP_MICROPHONE 0x000c0004 +#define HID_CP_HEADPHONE 0x000c0005 +#define HID_CP_GRAPHICEQUALIZER 0x000c0006 +#define HID_CP_FUNCTIONBUTTONS 0x000c0036 +#define HID_CP_SELECTION 0x000c0080 +#define HID_CP_MEDIASELECTION 0x000c0087 +#define HID_CP_SELECTDISC 0x000c00ba +#define HID_CP_PLAYBACKSPEED 0x000c00f1 +#define HID_CP_PROXIMITY 0x000c0109 +#define HID_CP_SPEAKERSYSTEM 0x000c0160 +#define HID_CP_CHANNELLEFT 0x000c0161 +#define HID_CP_CHANNELRIGHT 0x000c0162 +#define HID_CP_CHANNELCENTER 0x000c0163 +#define HID_CP_CHANNELFRONT 0x000c0164 +#define HID_CP_CHANNELCENTERFRONT 0x000c0165 +#define HID_CP_CHANNELSIDE 0x000c0166 +#define HID_CP_CHANNELSURROUND 0x000c0167 +#define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168 +#define HID_CP_CHANNELTOP 0x000c0169 +#define HID_CP_CHANNELUNKNOWN 0x000c016a +#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180 +#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200 + #define HID_DG_CONFIDENCE 0x000d0047 #define HID_DG_WIDTH 0x000d0048 #define HID_DG_HEIGHT 0x000d0049 @@ -312,11 +339,8 @@ struct hid_item { * Vendor specific HID device groups */ #define HID_GROUP_RMI 0x0100 - -/* - * Vendor specific HID device groups - */ #define HID_GROUP_WACOM 0x0101 +#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 /* * This is the global environment of the parser. 
This information is @@ -1063,6 +1087,17 @@ static inline void hid_hw_wait(struct hid_device *hdev) hdev->ll_driver->wait(hdev); } +/** + * hid_report_len - calculate the report length + * + * @report: the report we want to know the length + */ +static inline int hid_report_len(struct hid_report *report) +{ + /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ + return ((report->size - 1) >> 3) + 1 + (report->id > 0); +} + int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, int interrupt); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 6e6d338641fe..431b7fc605c9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -175,6 +175,52 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, } #endif /* !CONFIG_HUGETLB_PAGE */ +/* + * hugepages at page global directory. If arch support + * hugepages at pgd level, they need to define this. + */ +#ifndef pgd_huge +#define pgd_huge(x) 0 +#endif + +#ifndef pgd_write +static inline int pgd_write(pgd_t pgd) +{ + BUG(); + return 0; +} +#endif + +#ifndef pud_write +static inline int pud_write(pud_t pud) +{ + BUG(); + return 0; +} +#endif + +#ifndef is_hugepd +/* + * Some architectures requires a hugepage directory format that is + * required to support multiple hugepage sizes. For example + * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables" + * introduced the same on powerpc. This allows for a more flexible hugepage + * pagetable layout. + */ +typedef struct { unsigned long pd; } hugepd_t; +#define is_hugepd(hugepd) (0) +#define __hugepd(x) ((hugepd_t) { (x) }) +static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, + unsigned pdshift, unsigned long end, + int write, struct page **pages, int *nr) +{ + return 0; +} +#else +extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr, + unsigned pdshift, unsigned long end, + int write, struct page **pages, int *nr); +#endif #define HUGETLB_ANON_FILE "anon_hugepage" @@ -311,7 +357,8 @@ static inline struct hstate *hstate_sizelog(int page_size_log) { if (!page_size_log) return &default_hstate; - return size_to_hstate(1 << page_size_log); + + return size_to_hstate(1UL << page_size_log); } static inline struct hstate *hstate_vma(struct vm_area_struct *vma) diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 0129f89cf98d..bcc853eccc85 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -16,7 +16,6 @@ #define _LINUX_HUGETLB_CGROUP_H #include <linux/mmdebug.h> -#include <linux/res_counter.h> struct hugetlb_cgroup; /* diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 08cfaff8a072..476c685ca6f9 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -650,6 +650,8 @@ struct vmbus_channel { u8 monitor_grp; u8 monitor_bit; + bool rescind; /* got rescind msg */ + u32 ringbuffer_gpadlhandle; /* Allocated memory for ring buffer */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b556e0ab946f..e3a1721c8354 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -46,6 +46,8 @@ struct i2c_client; struct i2c_driver; union i2c_smbus_data; struct i2c_board_info; +enum i2c_slave_event; +typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); struct module; @@ -209,6 +211,8 @@ struct i2c_driver { * @irq: indicates the IRQ generated by this device (if any) * @detected: member of an i2c_driver.clients list or i2c-core's * userspace_devices list + * @slave_cb: Callback 
when I2C slave mode of an adapter is used. The adapter + * calls it to pass on slave events to the slave driver. * * An i2c_client identifies a single device (i.e. chip) connected to an * i2c bus. The behaviour exposed to Linux is defined by the driver @@ -224,6 +228,7 @@ struct i2c_client { struct device dev; /* the device structure */ int irq; /* irq issued by device */ struct list_head detected; + i2c_slave_cb_t slave_cb; /* callback for slave mode */ }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) @@ -246,6 +251,25 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) dev_set_drvdata(&dev->dev, data); } +/* I2C slave support */ + +enum i2c_slave_event { + I2C_SLAVE_REQ_READ_START, + I2C_SLAVE_REQ_READ_END, + I2C_SLAVE_REQ_WRITE_START, + I2C_SLAVE_REQ_WRITE_END, + I2C_SLAVE_STOP, +}; + +extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb); +extern int i2c_slave_unregister(struct i2c_client *client); + +static inline int i2c_slave_event(struct i2c_client *client, + enum i2c_slave_event event, u8 *val) +{ + return client->slave_cb(client, event, val); +} + /** * struct i2c_board_info - template for device creation * @type: chip type, to initialize i2c_client.name @@ -352,6 +376,8 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * into I2C transfers instead. * @functionality: Return the flags that this algorithm/adapter pair supports * from the I2C_FUNC_* flags. + * @reg_slave: Register given client to I2C slave mode of this adapter + * @unreg_slave: Unregister given client from I2C slave mode of this adapter * * The following structs are for those who like to implement new bus drivers: * i2c_algorithm is the interface to a class of hardware solutions which can @@ -359,7 +385,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * to name two of the most common. * * The return codes from the @master_xfer field should indicate the type of - * error code that occured during the transfer, as documented in the kernel + * error code that occurred during the transfer, as documented in the kernel * Documentation file Documentation/i2c/fault-codes. 
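A hedged sketch of a client driver using the slave interface introduced above: a single byte "register" that a bus master can write and later read back. struct foo_slave, its field and the event handling policy are illustrative, and locking plus the remaining events are omitted:

struct foo_slave {
        u8 reg;
};

static int foo_slave_cb(struct i2c_client *client,
                        enum i2c_slave_event event, u8 *val)
{
        struct foo_slave *priv = i2c_get_clientdata(client);

        switch (event) {
        case I2C_SLAVE_REQ_WRITE_END:
                priv->reg = *val;       /* byte received from the master */
                break;
        case I2C_SLAVE_REQ_READ_START:
                *val = priv->reg;       /* byte to transmit to the master */
                break;
        default:
                break;
        }
        return 0;
}

/* from probe():   i2c_slave_register(client, foo_slave_cb);
 * from remove():  i2c_slave_unregister(client);                     */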
*/ struct i2c_algorithm { @@ -377,6 +403,9 @@ struct i2c_algorithm { /* To determine what the adapter supports */ u32 (*functionality) (struct i2c_adapter *); + + int (*reg_slave)(struct i2c_client *client); + int (*unreg_slave)(struct i2c_client *client); }; /** diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 8cfb50f38529..0bc03f100d04 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h @@ -26,7 +26,6 @@ #define __TWL_H_ #include <linux/types.h> -#include <linux/phy/phy.h> #include <linux/input/matrix_keypad.h> /* @@ -634,7 +633,6 @@ enum twl4030_usb_mode { struct twl4030_usb_data { enum twl4030_usb_mode usb_mode; unsigned long features; - struct phy_init_data *init_data; int (*phy_init)(struct device *dev); int (*phy_exit)(struct device *dev); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b1be39c76931..4f4eea8a6288 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -19,6 +19,7 @@ #include <linux/types.h> #include <linux/if_ether.h> #include <asm/byteorder.h> +#include <asm/unaligned.h> /* * DS bit usage @@ -1066,6 +1067,12 @@ struct ieee80211_pspoll { /* TDLS */ +/* Channel switch timing */ +struct ieee80211_ch_switch_timing { + __le16 switch_time; + __le16 switch_timeout; +} __packed; + /* Link-id information element */ struct ieee80211_tdls_lnkie { u8 ie_type; /* Link Identifier IE */ @@ -1107,6 +1114,15 @@ struct ieee80211_tdls_data { u8 dialog_token; u8 variable[0]; } __packed discover_req; + struct { + u8 target_channel; + u8 oper_class; + u8 variable[0]; + } __packed chan_switch_req; + struct { + __le16 status_code; + u8 variable[0]; + } __packed chan_switch_resp; } u; } __packed; @@ -1274,7 +1290,7 @@ struct ieee80211_ht_cap { #define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 /* - * Maximum length of AMPDU that the STA can receive. + * Maximum length of AMPDU that the STA can receive in high-throughput (HT). * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) */ enum ieee80211_max_ampdu_length_exp { @@ -1284,6 +1300,21 @@ enum ieee80211_max_ampdu_length_exp { IEEE80211_HT_MAX_AMPDU_64K = 3 }; +/* + * Maximum length of AMPDU that the STA can receive in VHT. 
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) + */ +enum ieee80211_vht_max_ampdu_length_exp { + IEEE80211_VHT_MAX_AMPDU_8K = 0, + IEEE80211_VHT_MAX_AMPDU_16K = 1, + IEEE80211_VHT_MAX_AMPDU_32K = 2, + IEEE80211_VHT_MAX_AMPDU_64K = 3, + IEEE80211_VHT_MAX_AMPDU_128K = 4, + IEEE80211_VHT_MAX_AMPDU_256K = 5, + IEEE80211_VHT_MAX_AMPDU_512K = 6, + IEEE80211_VHT_MAX_AMPDU_1024K = 7 +}; + #define IEEE80211_HT_MAX_AMPDU_FACTOR 13 /* Minimum MPDU start spacing */ @@ -1998,6 +2029,16 @@ enum ieee80211_tdls_actioncode { WLAN_TDLS_DISCOVERY_REQUEST = 10, }; +/* Extended Channel Switching capability to be set in the 1st byte of + * the @WLAN_EID_EXT_CAPABILITY information element + */ +#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) + +/* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */ +#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) +#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) +#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6) + /* Interworking capabilities are set in 7th bit of 4th byte of the * @WLAN_EID_EXT_CAPABILITY information element */ @@ -2009,6 +2050,7 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) +#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) @@ -2016,6 +2058,9 @@ enum ieee80211_tdls_actioncode { /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 +/* BSS Coex IE information field bits */ +#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) + /** * enum - mesh synchronization method identifier * @@ -2398,6 +2443,30 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, return !!(tim->virtual_map[index] & mask); } +/** + * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) + * @skb: the skb containing the frame, length will not be checked + * @hdr_size: the size of the ieee80211_hdr that starts at skb->data + * + * This function assumes the frame is a data frame, and that the network header + * is in the correct place. + */ +static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) +{ + if (!skb_is_nonlinear(skb) && + skb->len > (skb_network_offset(skb) + 2)) { + /* Point to where the indication of TDLS should start */ + const u8 *tdls_data = skb_network_header(skb) - 2; + + if (get_unaligned_be16(tdls_data) == ETH_P_TDLS && + tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE && + tdls_data[3] == WLAN_CATEGORY_TDLS) + return tdls_data[4]; + } + + return -1; +} + /* convert time units */ #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) diff --git a/include/net/ieee802154.h b/include/linux/ieee802154.h index 0aa7122e8f15..6e82d888287c 100644 --- a/include/net/ieee802154.h +++ b/include/linux/ieee802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
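Both the HT enum earlier in this file and the new VHT enum encode the receiver's maximum A-MPDU size as an exponent; the size in octets follows from the factor of 13 defined next to the enums. As an illustrative helper (not part of the header):

#include <linux/ieee80211.h>

/* 2 ^ (13 + exp) - 1 octets, per the comments above */
static inline u32 demo_max_ampdu_len(u8 max_ampdu_length_exp)
{
	return (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
		       max_ampdu_length_exp)) - 1;
}

/* e.g. IEEE80211_VHT_MAX_AMPDU_1024K (7) -> 2^20 - 1 = 1048575 octets */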
- * * Written by: * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> * Maxim Gorbachyov <maxim.gorbachev@siemens.com> @@ -24,10 +20,27 @@ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ -#ifndef NET_IEEE802154_H -#define NET_IEEE802154_H +#ifndef LINUX_IEEE802154_H +#define LINUX_IEEE802154_H + +#include <linux/types.h> +#include <linux/random.h> +#include <asm/byteorder.h> #define IEEE802154_MTU 127 +#define IEEE802154_MIN_PSDU_LEN 5 + +#define IEEE802154_PAN_ID_BROADCAST 0xffff +#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff +#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe + +#define IEEE802154_EXTENDED_ADDR_LEN 8 + +#define IEEE802154_LIFS_PERIOD 40 +#define IEEE802154_SIFS_PERIOD 12 + +#define IEEE802154_MAX_CHANNEL 26 +#define IEEE802154_MAX_PAGE 31 #define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ #define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ @@ -189,7 +202,41 @@ enum { IEEE802154_SCAN_IN_PROGRESS = 0xfc, }; +/** + * ieee802154_is_valid_psdu_len - check if psdu len is valid + * @len: psdu len with (MHR + payload + MFR) + */ +static inline bool ieee802154_is_valid_psdu_len(const u8 len) +{ + return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU); +} + +/** + * ieee802154_is_valid_psdu_len - check if extended addr is valid + * @addr: extended addr to check + */ +static inline bool ieee802154_is_valid_extended_addr(const __le64 addr) +{ + /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff + * is used internally as extended to short address broadcast mapping. + * This is currently a workaround because neighbor discovery can't + * deal with short addresses types right now. + */ + return ((addr != cpu_to_le64(0x0000000000000000ULL)) && + (addr != cpu_to_le64(0xffffffffffffffffULL))); +} -#endif +/** + * ieee802154_random_extended_addr - generates a random extended address + * @addr: extended addr pointer to place the random address + */ +static inline void ieee802154_random_extended_addr(__le64 *addr) +{ + get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); + /* toggle some bit if we hit an invalid extended addr */ + if (!ieee802154_is_valid_extended_addr(*addr)) + ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01; +} +#endif /* LINUX_IEEE802154_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 808dcb8cc04f..0a8ce762a47f 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -15,6 +15,7 @@ #include <linux/netdevice.h> #include <uapi/linux/if_bridge.h> +#include <linux/bitops.h> struct br_ip { union { @@ -32,11 +33,41 @@ struct br_ip_list { struct br_ip addr; }; +#define BR_HAIRPIN_MODE BIT(0) +#define BR_BPDU_GUARD BIT(1) +#define BR_ROOT_BLOCK BIT(2) +#define BR_MULTICAST_FAST_LEAVE BIT(3) +#define BR_ADMIN_COST BIT(4) +#define BR_LEARNING BIT(5) +#define BR_FLOOD BIT(6) +#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) +#define BR_PROMISC BIT(7) +#define BR_PROXYARP BIT(8) +#define BR_LEARNING_SYNC BIT(9) + extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); typedef int br_should_route_hook_t(struct sk_buff *skb); extern br_should_route_hook_t __rcu *br_should_route_hook; +#if IS_ENABLED(CONFIG_BRIDGE) +int br_fdb_external_learn_add(struct net_device *dev, + const unsigned char *addr, u16 vid); +int br_fdb_external_learn_del(struct net_device *dev, + const unsigned char *addr, u16 vid); +#else +static inline int br_fdb_external_learn_add(struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + return 0; +} +static inline int 
br_fdb_external_learn_del(struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + return 0; +} +#endif + #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d69f0577a319..515a35e2a48a 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, } /** - * vlan_insert_tag - regular VLAN tag inserting + * __vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload - * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. - * - * Following the skb_unshare() example, in case of error, the calling function - * doesn't have to worry about freeing the original skb. + * Returns error if skb_cow_head failes. * * Does not change skb->protocol so this function can be used during receive. */ -static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline int __vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { struct vlan_ethhdr *veth; - if (skb_cow_head(skb, VLAN_HLEN) < 0) { - dev_kfree_skb_any(skb); - return NULL; - } + if (skb_cow_head(skb, VLAN_HLEN) < 0) + return -ENOMEM; + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); /* Move the mac addresses to the beginning of the new header. */ @@ -316,12 +312,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, /* now, the TCI */ veth->h_vlan_TCI = htons(vlan_tci); + return 0; +} + +/** + * vlan_insert_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + * + * Does not change skb->protocol so this function can be used during receive. + */ +static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + int err; + + err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); + if (err) { + dev_kfree_skb_any(skb); + return NULL; + } return skb; } /** - * __vlan_put_tag - regular VLAN tag inserting + * vlan_insert_tag_set_proto - regular VLAN tag inserting * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload @@ -330,8 +354,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. 
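The split above gives callers two error conventions: __vlan_insert_tag() reports failure by return value and leaves the skb alone, while vlan_insert_tag() keeps the old behaviour of freeing the skb and returning NULL. A sketch of both (the demo_* functions are hypothetical):

#include <linux/if_vlan.h>

static int demo_tag_keep_skb(struct sk_buff *skb, u16 vid)
{
	int err = __vlan_insert_tag(skb, htons(ETH_P_8021Q), vid);

	if (err)
		return err;	/* skb untouched, caller still owns it */
	return 0;
}

static struct sk_buff *demo_tag_or_drop(struct sk_buff *skb, u16 vid)
{
	/* a NULL return means the original skb has already been freed */
	return vlan_insert_tag(skb, htons(ETH_P_8021Q), vid);
}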
*/ -static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, + __be16 vlan_proto, + u16 vlan_tci) { skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); if (skb) @@ -339,39 +364,53 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, return skb; } -/** - * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting +/* + * __vlan_hwaccel_push_inside - pushes vlan tag to the payload * @skb: skbuff to tag - * @vlan_proto: VLAN encapsulation protocol - * @vlan_tci: VLAN TCI to insert * - * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest + * Pushes the VLAN tag from @skb->vlan_tci inside to the payload. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. */ -static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, - __be16 vlan_proto, - u16 vlan_tci) +static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) { - skb->vlan_proto = vlan_proto; - skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); + if (likely(skb)) + skb->vlan_tci = 0; + return skb; +} +/* + * vlan_hwaccel_push_inside - pushes vlan tag to the payload + * @skb: skbuff to tag + * + * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the + * VLAN tag from @skb->vlan_tci inside to the payload. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + */ +static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb)) + skb = __vlan_hwaccel_push_inside(skb); return skb; } /** - * vlan_put_tag - inserts VLAN tag according to device features + * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * - * Assumes skb->dev is the target that will xmit this frame. - * Returns a VLAN tagged skb. + * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest */ -static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { - if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) { - return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); - } else { - return __vlan_put_tag(skb, vlan_proto, vlan_tci); - } + skb->vlan_proto = vlan_proto; + skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; } /** diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index d8257ab60bac..2c476acb87d9 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -164,7 +164,7 @@ struct st_sensor_transfer_function { }; /** - * struct st_sensors - ST sensors list + * struct st_sensor_settings - ST specific sensor settings * @wai: Contents of WhoAmI register. * @sensors_supported: List of supported sensors by struct itself. * @ch: IIO channels for the sensor. @@ -177,7 +177,7 @@ struct st_sensor_transfer_function { * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. * @bootime: samples to discard when sensor passing from power-down to power-up. 
*/ -struct st_sensors { +struct st_sensor_settings { u8 wai; char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; struct iio_chan_spec *ch; @@ -196,7 +196,7 @@ struct st_sensors { * struct st_sensor_data - ST sensor device status * @dev: Pointer to instance of struct device (I2C or SPI). * @trig: The trigger in use by the core driver. - * @sensor: Pointer to the current sensor struct in use. + * @sensor_settings: Pointer to the specific sensor settings in use. * @current_fullscale: Maximum range of measure by the sensor. * @vdd: Pointer to sensor's Vdd power supply * @vdd_io: Pointer to sensor's Vdd-IO power supply @@ -213,7 +213,7 @@ struct st_sensors { struct st_sensor_data { struct device *dev; struct iio_trigger *trig; - struct st_sensors *sensor; + struct st_sensor_settings *sensor_settings; struct st_sensor_fullscale_avl *current_fullscale; struct regulator *vdd; struct regulator *vdd_io; @@ -279,7 +279,7 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *ch, int *val); int st_sensors_check_device_support(struct iio_dev *indio_dev, - int num_sensors_list, const struct st_sensors *sensors); + int num_sensors_list, const struct st_sensor_settings *sensor_settings); ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 15dc6bc2bdd2..3642ce7ef512 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -13,6 +13,7 @@ #include <linux/device.h> #include <linux/cdev.h> #include <linux/iio/types.h> +#include <linux/of.h> /* IIO TODO LIST */ /* * Provide means of adjusting timer accuracy. @@ -326,6 +327,11 @@ struct iio_dev; * @update_scan_mode: function to configure device and scan buffer when * channels have changed * @debugfs_reg_access: function to read or write register value of device + * @of_xlate: function pointer to obtain channel specifier index. + * When #iio-cells is greater than '0', the driver could + * provide a custom of_xlate function that reads the + * *args* and returns the appropriate index in registered + * IIO channels array. 
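A driver that sets #iio-cells to a non-zero value can supply the of_xlate callback described above to map the consumer's phandle arguments onto its channel array. A minimal hypothetical implementation using a single-cell convention:

#include <linux/iio/iio.h>
#include <linux/of.h>

static int demo_of_xlate(struct iio_dev *indio_dev,
			 const struct of_phandle_args *iiospec)
{
	if (iiospec->args_count < 1 ||
	    iiospec->args[0] >= indio_dev->num_channels)
		return -EINVAL;

	return iiospec->args[0];	/* cell value is the channel index */
}

/* wired up as .of_xlate = demo_of_xlate in the driver's iio_info */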
**/ struct iio_info { struct module *driver_module; @@ -385,6 +391,8 @@ struct iio_info { int (*debugfs_reg_access)(struct iio_dev *indio_dev, unsigned reg, unsigned writeval, unsigned *readval); + int (*of_xlate)(struct iio_dev *indio_dev, + const struct of_phandle_args *iiospec); }; /** diff --git a/include/linux/integrity.h b/include/linux/integrity.h index 83222cebd47b..c2d6082a1a4c 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h @@ -24,6 +24,7 @@ enum integrity_status { #ifdef CONFIG_INTEGRITY extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode); extern void integrity_inode_free(struct inode *inode); +extern void __init integrity_load_keys(void); #else static inline struct integrity_iint_cache * @@ -36,5 +37,10 @@ static inline void integrity_inode_free(struct inode *inode) { return; } + +static inline void integrity_load_keys(void) +{ +} #endif /* CONFIG_INTEGRITY */ + #endif /* _LINUX_INTEGRITY_H */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 69517a24bc50..d9b05b5bf8c7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -556,12 +556,6 @@ static inline void tasklet_enable(struct tasklet_struct *t) atomic_dec(&t->count); } -static inline void tasklet_hi_enable(struct tasklet_struct *t) -{ - smp_mb__before_atomic(); - atomic_dec(&t->count); -} - extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, diff --git a/include/linux/io.h b/include/linux/io.h index d5fc9b8d8b03..fa02e55e5a2e 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -61,9 +61,9 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, - unsigned long size); + resource_size_t size); void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, - unsigned long size); + resource_size_t size); void devm_iounmap(struct device *dev, void __iomem *addr); int check_signature(const volatile void __iomem *io_addr, const unsigned char *signature, int length); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b29a5982e1c3..38daa453f2e5 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -21,6 +21,7 @@ #include <linux/errno.h> #include <linux/err.h> +#include <linux/of.h> #include <linux/types.h> #include <linux/scatterlist.h> #include <trace/events/iommu.h> @@ -28,7 +29,7 @@ #define IOMMU_READ (1 << 0) #define IOMMU_WRITE (1 << 1) #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ -#define IOMMU_EXEC (1 << 3) +#define IOMMU_NOEXEC (1 << 3) struct iommu_ops; struct iommu_group; @@ -62,6 +63,7 @@ enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA transactions */ IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ + IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ }; /* @@ -105,7 +107,9 @@ enum iommu_attr { * @remove_device: remove device from iommu grouping * @domain_get_attr: Query domain attributes * @domain_set_attr: Change domain attributes + * @of_xlate: add OF master IDs to iommu grouping * @pgsize_bitmap: bitmap of supported page sizes + * @priv: per-instance data private to the iommu driver */ struct iommu_ops { bool (*capable)(enum iommu_cap); @@ -137,7 +141,12 @@ struct iommu_ops { /* Get the numer of window per domain */ u32 
(*domain_get_windows)(struct iommu_domain *domain); +#ifdef CONFIG_OF_IOMMU + int (*of_xlate)(struct device *dev, struct of_phandle_args *args); +#endif + unsigned long pgsize_bitmap; + void *priv; }; #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index 35e7eca4e33b..1eee6bcfcf76 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -6,15 +6,7 @@ #include <linux/rwsem.h> #include <linux/notifier.h> #include <linux/nsproxy.h> - -/* - * ipc namespace events - */ -#define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */ -#define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */ -#define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */ - -#define IPCNS_CALLBACK_PRI 0 +#include <linux/ns_common.h> struct user_namespace; @@ -38,7 +30,6 @@ struct ipc_namespace { unsigned int msg_ctlmni; atomic_t msg_bytes; atomic_t msg_hdrs; - int auto_msgmni; size_t shm_ctlmax; size_t shm_ctlall; @@ -68,7 +59,7 @@ struct ipc_namespace { /* user_ns which owns the ipc ns */ struct user_namespace *user_ns; - unsigned int proc_inum; + struct ns_common ns; }; extern struct ipc_namespace init_ipc_ns; @@ -77,18 +68,8 @@ extern atomic_t nr_ipc_ns; extern spinlock_t mq_lock; #ifdef CONFIG_SYSVIPC -extern int register_ipcns_notifier(struct ipc_namespace *); -extern int cond_register_ipcns_notifier(struct ipc_namespace *); -extern void unregister_ipcns_notifier(struct ipc_namespace *); -extern int ipcns_notify(unsigned long); extern void shm_destroy_orphaned(struct ipc_namespace *ns); #else /* CONFIG_SYSVIPC */ -static inline int register_ipcns_notifier(struct ipc_namespace *ns) -{ return 0; } -static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns) -{ return 0; } -static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { } -static inline int ipcns_notify(unsigned long l) { return 0; } static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {} #endif /* CONFIG_SYSVIPC */ diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 76d2acbfa7c6..838dbfa3c331 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -37,6 +37,7 @@ #include <linux/list.h> #include <linux/proc_fs.h> +#include <linux/acpi.h> /* For acpi_handle */ struct module; struct device; @@ -278,15 +279,18 @@ enum ipmi_addr_src { SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, SI_PCI, SI_DEVICETREE, SI_DEFAULT }; +const char *ipmi_addr_src_to_str(enum ipmi_addr_src src); union ipmi_smi_info_union { +#ifdef CONFIG_ACPI /* * the acpi_info element is defined for the SI_ACPI * address type */ struct { - void *acpi_handle; + acpi_handle acpi_handle; } acpi_info; +#endif }; struct ipmi_smi_info { diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index bd349240d50e..0b1e569f5ff5 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -98,12 +98,11 @@ struct ipmi_smi_handlers { operation is not allowed to fail. If an error occurs, it should report back the error in a received message. It may do this in the current call context, since no write locks - are held when this is run. If the priority is > 0, the - message will go into a high-priority queue and be sent - first. Otherwise, it goes into a normal-priority queue. */ + are held when this is run. Message are delivered one at + a time by the message handler, a new message will not be + delivered until the previous message is returned. 
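Per the comment above, messages are now handed to the SMI driver one at a time and the priority argument disappears from sender() (see the signature just below), so a driver no longer needs its own priority queues. A hypothetical skeleton, with made-up demo_* names and state:

#include <linux/ipmi_smi.h>

struct demo_smi {
	struct ipmi_smi_msg *cur_msg;	/* at most one in flight */
};

static void demo_start_hw_transaction(struct demo_smi *smi)
{
	/* push smi->cur_msg to the BMC interface here */
}

static void demo_sender(void *send_info, struct ipmi_smi_msg *msg)
{
	struct demo_smi *smi = send_info;

	smi->cur_msg = msg;		/* no queueing, no priorities */
	demo_start_hw_transaction(smi);
}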
*/ void (*sender)(void *send_info, - struct ipmi_smi_msg *msg, - int priority); + struct ipmi_smi_msg *msg); /* Called by the upper layer to request that we try to get events from the BMC we are attached to. */ @@ -212,7 +211,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, void *send_info, struct ipmi_device_id *device_id, struct device *dev, - const char *sysfs_name, unsigned char slave_addr); /* diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ff560537dd61..c694e7baa621 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -42,6 +42,7 @@ struct ipv6_devconf { __s32 accept_ra_from_local; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD __s32 optimistic_dad; + __s32 use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE __s32 mc_forwarding; @@ -316,14 +317,4 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) #define tcp_twsk_ipv6only(__sk) 0 #define inet_v6_ipv6only(__sk) 0 #endif /* IS_ENABLED(CONFIG_IPV6) */ - -#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ - (((__sk)->sk_portpair == (__ports)) && \ - ((__sk)->sk_family == AF_INET6) && \ - ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ - ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ - net_eq(sock_net(__sk), (__net))) - #endif /* _IPV6_H */ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 03a4ea37ba86..1e8b0cf30792 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -49,6 +49,10 @@ #define GICD_CTLR_ENABLE_G1A (1U << 1) #define GICD_CTLR_ENABLE_G1 (1U << 0) +#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_LPIS (1U << 17) + #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) @@ -76,9 +80,27 @@ #define GICR_MOVALLR 0x0110 #define GICR_PIDR2 GICD_PIDR2 +#define GICR_CTLR_ENABLE_LPIS (1UL << 0) + +#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) + #define GICR_WAKER_ProcessorSleep (1U << 1) #define GICR_WAKER_ChildrenAsleep (1U << 2) +#define GICR_PROPBASER_NonShareable (0U << 10) +#define GICR_PROPBASER_InnerShareable (1U << 10) +#define GICR_PROPBASER_OuterShareable (2U << 10) +#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10) +#define GICR_PROPBASER_nCnB (0U << 7) +#define GICR_PROPBASER_nC (1U << 7) +#define GICR_PROPBASER_RaWt (2U << 7) +#define GICR_PROPBASER_RaWb (3U << 7) +#define GICR_PROPBASER_WaWt (4U << 7) +#define GICR_PROPBASER_WaWb (5U << 7) +#define GICR_PROPBASER_RaWaWt (6U << 7) +#define GICR_PROPBASER_RaWaWb (7U << 7) +#define GICR_PROPBASER_IDBITS_MASK (0x1f) + /* * Re-Distributor registers, offsets from SGI_base */ @@ -91,9 +113,93 @@ #define GICR_IPRIORITYR0 GICD_IPRIORITYR #define GICR_ICFGR0 GICD_ICFGR +#define GICR_TYPER_PLPIS (1U << 0) #define GICR_TYPER_VLPIS (1U << 1) #define GICR_TYPER_LAST (1U << 4) +#define LPI_PROP_GROUP1 (1 << 1) +#define LPI_PROP_ENABLED (1 << 0) + +/* + * ITS registers, offsets from ITS_base + */ +#define GITS_CTLR 0x0000 +#define GITS_IIDR 0x0004 +#define GITS_TYPER 0x0008 +#define GITS_CBASER 0x0080 +#define GITS_CWRITER 0x0088 +#define GITS_CREADR 0x0090 +#define GITS_BASER 0x0100 +#define GITS_PIDR2 GICR_PIDR2 + +#define GITS_TRANSLATER 0x10040 + +#define GITS_TYPER_PTA (1UL << 19) + +#define GITS_CBASER_VALID (1UL << 63) +#define GITS_CBASER_nCnB (0UL << 59) +#define GITS_CBASER_nC (1UL << 59) +#define 
GITS_CBASER_RaWt (2UL << 59) +#define GITS_CBASER_RaWb (3UL << 59) +#define GITS_CBASER_WaWt (4UL << 59) +#define GITS_CBASER_WaWb (5UL << 59) +#define GITS_CBASER_RaWaWt (6UL << 59) +#define GITS_CBASER_RaWaWb (7UL << 59) +#define GITS_CBASER_NonShareable (0UL << 10) +#define GITS_CBASER_InnerShareable (1UL << 10) +#define GITS_CBASER_OuterShareable (2UL << 10) +#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10) + +#define GITS_BASER_NR_REGS 8 + +#define GITS_BASER_VALID (1UL << 63) +#define GITS_BASER_nCnB (0UL << 59) +#define GITS_BASER_nC (1UL << 59) +#define GITS_BASER_RaWt (2UL << 59) +#define GITS_BASER_RaWb (3UL << 59) +#define GITS_BASER_WaWt (4UL << 59) +#define GITS_BASER_WaWb (5UL << 59) +#define GITS_BASER_RaWaWt (6UL << 59) +#define GITS_BASER_RaWaWb (7UL << 59) +#define GITS_BASER_TYPE_SHIFT (56) +#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) +#define GITS_BASER_ENTRY_SIZE_SHIFT (48) +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) +#define GITS_BASER_NonShareable (0UL << 10) +#define GITS_BASER_InnerShareable (1UL << 10) +#define GITS_BASER_OuterShareable (2UL << 10) +#define GITS_BASER_SHAREABILITY_SHIFT (10) +#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT) +#define GITS_BASER_PAGE_SIZE_SHIFT (8) +#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) + +#define GITS_BASER_TYPE_NONE 0 +#define GITS_BASER_TYPE_DEVICE 1 +#define GITS_BASER_TYPE_VCPU 2 +#define GITS_BASER_TYPE_CPU 3 +#define GITS_BASER_TYPE_COLLECTION 4 +#define GITS_BASER_TYPE_RESERVED5 5 +#define GITS_BASER_TYPE_RESERVED6 6 +#define GITS_BASER_TYPE_RESERVED7 7 + +/* + * ITS commands + */ +#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPVI 0x0a +#define GITS_CMD_MOVI 0x01 +#define GITS_CMD_DISCARD 0x0f +#define GITS_CMD_INV 0x0c +#define GITS_CMD_MOVALL 0x0e +#define GITS_CMD_INVALL 0x0d +#define GITS_CMD_INT 0x03 +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_SYNC 0x05 + /* * CPU interface registers */ @@ -189,12 +295,34 @@ #include <linux/stringify.h> +/* + * We need a value to serve as a irq-type for LPIs. Choose one that will + * hopefully pique the interest of the reviewer. 
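The GITS_BASER accessors above make it easy to probe what kind of table each ITS BASER register describes. A sketch, assuming a 64-bit build (for readq_relaxed()) and the architectural 8-byte stride between GITS_BASER<n> registers; its_base is a hypothetical ioremap'd ITS region:

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/irqchip/arm-gic-v3.h>

static void demo_dump_its_tables(void __iomem *its_base)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		u64 val = readq_relaxed(its_base + GITS_BASER + 8 * i);

		if (GITS_BASER_TYPE(val) == GITS_BASER_TYPE_DEVICE)
			pr_info("BASER%d: device table, %d-byte entries\n",
				i, (int)GITS_BASER_ENTRY_SIZE(val));
	}
}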
+ */ +#define GIC_IRQ_TYPE_LPI 0xa110c8ed + +struct rdists { + struct { + void __iomem *rd_base; + struct page *pend_page; + phys_addr_t phys_base; + } __percpu *rdist; + struct page *prop_page; + int id_bits; + u64 flags; +}; + static inline void gic_write_eoir(u64 irq) { asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); isb(); } +struct irq_domain; +int its_cpu_init(void); +int its_init(struct device_node *node, struct rdists *rdists, + struct irq_domain *domain); + #endif #endif diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 13eed92c7d24..71d706d5f169 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -91,6 +91,8 @@ #ifndef __ASSEMBLY__ +#include <linux/irqdomain.h> + struct device_node; extern struct irq_chip gic_arch_extn; @@ -106,6 +108,8 @@ static inline void gic_init(unsigned int nr, int start, gic_init_bases(nr, start, dist, cpu, 0, NULL); } +int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); + void gic_send_sgi(unsigned int cpu_id, unsigned int irq); int gic_get_cpu_id(unsigned int cpu); void gic_migrate_target(unsigned int new_cpu_id); diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h new file mode 100644 index 000000000000..420f77b34d02 --- /dev/null +++ b/include/linux/irqchip/mips-gic.h @@ -0,0 +1,249 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000, 07 MIPS Technologies, Inc. + */ +#ifndef __LINUX_IRQCHIP_MIPS_GIC_H +#define __LINUX_IRQCHIP_MIPS_GIC_H + +#include <linux/clocksource.h> + +#define GIC_MAX_INTRS 256 + +/* Constants */ +#define GIC_POL_POS 1 +#define GIC_POL_NEG 0 +#define GIC_TRIG_EDGE 1 +#define GIC_TRIG_LEVEL 0 +#define GIC_TRIG_DUAL_ENABLE 1 +#define GIC_TRIG_DUAL_DISABLE 0 + +#define MSK(n) ((1 << (n)) - 1) + +/* Accessors */ +#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS) + +/* GIC Address Space */ +#define SHARED_SECTION_OFS 0x0000 +#define SHARED_SECTION_SIZE 0x8000 +#define VPE_LOCAL_SECTION_OFS 0x8000 +#define VPE_LOCAL_SECTION_SIZE 0x4000 +#define VPE_OTHER_SECTION_OFS 0xc000 +#define VPE_OTHER_SECTION_SIZE 0x4000 +#define USM_VISIBLE_SECTION_OFS 0x10000 +#define USM_VISIBLE_SECTION_SIZE 0x10000 + +/* Register Map for Shared Section */ + +#define GIC_SH_CONFIG_OFS 0x0000 + +/* Shared Global Counter */ +#define GIC_SH_COUNTER_31_00_OFS 0x0010 +#define GIC_SH_COUNTER_63_32_OFS 0x0014 +#define GIC_SH_REVISIONID_OFS 0x0020 + +/* Convert an interrupt number to a byte offset/bit for multi-word registers */ +#define GIC_INTR_OFS(intr) (((intr) / 32) * 4) +#define GIC_INTR_BIT(intr) ((intr) % 32) + +/* Polarity : Reset Value is always 0 */ +#define GIC_SH_SET_POLARITY_OFS 0x0100 + +/* Triggering : Reset Value is always 0 */ +#define GIC_SH_SET_TRIGGER_OFS 0x0180 + +/* Dual edge triggering : Reset Value is always 0 */ +#define GIC_SH_SET_DUAL_OFS 0x0200 + +/* Set/Clear corresponding bit in Edge Detect Register */ +#define GIC_SH_WEDGE_OFS 0x0280 + +/* Mask manipulation */ +#define GIC_SH_RMASK_OFS 0x0300 +#define GIC_SH_SMASK_OFS 0x0380 + +/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */ +#define GIC_SH_MASK_OFS 0x0400 + +/* Pending Global Interrupts (RO) */ +#define GIC_SH_PEND_OFS 0x0480 + +/* Maps Interrupt X to a Pin */ +#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500 +#define GIC_SH_MAP_TO_PIN(intr) (4 
* (intr)) + +/* Maps Interrupt X to a VPE */ +#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000 +#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \ + ((32 * (intr)) + (((vpe) / 32) * 4)) +#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32)) + +/* Register Map for Local Section */ +#define GIC_VPE_CTL_OFS 0x0000 +#define GIC_VPE_PEND_OFS 0x0004 +#define GIC_VPE_MASK_OFS 0x0008 +#define GIC_VPE_RMASK_OFS 0x000c +#define GIC_VPE_SMASK_OFS 0x0010 +#define GIC_VPE_WD_MAP_OFS 0x0040 +#define GIC_VPE_COMPARE_MAP_OFS 0x0044 +#define GIC_VPE_TIMER_MAP_OFS 0x0048 +#define GIC_VPE_FDC_MAP_OFS 0x004c +#define GIC_VPE_PERFCTR_MAP_OFS 0x0050 +#define GIC_VPE_SWINT0_MAP_OFS 0x0054 +#define GIC_VPE_SWINT1_MAP_OFS 0x0058 +#define GIC_VPE_OTHER_ADDR_OFS 0x0080 +#define GIC_VPE_WD_CONFIG0_OFS 0x0090 +#define GIC_VPE_WD_COUNT0_OFS 0x0094 +#define GIC_VPE_WD_INITIAL0_OFS 0x0098 +#define GIC_VPE_COMPARE_LO_OFS 0x00a0 +#define GIC_VPE_COMPARE_HI_OFS 0x00a4 + +#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100 +#define GIC_VPE_EIC_SS(intr) (4 * (intr)) + +#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800 +#define GIC_VPE_EIC_VEC(intr) (4 * (intr)) + +#define GIC_VPE_TENABLE_NMI_OFS 0x1000 +#define GIC_VPE_TENABLE_YQ_OFS 0x1004 +#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080 +#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084 + +/* User Mode Visible Section Register Map */ +#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000 +#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004 + +/* Masks */ +#define GIC_SH_CONFIG_COUNTSTOP_SHF 28 +#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF) + +#define GIC_SH_CONFIG_COUNTBITS_SHF 24 +#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF) + +#define GIC_SH_CONFIG_NUMINTRS_SHF 16 +#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF) + +#define GIC_SH_CONFIG_NUMVPES_SHF 0 +#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF) + +#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31)) +#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31)) + +#define GIC_MAP_TO_PIN_SHF 31 +#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF) +#define GIC_MAP_TO_NMI_SHF 30 +#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF) +#define GIC_MAP_TO_YQ_SHF 29 +#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF) +#define GIC_MAP_SHF 0 +#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF) + +/* GIC_VPE_CTL Masks */ +#define GIC_VPE_CTL_FDC_RTBL_SHF 4 +#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF) +#define GIC_VPE_CTL_SWINT_RTBL_SHF 3 +#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF) +#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2 +#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF) +#define GIC_VPE_CTL_TIMER_RTBL_SHF 1 +#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF) +#define GIC_VPE_CTL_EIC_MODE_SHF 0 +#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF) + +/* GIC_VPE_PEND Masks */ +#define GIC_VPE_PEND_WD_SHF 0 +#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF) +#define GIC_VPE_PEND_CMP_SHF 1 +#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF) +#define GIC_VPE_PEND_TIMER_SHF 2 +#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF) +#define GIC_VPE_PEND_PERFCOUNT_SHF 3 +#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF) +#define GIC_VPE_PEND_SWINT0_SHF 4 +#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF) +#define GIC_VPE_PEND_SWINT1_SHF 5 +#define GIC_VPE_PEND_SWINT1_MSK 
(MSK(1) << GIC_VPE_PEND_SWINT1_SHF) + +/* GIC_VPE_RMASK Masks */ +#define GIC_VPE_RMASK_WD_SHF 0 +#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF) +#define GIC_VPE_RMASK_CMP_SHF 1 +#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF) +#define GIC_VPE_RMASK_TIMER_SHF 2 +#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF) +#define GIC_VPE_RMASK_PERFCNT_SHF 3 +#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF) +#define GIC_VPE_RMASK_SWINT0_SHF 4 +#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF) +#define GIC_VPE_RMASK_SWINT1_SHF 5 +#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF) + +/* GIC_VPE_SMASK Masks */ +#define GIC_VPE_SMASK_WD_SHF 0 +#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF) +#define GIC_VPE_SMASK_CMP_SHF 1 +#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF) +#define GIC_VPE_SMASK_TIMER_SHF 2 +#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF) +#define GIC_VPE_SMASK_PERFCNT_SHF 3 +#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF) +#define GIC_VPE_SMASK_SWINT0_SHF 4 +#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF) +#define GIC_VPE_SMASK_SWINT1_SHF 5 +#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF) + +/* GIC nomenclature for Core Interrupt Pins. */ +#define GIC_CPU_INT0 0 /* Core Interrupt 2 */ +#define GIC_CPU_INT1 1 /* . */ +#define GIC_CPU_INT2 2 /* . */ +#define GIC_CPU_INT3 3 /* . */ +#define GIC_CPU_INT4 4 /* . */ +#define GIC_CPU_INT5 5 /* Core Interrupt 7 */ + +/* Add 2 to convert GIC CPU pin to core interrupt */ +#define GIC_CPU_PIN_OFFSET 2 + +/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */ +#define GIC_CPU_TO_VEC_OFFSET 2 + +/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ +#define GIC_PIN_TO_VEC_OFFSET 1 + +/* Local GIC interrupts. */ +#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */ +#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */ +#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */ +#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */ +#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */ +#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */ +#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */ +#define GIC_NUM_LOCAL_INTRS 7 + +/* Convert between local/shared IRQ number and GIC HW IRQ number. 
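For reference, the pin and vector offsets above compose as follows (values are purely illustrative; the demo_* helpers are not part of the header):

#include <linux/irqchip/mips-gic.h>

/* GIC CPU pin -> MIPS core interrupt, e.g. GIC_CPU_INT3 -> core int 5 */
static unsigned int demo_core_irq_for_pin(unsigned int gic_cpu_pin)
{
	return gic_cpu_pin + GIC_CPU_PIN_OFFSET;
}

/* non-EIC hardware interrupt -> EIC vector number */
static unsigned int demo_eic_vector(unsigned int hw_int)
{
	return hw_int + GIC_CPU_TO_VEC_OFFSET;
}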
*/ +#define GIC_LOCAL_HWIRQ_BASE 0 +#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE) +#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS +#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) + +extern unsigned int gic_present; + +extern void gic_init(unsigned long gic_base_addr, + unsigned long gic_addrspace_size, unsigned int cpu_vec, + unsigned int irqbase); +extern void gic_clocksource_init(unsigned int); +extern cycle_t gic_read_count(void); +extern unsigned int gic_get_count_width(void); +extern cycle_t gic_read_compare(void); +extern void gic_write_compare(cycle_t cnt); +extern void gic_write_cpu_compare(cycle_t cnt, int cpu); +extern void gic_send_ipi(unsigned int intr); +extern unsigned int plat_ipi_call_int_xlate(unsigned int); +extern unsigned int plat_ipi_resched_int_xlate(unsigned int); +extern unsigned int gic_get_timer_pending(void); +extern int gic_get_c0_compare_int(void); +extern int gic_get_c0_perfcount_int(void); +#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h index 866caaa9e2bb..c2ce155d83cc 100644 --- a/include/linux/kern_levels.h +++ b/include/linux/kern_levels.h @@ -22,4 +22,17 @@ */ #define KERN_CONT "" +/* integer equivalents of KERN_<LEVEL> */ +#define LOGLEVEL_SCHED -2 /* Deferred messages from sched code + * are set to this special level */ +#define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */ +#define LOGLEVEL_EMERG 0 /* system is unusable */ +#define LOGLEVEL_ALERT 1 /* action must be taken immediately */ +#define LOGLEVEL_CRIT 2 /* critical conditions */ +#define LOGLEVEL_ERR 3 /* error conditions */ +#define LOGLEVEL_WARNING 4 /* warning conditions */ +#define LOGLEVEL_NOTICE 5 /* normal but significant condition */ +#define LOGLEVEL_INFO 6 /* informational */ +#define LOGLEVEL_DEBUG 7 /* debug-level messages */ + #endif diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 446d76a87ba1..5449d2f4a1ef 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -416,9 +416,6 @@ extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); extern int func_ptr_is_kernel_text(void *ptr); -struct pid; -extern struct pid *session_of_pgrp(struct pid *pgrp); - unsigned long int_sqrt(unsigned long); extern void bust_spinlocks(int yes); @@ -427,6 +424,7 @@ extern int panic_timeout; extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; +extern int panic_on_warn; extern int sysctl_panic_on_stackoverflow; /* * Only to be used by arch init code. 
If the user over-wrote the default diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index b9376cd5a187..25a822f6f000 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -68,6 +68,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) * Number of interrupts per specific IRQ source, since bootup */ extern unsigned int kstat_irqs(unsigned int irq); +extern unsigned int kstat_irqs_usr(unsigned int irq); /* * Number of interrupts per cpu, since bootup diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 30faf797c2c3..d4e01b358341 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -179,6 +179,7 @@ struct kernfs_open_file { struct mutex mutex; int event; struct list_head list; + char *prealloc_buf; size_t atomic_write_len; bool mmapped; @@ -214,6 +215,13 @@ struct kernfs_ops { * larger ones are rejected with -E2BIG. */ size_t atomic_write_len; + /* + * "prealloc" causes a buffer to be allocated at open for + * all read/write requests. As ->seq_show uses seq_read() + * which does its own allocation, it is incompatible with + * ->prealloc. Provide ->read and ->write with ->prealloc. + */ + bool prealloc; ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, loff_t off); diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 057e95971014..e705467ddb47 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h @@ -21,6 +21,8 @@ #ifndef __KMEMLEAK_H #define __KMEMLEAK_H +#include <linux/slab.h> + #ifdef CONFIG_DEBUG_KMEMLEAK extern void kmemleak_init(void) __ref; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index f7296e57d614..5297f9fa0ef2 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -335,6 +335,7 @@ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, extern int arch_prepare_kprobe_ftrace(struct kprobe *p); #endif +int arch_check_ftrace_location(struct kprobe *p); /* Get the kprobe at this addr (if any) - called with preemption disabled */ struct kprobe *get_kprobe(void *addr); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index a6059bdf7b03..26f106022c88 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -43,6 +43,7 @@ * include/linux/kvm_h. */ #define KVM_MEMSLOT_INVALID (1UL << 16) +#define KVM_MEMSLOT_INCOHERENT (1UL << 17) /* Two fragments for cross MMIO pages. */ #define KVM_MAX_MMIO_FRAGMENTS 2 @@ -353,6 +354,8 @@ struct kvm_memslots { struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; /* The mapping table from slot id to the index in memslots[]. */ short id_to_index[KVM_MEM_SLOTS_NUM]; + atomic_t lru_slot; + int used_slots; }; struct kvm { @@ -395,7 +398,6 @@ struct kvm { * Update side is protected by irq_lock. 
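The new kernfs ->prealloc flag above pairs with plain ->read/->write handlers (it is explicitly incompatible with ->seq_show). A hypothetical attribute using it might look like this:

#include <linux/kernfs.h>

static ssize_t demo_write(struct kernfs_open_file *of, char *buf,
			  size_t bytes, loff_t off)
{
	/* buf is of->prealloc_buf, allocated once at open time;
	 * parse up to @bytes bytes here */
	return bytes;
}

static const struct kernfs_ops demo_ops = {
	.prealloc	  = true,
	.atomic_write_len = 128,	/* small writes only, illustrative */
	.write		  = demo_write,
};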
*/ struct kvm_irq_routing_table __rcu *irq_routing; - struct hlist_head mask_notifier_list; #endif #ifdef CONFIG_HAVE_KVM_IRQFD struct hlist_head irq_ack_notifier_list; @@ -447,6 +449,14 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); int __must_check vcpu_load(struct kvm_vcpu *vcpu); void vcpu_put(struct kvm_vcpu *vcpu); +#ifdef __KVM_HAVE_IOAPIC +void kvm_vcpu_request_scan_ioapic(struct kvm *kvm); +#else +static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm) +{ +} +#endif + #ifdef CONFIG_HAVE_KVM_IRQFD int kvm_irqfd_init(void); void kvm_irqfd_exit(void); @@ -711,44 +721,6 @@ struct kvm_irq_ack_notifier { void (*irq_acked)(struct kvm_irq_ack_notifier *kian); }; -struct kvm_assigned_dev_kernel { - struct kvm_irq_ack_notifier ack_notifier; - struct list_head list; - int assigned_dev_id; - int host_segnr; - int host_busnr; - int host_devfn; - unsigned int entries_nr; - int host_irq; - bool host_irq_disabled; - bool pci_2_3; - struct msix_entry *host_msix_entries; - int guest_irq; - struct msix_entry *guest_msix_entries; - unsigned long irq_requested_type; - int irq_source_id; - int flags; - struct pci_dev *dev; - struct kvm *kvm; - spinlock_t intx_lock; - spinlock_t intx_mask_lock; - char irq_name[32]; - struct pci_saved_state *pci_saved_state; -}; - -struct kvm_irq_mask_notifier { - void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); - int irq; - struct hlist_node link; -}; - -void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, - struct kvm_irq_mask_notifier *kimn); -void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, - struct kvm_irq_mask_notifier *kimn); -void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, - bool mask); - int kvm_irq_map_gsi(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *entries, int gsi); int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); @@ -770,12 +742,6 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); -int kvm_iommu_map_guest(struct kvm *kvm); -int kvm_iommu_unmap_guest(struct kvm *kvm); -int kvm_assign_device(struct kvm *kvm, - struct kvm_assigned_dev_kernel *assigned_dev); -int kvm_deassign_device(struct kvm *kvm, - struct kvm_assigned_dev_kernel *assigned_dev); #else static inline int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) @@ -787,11 +753,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { } - -static inline int kvm_iommu_unmap_guest(struct kvm *kvm) -{ - return 0; -} #endif static inline void kvm_guest_enter(void) @@ -832,12 +793,28 @@ static inline void kvm_guest_exit(void) static inline struct kvm_memory_slot * search_memslots(struct kvm_memslots *slots, gfn_t gfn) { - struct kvm_memory_slot *memslot; + int start = 0, end = slots->used_slots; + int slot = atomic_read(&slots->lru_slot); + struct kvm_memory_slot *memslots = slots->memslots; + + if (gfn >= memslots[slot].base_gfn && + gfn < memslots[slot].base_gfn + memslots[slot].npages) + return &memslots[slot]; - kvm_for_each_memslot(memslot, slots) - if (gfn >= memslot->base_gfn && - gfn < memslot->base_gfn + memslot->npages) - return memslot; + while (start < end) { + slot = start + (end - start) / 2; + + if (gfn >= memslots[slot].base_gfn) + end = slot; + else + start = slot + 1; + } + + if (gfn >= memslots[start].base_gfn && + gfn < 
memslots[start].base_gfn + memslots[start].npages) { + atomic_set(&slots->lru_slot, start); + return &memslots[start]; + } return NULL; } @@ -1011,25 +988,6 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } #endif -#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT - -long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, - unsigned long arg); - -void kvm_free_all_assigned_devices(struct kvm *kvm); - -#else - -static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, - unsigned long arg) -{ - return -ENOTTY; -} - -static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {} - -#endif - static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) { set_bit(req, &vcpu->requests); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index b606bb689a3e..931da7e917cf 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -54,33 +54,6 @@ typedef u64 hfn_t; typedef hfn_t pfn_t; -union kvm_ioapic_redirect_entry { - u64 bits; - struct { - u8 vector; - u8 delivery_mode:3; - u8 dest_mode:1; - u8 delivery_status:1; - u8 polarity:1; - u8 remote_irr:1; - u8 trig_mode:1; - u8 mask:1; - u8 reserve:7; - u8 reserved[4]; - u8 dest_id; - } fields; -}; - -struct kvm_lapic_irq { - u32 vector; - u32 delivery_mode; - u32 dest_mode; - u32 level; - u32 trig_mode; - u32 shorthand; - u32 dest_id; -}; - struct gfn_to_hva_cache { u64 generation; gpa_t gpa; diff --git a/include/linux/leds.h b/include/linux/leds.h index a57611d0c94e..cfceef32c9b3 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -13,6 +13,7 @@ #define __LINUX_LEDS_H_INCLUDED #include <linux/list.h> +#include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/spinlock.h> #include <linux/timer.h> @@ -42,11 +43,20 @@ struct led_classdev { #define LED_BLINK_ONESHOT (1 << 17) #define LED_BLINK_ONESHOT_STOP (1 << 18) #define LED_BLINK_INVERT (1 << 19) +#define LED_SYSFS_DISABLE (1 << 20) +#define SET_BRIGHTNESS_ASYNC (1 << 21) +#define SET_BRIGHTNESS_SYNC (1 << 22) /* Set LED brightness level */ /* Must not sleep, use a workqueue if needed */ void (*brightness_set)(struct led_classdev *led_cdev, enum led_brightness brightness); + /* + * Set LED brightness level immediately - it can block the caller for + * the time required for accessing a LED device register. + */ + int (*brightness_set_sync)(struct led_classdev *led_cdev, + enum led_brightness brightness); /* Get LED brightness level */ enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); @@ -85,6 +95,9 @@ struct led_classdev { /* true if activated - deactivate routine uses it to do cleanup */ bool activated; #endif + + /* Ensures consistent access to the LED Flash Class device */ + struct mutex led_access; }; extern int led_classdev_register(struct device *parent, @@ -151,6 +164,33 @@ extern void led_set_brightness(struct led_classdev *led_cdev, */ extern int led_update_brightness(struct led_classdev *led_cdev); +/** + * led_sysfs_disable - disable LED sysfs interface + * @led_cdev: the LED to set + * + * Disable the led_cdev's sysfs interface. + */ +extern void led_sysfs_disable(struct led_classdev *led_cdev); + +/** + * led_sysfs_enable - enable LED sysfs interface + * @led_cdev: the LED to set + * + * Enable the led_cdev's sysfs interface. 
+ */ +extern void led_sysfs_enable(struct led_classdev *led_cdev); + +/** + * led_sysfs_is_disabled - check if LED sysfs interface is disabled + * @led_cdev: the LED to query + * + * Returns: true if the led_cdev's sysfs interface is disabled. + */ +static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev) +{ + return led_cdev->flags & LED_SYSFS_DISABLE; +} + /* * LED Triggers */ @@ -261,6 +301,7 @@ struct gpio_led { unsigned retain_state_suspended : 1; unsigned default_state : 2; /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ + struct gpio_desc *gpiod; }; #define LEDS_GPIO_DEFSTATE_OFF 0 #define LEDS_GPIO_DEFSTATE_ON 1 @@ -273,7 +314,7 @@ struct gpio_led_platform_data { #define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ #define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ #define GPIO_LED_BLINK 2 /* Please, blink */ - int (*gpio_blink_set)(unsigned gpio, int state, + int (*gpio_blink_set)(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off); }; diff --git a/include/linux/libata.h b/include/linux/libata.h index bfbc817c34ee..2d182413b1db 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -191,7 +191,8 @@ enum { ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ ATA_DEV_SEMB = 7, /* SEMB */ ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ - ATA_DEV_NONE = 9, /* no device */ + ATA_DEV_ZAC = 9, /* ZAC device */ + ATA_DEV_NONE = 10, /* no device */ /* struct ata_link flags */ ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ @@ -1491,7 +1492,8 @@ static inline unsigned int ata_tag_internal(unsigned int tag) static inline unsigned int ata_class_enabled(unsigned int class) { return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || - class == ATA_DEV_PMP || class == ATA_DEV_SEMB; + class == ATA_DEV_PMP || class == ATA_DEV_SEMB || + class == ATA_DEV_ZAC; } static inline unsigned int ata_class_disabled(unsigned int class) diff --git a/include/linux/list.h b/include/linux/list.h index f33f831eb3c8..feb773c76ee0 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -346,7 +346,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_entry(ptr, type, member) \ container_of(ptr, type, member) @@ -355,7 +355,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_first_entry - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ @@ -366,7 +366,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_last_entry - get the last element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ @@ -377,7 +377,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_first_entry_or_null - get the first element from a list * @ptr: the list head to take the element from. 
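The LED sysfs helpers above let an in-kernel user take brightness control away from userspace. A sketch follows; whether led_access must be held around these calls is an assumption here, since the header only says the mutex ensures consistent access:

#include <linux/leds.h>

static void demo_take_led(struct led_classdev *led_cdev)
{
	mutex_lock(&led_cdev->led_access);
	led_sysfs_disable(led_cdev);	/* sysfs brightness interface off */
	mutex_unlock(&led_cdev->led_access);
}

static void demo_release_led(struct led_classdev *led_cdev)
{
	mutex_lock(&led_cdev->led_access);
	led_sysfs_enable(led_cdev);
	mutex_unlock(&led_cdev->led_access);
}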
* @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. */ @@ -387,7 +387,7 @@ static inline void list_splice_tail_init(struct list_head *list, /** * list_next_entry - get the next element in list * @pos: the type * to cursor - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) @@ -395,7 +395,7 @@ static inline void list_splice_tail_init(struct list_head *list, /** * list_prev_entry - get the prev element in list * @pos: the type * to cursor - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_prev_entry(pos, member) \ list_entry((pos)->member.prev, typeof(*(pos)), member) @@ -441,7 +441,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ @@ -452,7 +452,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_reverse - iterate backwards over list of given type. * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member); \ @@ -463,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() * @pos: the type * to use as a start point * @head: the head of the list - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). */ @@ -474,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position. @@ -488,7 +488,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_continue_reverse - iterate backwards from the given point * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Start to iterate over list of given type backwards, continuing after * the current position. @@ -502,7 +502,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_from - iterate over list of given type from the current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. 
- * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing from current position. */ @@ -515,7 +515,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member), \ @@ -528,7 +528,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing after current point, * safe against removal of list entry. @@ -544,7 +544,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type from current point, safe against * removal of list entry. @@ -559,7 +559,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate backwards over list of given type, safe against removal * of list entry. @@ -574,7 +574,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_safe_reset_next - reset a stale list_for_each_entry_safe loop * @pos: the loop cursor used in the list_for_each_entry_safe loop * @n: temporary storage used in list_for_each_entry_safe - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * list_safe_reset_next is not safe to use in general if the list may be * modified concurrently (eg. the lock is dropped in the loop body). An diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h index 257d3779f2ab..0ca8109934e4 100644 --- a/include/linux/lockd/debug.h +++ b/include/linux/lockd/debug.h @@ -17,12 +17,8 @@ * Enable lockd debugging. * Requires RPC_DEBUG. */ -#ifdef RPC_DEBUG -# define LOCKD_DEBUG 1 -#endif - #undef ifdebug -#if defined(RPC_DEBUG) && defined(LOCKD_DEBUG) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) #else # define ifdebug(flag) if (0) diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h index 307d9cab2026..1726ccbd8009 100644 --- a/include/linux/mailbox_client.h +++ b/include/linux/mailbox_client.h @@ -25,6 +25,8 @@ struct mbox_chan; * if the client receives some ACK packet for transmission. * Unused if the controller already has TX_Done/RTR IRQ. * @rx_callback: Atomic callback to provide client the data received + * @tx_prepare: Atomic callback to ask client to prepare the payload + * before initiating the transmission if required. 
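/*
 * Editor's note (not part of the diff): sketch of a mailbox client wiring up
 * the new tx_prepare hook described above. Names are hypothetical; channel
 * setup via mbox_request_channel() and the payload layout are omitted.
 */
#include <linux/mailbox_client.h>

static void ex_tx_prepare(struct mbox_client *cl, void *msg)
{
	/* refresh the shared-memory payload right before the controller
	 * starts the transfer */
}

static void ex_rx_callback(struct mbox_client *cl, void *msg)
{
	/* consume the received message */
}

static struct mbox_client ex_mbox_client = {
	.tx_prepare	= ex_tx_prepare,
	.rx_callback	= ex_rx_callback,
};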
* @tx_done: Atomic callback to tell client of data transmission */ struct mbox_client { @@ -34,6 +36,7 @@ struct mbox_client { bool knows_txdone; void (*rx_callback)(struct mbox_client *cl, void *mssg); + void (*tx_prepare)(struct mbox_client *cl, void *mssg); void (*tx_done)(struct mbox_client *cl, void *mssg, int r); }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 8e9a029e093d..e6982ac3200d 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -16,6 +16,7 @@ #define MARVELL_PHY_ID_88E1318S 0x01410e90 #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 +#define MARVELL_PHY_ID_88E3016 0x01410e60 /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6b75640ef5ab..7c95af8d552c 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -25,7 +25,6 @@ #include <linux/jump_label.h> struct mem_cgroup; -struct page_cgroup; struct page; struct mm_struct; struct kmem_cache; @@ -68,10 +67,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); -bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, - struct mem_cgroup *memcg); -bool task_in_mem_cgroup(struct task_struct *task, - const struct mem_cgroup *memcg); +bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, + struct mem_cgroup *root); +bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); @@ -79,15 +77,16 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); -static inline -bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) +static inline bool mm_match_cgroup(struct mm_struct *mm, + struct mem_cgroup *memcg) { struct mem_cgroup *task_memcg; - bool match; + bool match = false; rcu_read_lock(); task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); - match = __mem_cgroup_same_or_subtree(memcg, task_memcg); + if (task_memcg) + match = mem_cgroup_is_descendant(task_memcg, memcg); rcu_read_unlock(); return match; } @@ -141,8 +140,8 @@ static inline bool mem_cgroup_disabled(void) struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, unsigned long *flags); -void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, - unsigned long flags); +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked, + unsigned long *flags); void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx, int val); @@ -174,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, void mem_cgroup_split_huge_fixup(struct page *head); #endif -#ifdef CONFIG_DEBUG_VM -bool mem_cgroup_bad_page_check(struct page *page); -void mem_cgroup_print_bad_page(struct page *page); -#endif #else /* CONFIG_MEMCG */ struct mem_cgroup; @@ -297,7 +292,7 @@ static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, } static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, - bool locked, unsigned 
long flags) + bool *locked, unsigned long *flags) { } @@ -347,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) } #endif /* CONFIG_MEMCG */ -#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM) -static inline bool -mem_cgroup_bad_page_check(struct page *page) -{ - return false; -} - -static inline void -mem_cgroup_print_bad_page(struct page *page) -{ -} -#endif - enum { UNDER_LIMIT, SOFT_LIMIT, @@ -418,8 +400,8 @@ int memcg_cache_id(struct mem_cgroup *memcg); void memcg_update_array_size(int num_groups); -struct kmem_cache * -__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); +struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); +void __memcg_kmem_put_cache(struct kmem_cache *cachep); int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); void __memcg_uncharge_slab(struct kmem_cache *cachep, int order); @@ -447,9 +429,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) /* * __GFP_NOFAIL allocations will move on even if charging is not * possible. Therefore we don't even try, and have this allocation - * unaccounted. We could in theory charge it with - * res_counter_charge_nofail, but we hope those allocations are rare, - * and won't be worth the trouble. + * unaccounted. We could in theory charge it forcibly, but we hope + * those allocations are rare, and won't be worth the trouble. */ if (gfp & __GFP_NOFAIL) return true; @@ -467,8 +448,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) * memcg_kmem_uncharge_pages: uncharge pages from memcg * @page: pointer to struct page being freed * @order: allocation order. - * - * there is no need to specify memcg here, since it is embedded in page_cgroup */ static inline void memcg_kmem_uncharge_pages(struct page *page, int order) @@ -485,8 +464,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order) * * Needs to be called after memcg_kmem_newpage_charge, regardless of success or * failure of the allocation. if @page is NULL, this function will revert the - * charges. Otherwise, it will commit the memcg given by @memcg to the - * corresponding page_cgroup. + * charges. Otherwise, it will commit @page to @memcg. 
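/*
 * Editor's note (not part of the diff): sketch of the begin/end page-stat
 * pairing with the new by-pointer @locked/@flags arguments from the
 * memcontrol.h hunk above. MEM_CGROUP_STAT_FILE_MAPPED is assumed to be a
 * valid mem_cgroup_stat_index value; it is not shown in this hunk, and the
 * NULL check is kept only for clarity.
 */
#include <linux/memcontrol.h>
#include <linux/mm_types.h>

static void ex_account_unmap(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;
	bool locked;

	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
	if (memcg)
		mem_cgroup_update_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED, -1);
	mem_cgroup_end_page_stat(memcg, &locked, &flags);
}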
*/ static inline void memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) @@ -514,7 +492,13 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) if (unlikely(fatal_signal_pending(current))) return cachep; - return __memcg_kmem_get_cache(cachep, gfp); + return __memcg_kmem_get_cache(cachep); +} + +static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) +{ + if (memcg_kmem_enabled()) + __memcg_kmem_put_cache(cachep); } #else #define for_each_memcg_cache_index(_idx) \ @@ -550,6 +534,10 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { return cachep; } + +static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) +{ +} #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index f34723f7663c..910e3aa1e965 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h @@ -141,6 +141,7 @@ struct arizona { uint16_t dac_comp_coeff; uint8_t dac_comp_enabled; + struct mutex dac_comp_lock; }; int arizona_clk32k_enable(struct arizona *arizona); diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index cb01496bfa49..8e1cdbef3dad 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h @@ -99,12 +99,6 @@ struct davinci_vcif { dma_addr_t dma_rx_addr; }; -struct cq93vc { - struct platform_device *pdev; - struct snd_soc_codec *codec; - u32 sysclk; -}; - struct davinci_vc; struct davinci_vc { @@ -122,7 +116,6 @@ struct davinci_vc { /* Client devices */ struct davinci_vcif davinci_vcif; - struct cq93vc cq93vc; }; #endif diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 53d33dee70e1..2e5b194b9b19 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -37,7 +37,6 @@ /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 -#define MICREL_PHY_25MHZ_CLK 0x00000002 #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 379c02648ab3..64d25941b329 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -67,6 +67,8 @@ enum { MLX4_CMD_MAP_ICM_AUX = 0xffc, MLX4_CMD_UNMAP_ICM_AUX = 0xffb, MLX4_CMD_SET_ICM_SIZE = 0xffd, + MLX4_CMD_ACCESS_REG = 0x3b, + /*master notify fw on finish for slave's flr*/ MLX4_CMD_INFORM_FLR_DONE = 0x5b, MLX4_CMD_GET_OP_REQ = 0x59, @@ -197,6 +199,33 @@ enum { MLX4_CMD_NATIVE }; +/* + * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP - + * Receive checksum value is reported in CQE also for non TCP/UDP packets. + * + * MLX4_RX_CSUM_MODE_L4 - + * L4_CSUM bit in CQE, which indicates whether or not L4 checksum + * was validated correctly, is supported. + * + * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP - + * IP_OK CQE's field is supported also for non TCP/UDP IP packets. + * + * MLX4_RX_CSUM_MODE_MULTI_VLAN - + * Receive Checksum offload is supported for packets with more than 2 vlan headers. 
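/*
 * Editor's note (not part of the diff): presumed use of the new
 * dac_comp_lock from the arizona hunk above, i.e. serializing updates to the
 * dac_comp_* fields. This is an assumption; the lock would be initialised
 * with mutex_init() at probe time and the function name is hypothetical.
 */
#include <linux/mutex.h>
#include <linux/mfd/arizona/core.h>

static void ex_set_dac_comp(struct arizona *arizona, uint16_t coeff, bool enable)
{
	mutex_lock(&arizona->dac_comp_lock);
	arizona->dac_comp_coeff = coeff;
	arizona->dac_comp_enabled = enable;
	mutex_unlock(&arizona->dac_comp_lock);
}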
+ */ +enum mlx4_rx_csum_mode { + MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0, + MLX4_RX_CSUM_MODE_L4 = 1UL << 1, + MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2, + MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3 +}; + +struct mlx4_config_dev_params { + u16 vxlan_udp_dport; + u8 rx_csum_flags_port_1; + u8 rx_csum_flags_port_2; +}; + struct mlx4_dev; struct mlx4_cmd_mailbox { @@ -248,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); +int mlx4_config_dev_retrieval(struct mlx4_dev *dev, + struct mlx4_config_dev_params *params); /* * mlx4_get_slave_default_vlan - * return true if VST ( default vlan) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 37e4404d0227..25c791e295fd 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -95,7 +95,7 @@ enum { enum { MLX4_MAX_NUM_PF = 16, - MLX4_MAX_NUM_VF = 64, + MLX4_MAX_NUM_VF = 126, MLX4_MAX_NUM_VF_P_PORT = 64, MLX4_MFUNC_MAX = 80, MLX4_MAX_EQ_NUM = 1024, @@ -117,6 +117,14 @@ enum { MLX4_STEERING_MODE_DEVICE_MANAGED }; +enum { + MLX4_STEERING_DMFS_A0_DEFAULT, + MLX4_STEERING_DMFS_A0_DYNAMIC, + MLX4_STEERING_DMFS_A0_STATIC, + MLX4_STEERING_DMFS_A0_DISABLE, + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED +}; + static inline const char *mlx4_steering_mode_str(int steering_mode) { switch (steering_mode) { @@ -186,7 +194,31 @@ enum { MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, - MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 + MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13, + MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14, + MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15, + MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, + MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, + MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, + MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 +}; + +enum { + MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0, + MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 +}; + +/* bit enums for an 8-bit flags field indicating special use + * QPs which require special handling in qp_reserve_range. + * Currently, this only includes QPs used by the ETH interface, + * where we expect to use blueflame. These QPs must not have + * bits 6 and 7 set in their qp number. + * + * This enum may use only bits 0..7. 
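/*
 * Editor's note (not part of the diff): hypothetical helper using the new
 * mlx4_config_dev_retrieval()/mlx4_config_dev_params API above to test one
 * of the RX checksum mode bits for port 1.
 */
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>

static bool ex_port1_csums_non_tcp_udp(struct mlx4_dev *dev)
{
	struct mlx4_config_dev_params params;

	if (mlx4_config_dev_retrieval(dev, &params))
		return false;

	return params.rx_csum_flags_port_1 & MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP;
}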
+ */ +enum { + MLX4_RESERVE_A0_QP = 1 << 6, + MLX4_RESERVE_ETH_BF_QP = 1 << 7, }; enum { @@ -202,7 +234,8 @@ enum { enum { MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, - MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 + MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, + MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2 }; @@ -328,6 +361,8 @@ enum { enum mlx4_qp_region { MLX4_QP_REGION_FW = 0, + MLX4_QP_REGION_RSS_RAW_ETH, + MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH, MLX4_QP_REGION_ETH_ADDR, MLX4_QP_REGION_FC_ADDR, MLX4_QP_REGION_FC_EXCH, @@ -379,6 +414,13 @@ enum { #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) +enum mlx4_module_id { + MLX4_MODULE_ID_SFP = 0x3, + MLX4_MODULE_ID_QSFP = 0xC, + MLX4_MODULE_ID_QSFP_PLUS = 0xD, + MLX4_MODULE_ID_QSFP28 = 0x11, +}; + static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; @@ -433,6 +475,7 @@ struct mlx4_caps { int num_cqs; int max_cqes; int reserved_cqs; + int num_sys_eqs; int num_eqs; int reserved_eqs; int num_comp_vectors; @@ -449,6 +492,7 @@ struct mlx4_caps { int reserved_mcgs; int num_qp_per_mgm; int steering_mode; + int dmfs_high_steer_mode; int fs_log_max_ucast_qp_range_size; int num_pds; int reserved_pds; @@ -487,6 +531,10 @@ struct mlx4_caps { u16 hca_core_clock; u64 phys_port_id[MLX4_MAX_PORTS + 1]; int tunnel_offload_mode; + u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; + u8 alloc_res_qp_mask; + u32 dmfs_high_rate_qpn_base; + u32 dmfs_high_rate_qpn_range; }; struct mlx4_buf_list { @@ -607,6 +655,11 @@ struct mlx4_cq { atomic_t refcount; struct completion free; + struct { + struct list_head list; + void (*comp)(struct mlx4_cq *); + void *priv; + } tasklet_ctx; }; struct mlx4_qp { @@ -799,6 +852,26 @@ struct mlx4_init_port_param { u64 si_guid; }; +#define MAD_IFC_DATA_SZ 192 +/* MAD IFC Mailbox */ +struct mlx4_mad_ifc { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + __be16 class_specific; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; + __be64 mkey; + __be16 dr_slid; + __be16 dr_dlid; + u8 reserved[28]; + u8 data[MAD_IFC_DATA_SZ]; +} __packed; + #define mlx4_foreach_port(port, dev, type) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ if ((type) == (dev)->caps.port_mask[(port)]) @@ -835,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) { return (qpn < dev->phys_caps.base_sqpn + 8 + - 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev)); + 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) && + qpn >= dev->phys_caps.base_sqpn) || + (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); } static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) @@ -911,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, unsigned vector, int collapsed, int timestamp_en); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); - -int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags); void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, @@ -1283,10 +1358,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, u64 iova, u64 size, int npages, int page_shift, struct 
mlx4_mpt_entry *mpt_entry); +int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + u16 offset, u16 size, u8 *data); + /* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) { return is_kdump_kernel(); } +/* ACCESS REG commands */ +enum mlx4_access_reg_method { + MLX4_ACCESS_REG_QUERY = 0x1, + MLX4_ACCESS_REG_WRITE = 0x2, +}; + +/* ACCESS PTYS Reg command */ +enum mlx4_ptys_proto { + MLX4_PTYS_IB = 1<<0, + MLX4_PTYS_EN = 1<<2, +}; + +struct mlx4_ptys_reg { + u8 resrvd1; + u8 local_port; + u8 resrvd2; + u8 proto_mask; + __be32 resrvd3[2]; + __be32 eth_proto_cap; + __be16 ib_width_cap; + __be16 ib_speed_cap; + __be32 resrvd4; + __be32 eth_proto_admin; + __be16 ib_width_admin; + __be16 ib_speed_admin; + __be32 resrvd5; + __be32 eth_proto_oper; + __be16 ib_width_oper; + __be16 ib_speed_oper; + __be32 resrvd6; + __be32 eth_proto_lp_adv; +} __packed; + +int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, + enum mlx4_access_reg_method method, + struct mlx4_ptys_reg *ptys_reg); + #endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 5f4e36cf0091..467ccdf94c98 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -120,13 +120,15 @@ enum { MLX4_RSS_QPC_FLAG_OFFSET = 13, }; +#define MLX4_EN_RSS_KEY_SIZE 40 + struct mlx4_rss_context { __be32 base_qpn; __be32 default_qpn; u16 reserved; u8 hash_fn; u8 flags; - __be32 rss_key[10]; + __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)]; __be32 base_qpn_udp; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 1d67fd32e71c..4e5bd813bb9a 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -120,6 +120,15 @@ enum { }; enum { + MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31 +}; + +enum { + MLX5_PFAULT_SUBTYPE_WQE = 0, + MLX5_PFAULT_SUBTYPE_RDMA = 1, +}; + +enum { MLX5_PERM_LOCAL_READ = 1 << 2, MLX5_PERM_LOCAL_WRITE = 1 << 3, MLX5_PERM_REMOTE_READ = 1 << 4, @@ -180,6 +189,19 @@ enum { MLX5_MKEY_MASK_FREE = 1ull << 29, }; +enum { + MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4), + + MLX5_UMR_CHECK_NOT_FREE = (1 << 5), + MLX5_UMR_CHECK_FREE = (2 << 5), + + MLX5_UMR_INLINE = (1 << 7), +}; + +#define MLX5_UMR_MTT_ALIGNMENT 0x40 +#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1) +#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT + enum mlx5_event { MLX5_EVENT_TYPE_COMP = 0x0, @@ -206,6 +228,8 @@ enum mlx5_event { MLX5_EVENT_TYPE_CMD = 0x0a, MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, + + MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, }; enum { @@ -219,11 +243,7 @@ enum { }; enum { - MLX5_DEV_CAP_FLAG_RC = 1LL << 0, - MLX5_DEV_CAP_FLAG_UC = 1LL << 1, - MLX5_DEV_CAP_FLAG_UD = 1LL << 2, MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, - MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6, MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, MLX5_DEV_CAP_FLAG_APM = 1LL << 17, @@ -232,10 +252,7 @@ enum { MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, - MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, - MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, - MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, }; @@ -298,6 +315,8 @@ enum { enum { HCA_CAP_OPMOD_GET_MAX = 0, HCA_CAP_OPMOD_GET_CUR = 1, + HCA_CAP_OPMOD_GET_ODP_MAX = 4, + HCA_CAP_OPMOD_GET_ODP_CUR = 5 }; struct mlx5_inbox_hdr { @@ -327,6 +346,23 @@ struct mlx5_cmd_query_adapter_mbox_out { u8 
vsd_psid[16]; }; +enum mlx5_odp_transport_cap_bits { + MLX5_ODP_SUPPORT_SEND = 1 << 31, + MLX5_ODP_SUPPORT_RECV = 1 << 30, + MLX5_ODP_SUPPORT_WRITE = 1 << 29, + MLX5_ODP_SUPPORT_READ = 1 << 28, +}; + +struct mlx5_odp_caps { + char reserved[0x10]; + struct { + __be32 rc_odp_caps; + __be32 uc_odp_caps; + __be32 ud_odp_caps; + } per_transport_caps; + char reserved2[0xe4]; +}; + struct mlx5_cmd_init_hca_mbox_in { struct mlx5_inbox_hdr hdr; u8 rsvd0[2]; @@ -447,6 +483,27 @@ struct mlx5_eqe_page_req { __be32 rsvd1[5]; }; +struct mlx5_eqe_page_fault { + __be32 bytes_committed; + union { + struct { + u16 reserved1; + __be16 wqe_index; + u16 reserved2; + __be16 packet_length; + u8 reserved3[12]; + } __packed wqe; + struct { + __be32 r_key; + u16 reserved1; + __be16 packet_length; + __be32 rdma_op_len; + __be64 rdma_va; + } __packed rdma; + } __packed; + __be32 flags_qpn; +} __packed; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -458,6 +515,7 @@ union ev_data { struct mlx5_eqe_congestion cong; struct mlx5_eqe_stall_vl stall_vl; struct mlx5_eqe_page_req req_pages; + struct mlx5_eqe_page_fault page_fault; } __packed; struct mlx5_eqe { @@ -784,6 +842,10 @@ struct mlx5_query_eq_mbox_out { struct mlx5_eq_context ctx; }; +enum { + MLX5_MKEY_STATUS_FREE = 1 << 6, +}; + struct mlx5_mkey_seg { /* This is a two bit field occupying bits 31-30. * bit 31 is always 0, @@ -820,7 +882,7 @@ struct mlx5_query_special_ctxs_mbox_out { struct mlx5_create_mkey_mbox_in { struct mlx5_inbox_hdr hdr; __be32 input_mkey_index; - u8 rsvd0[4]; + __be32 flags; struct mlx5_mkey_seg seg; u8 rsvd1[16]; __be32 xlat_oct_act_size; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 246310dc8bef..166d9315fe4b 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -113,6 +113,13 @@ enum { MLX5_REG_HOST_ENDIANNESS = 0x7004, }; +enum mlx5_page_fault_resume_flags { + MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0, + MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1, + MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2, + MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7, +}; + enum dbg_rsc_type { MLX5_DBG_RSC_QP, MLX5_DBG_RSC_EQ, @@ -467,7 +474,7 @@ struct mlx5_priv { struct workqueue_struct *pg_wq; struct rb_root page_root; int fw_pages; - int reg_pages; + atomic_t reg_pages; struct list_head free_list; struct mlx5_core_health health; @@ -633,14 +640,6 @@ static inline void *mlx5_vzalloc(unsigned long size) return rtn; } -static inline void mlx5_vfree(const void *addr) -{ - if (addr && is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); -} - static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; @@ -711,6 +710,9 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev); void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); +#endif void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); @@ -748,6 +750,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); +int mlx5_query_odp_caps(struct mlx5_core_dev *dev, + 
struct mlx5_odp_caps *odp_caps); static inline u32 mlx5_mkey_to_idx(u32 mkey) { diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 3fa075daeb1d..61f7a342d1bf 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -50,6 +50,9 @@ #define MLX5_BSF_APPTAG_ESCAPE 0x1 #define MLX5_BSF_APPREF_ESCAPE 0x2 +#define MLX5_QPN_BITS 24 +#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1) + enum mlx5_qp_optpar { MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, MLX5_QP_OPTPAR_RRE = 1 << 1, @@ -189,6 +192,14 @@ struct mlx5_wqe_ctrl_seg { __be32 imm; }; +#define MLX5_WQE_CTRL_DS_MASK 0x3f +#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00 +#define MLX5_WQE_CTRL_QPN_SHIFT 8 +#define MLX5_WQE_DS_UNITS 16 +#define MLX5_WQE_CTRL_OPCODE_MASK 0xff +#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 +#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 + struct mlx5_wqe_xrc_seg { __be32 xrc_srqn; u8 rsvd[12]; @@ -292,6 +303,8 @@ struct mlx5_wqe_signature_seg { u8 rsvd1[11]; }; +#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff + struct mlx5_wqe_inline_seg { __be32 byte_count; }; @@ -360,9 +373,46 @@ struct mlx5_stride_block_ctrl_seg { __be16 num_entries; }; +enum mlx5_pagefault_flags { + MLX5_PFAULT_REQUESTOR = 1 << 0, + MLX5_PFAULT_WRITE = 1 << 1, + MLX5_PFAULT_RDMA = 1 << 2, +}; + +/* Contains the details of a pagefault. */ +struct mlx5_pagefault { + u32 bytes_committed; + u8 event_subtype; + enum mlx5_pagefault_flags flags; + union { + /* Initiator or send message responder pagefault details. */ + struct { + /* Received packet size, only valid for responders. */ + u32 packet_size; + /* + * WQE index. Refers to either the send queue or + * receive queue, according to event_subtype. + */ + u16 wqe_index; + } wqe; + /* RDMA responder pagefault details */ + struct { + u32 r_key; + /* + * Received packet size, minimal size page fault + * resolution required for forward progress. 
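/*
 * Editor's note (not part of the diff): hypothetical check of the new ODP
 * capability bits via mlx5_query_odp_caps(), declared in the driver.h hunk
 * above; rc_odp_caps is big-endian, hence be32_to_cpu().
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>

static bool ex_rc_odp_send_supported(struct mlx5_core_dev *mdev)
{
	struct mlx5_odp_caps caps;

	if (mlx5_query_odp_caps(mdev, &caps))
		return false;

	return be32_to_cpu(caps.per_transport_caps.rc_odp_caps) &
	       MLX5_ODP_SUPPORT_SEND;
}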
+ */ + u32 packet_size; + u32 rdma_op_len; + u64 rdma_va; + } rdma; + }; +}; + struct mlx5_core_qp { struct mlx5_core_rsc_common common; /* must be first */ void (*event) (struct mlx5_core_qp *, int); + void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *); int qpn; struct mlx5_rsc_debug *dbg; int pid; @@ -530,6 +580,17 @@ static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u return radix_tree_lookup(&dev->priv.mr_table.tree, key); } +struct mlx5_page_fault_resume_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 flags_qpn; + u8 reserved[4]; +}; + +struct mlx5_page_fault_resume_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, struct mlx5_create_qp_mbox_in *in, @@ -549,6 +610,10 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev); void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn, + u8 context, int error); +#endif static inline const char *mlx5_qp_type_str(int type) { diff --git a/include/linux/mm.h b/include/linux/mm.h index f7606d3a0915..c0a67b894c4c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -19,6 +19,7 @@ #include <linux/bit_spinlock.h> #include <linux/shrinker.h> #include <linux/resource.h> +#include <linux/page_ext.h> struct mempolicy; struct anon_vma; @@ -56,6 +57,17 @@ extern int sysctl_legacy_va_layout; #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #endif +/* + * To prevent common memory management code establishing + * a zero page mapping on a read fault. + * This macro should be defined within <asm/pgtable.h>. + * s390 does this to prevent multiplexing of hardware bits + * related to the physical page in case of virtualization. 
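/*
 * Editor's note (not part of the diff): minimal sketch of how generic mm
 * code is expected to consult mm_forbids_zeropage() before installing the
 * shared zero page on a read fault; the helper name is hypothetical, and the
 * hook evaluates to the constant 0 everywhere the architecture does not
 * override it.
 */
#include <linux/mm.h>

static bool ex_may_use_zero_page(struct vm_area_struct *vma)
{
	return !mm_forbids_zeropage(vma->vm_mm);
}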
+ */ +#ifndef mm_forbids_zeropage +#define mm_forbids_zeropage(X) (0) +#endif + extern unsigned long sysctl_user_reserve_kbytes; extern unsigned long sysctl_admin_reserve_kbytes; @@ -2049,7 +2061,22 @@ static inline void vm_stat_account(struct mm_struct *mm, #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_DEBUG_PAGEALLOC -extern void kernel_map_pages(struct page *page, int numpages, int enable); +extern bool _debug_pagealloc_enabled; +extern void __kernel_map_pages(struct page *page, int numpages, int enable); + +static inline bool debug_pagealloc_enabled(void) +{ + return _debug_pagealloc_enabled; +} + +static inline void +kernel_map_pages(struct page *page, int numpages, int enable) +{ + if (!debug_pagealloc_enabled()) + return; + + __kernel_map_pages(page, numpages, enable); +} #ifdef CONFIG_HIBERNATION extern bool kernel_page_present(struct page *page); #endif /* CONFIG_HIBERNATION */ @@ -2083,9 +2110,9 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); #endif -unsigned long shrink_slab(struct shrink_control *shrink, - unsigned long nr_pages_scanned, - unsigned long lru_pages); +unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid, + unsigned long nr_scanned, + unsigned long nr_eligible); #ifndef CONFIG_MMU #define randomize_va_space 0 @@ -2144,20 +2171,36 @@ extern void copy_user_huge_page(struct page *dst, struct page *src, unsigned int pages_per_huge_page); #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ +extern struct page_ext_operations debug_guardpage_ops; +extern struct page_ext_operations page_poisoning_ops; + #ifdef CONFIG_DEBUG_PAGEALLOC extern unsigned int _debug_guardpage_minorder; +extern bool _debug_guardpage_enabled; static inline unsigned int debug_guardpage_minorder(void) { return _debug_guardpage_minorder; } +static inline bool debug_guardpage_enabled(void) +{ + return _debug_guardpage_enabled; +} + static inline bool page_is_guard(struct page *page) { - return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); + struct page_ext *page_ext; + + if (!debug_guardpage_enabled()) + return false; + + page_ext = lookup_page_ext(page); + return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); } #else static inline unsigned int debug_guardpage_minorder(void) { return 0; } +static inline bool debug_guardpage_enabled(void) { return false; } static inline bool page_is_guard(struct page *page) { return false; } #endif /* CONFIG_DEBUG_PAGEALLOC */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 004e9d17b47e..6d34aa266a8c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -10,7 +10,6 @@ #include <linux/rwsem.h> #include <linux/completion.h> #include <linux/cpumask.h> -#include <linux/page-debug-flags.h> #include <linux/uprobes.h> #include <linux/page-flags-layout.h> #include <asm/page.h> @@ -22,6 +21,7 @@ #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) struct address_space; +struct mem_cgroup; #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ @@ -167,6 +167,10 @@ struct page { struct page *first_page; /* Compound tail pages */ }; +#ifdef CONFIG_MEMCG + struct mem_cgroup *mem_cgroup; +#endif + /* * On machines where all RAM is mapped into kernel address space, * we can simply calculate the virtual address. On machines with @@ -181,9 +185,6 @@ struct page { void *virtual; /* Kernel virtual address (NULL if not kmapped, ie. 
highmem) */ #endif /* WANT_PAGE_VIRTUAL */ -#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS - unsigned long debug_flags; /* Use atomic bitops on this */ -#endif #ifdef CONFIG_KMEMCHECK /* @@ -529,4 +530,12 @@ enum tlb_flush_reason { NR_TLB_FLUSH_REASONS, }; + /* + * A swap entry has to fit into a "unsigned long", as the entry is hidden + * in the "index" field of the swapper address space. + */ +typedef struct { + unsigned long val; +} swp_entry_t; + #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 88787bb4b3b9..95243d28a0ee 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -98,11 +98,11 @@ struct mmu_notifier_ops { /* * invalidate_range_start() and invalidate_range_end() must be * paired and are called only when the mmap_sem and/or the - * locks protecting the reverse maps are held. The subsystem - * must guarantee that no additional references are taken to - * the pages in the range established between the call to - * invalidate_range_start() and the matching call to - * invalidate_range_end(). + * locks protecting the reverse maps are held. If the subsystem + * can't guarantee that no additional references are taken to + * the pages in the range, it has to implement the + * invalidate_range() notifier to remove any references taken + * after invalidate_range_start(). * * Invalidation of multiple concurrent ranges may be * optionally permitted by the driver. Either way the @@ -144,6 +144,29 @@ struct mmu_notifier_ops { void (*invalidate_range_end)(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end); + + /* + * invalidate_range() is either called between + * invalidate_range_start() and invalidate_range_end() when the + * VM has to free pages that were unmapped, but before the + * pages are actually freed, or outside of _start()/_end() when + * a (remote) TLB flush is necessary. + * + * If invalidate_range() is used to manage a non-CPU TLB with + * shared page-tables, it is not necessary to implement the + * invalidate_range_start()/end() notifiers, as + * invalidate_range() already catches the points in time when an + * external TLB range needs to be flushed. + * + * The invalidate_range() function is called under the ptl + * spin-lock and not allowed to sleep. + * + * Note that this function might be called with just a sub-range + * of what was passed to invalidate_range_start()/end(), if + * called between those functions. + */ + void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, + unsigned long start, unsigned long end); }; /* @@ -154,7 +177,7 @@ struct mmu_notifier_ops { * Therefore notifier chains can only be traversed when either * * 1. mmap_sem is held. - * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem). + * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem). * 3.
No other concurrent thread can access the list (release) */ struct mmu_notifier { @@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm, unsigned long start, unsigned long end); extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, unsigned long start, unsigned long end); +extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, + unsigned long start, unsigned long end); static inline void mmu_notifier_release(struct mm_struct *mm) { @@ -242,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, __mmu_notifier_invalidate_range_end(mm, start, end); } +static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + if (mm_has_notifiers(mm)) + __mmu_notifier_invalidate_range(mm, start, end); +} + static inline void mmu_notifier_mm_init(struct mm_struct *mm) { mm->mmu_notifier_mm = NULL; @@ -279,6 +311,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) __young; \ }) +#define ptep_clear_flush_notify(__vma, __address, __ptep) \ +({ \ + unsigned long ___addr = __address & PAGE_MASK; \ + struct mm_struct *___mm = (__vma)->vm_mm; \ + pte_t ___pte; \ + \ + ___pte = ptep_clear_flush(__vma, __address, __ptep); \ + mmu_notifier_invalidate_range(___mm, ___addr, \ + ___addr + PAGE_SIZE); \ + \ + ___pte; \ +}) + +#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \ +({ \ + unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ + struct mm_struct *___mm = (__vma)->vm_mm; \ + pmd_t ___pmd; \ + \ + ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \ + mmu_notifier_invalidate_range(___mm, ___haddr, \ + ___haddr + HPAGE_PMD_SIZE); \ + \ + ___pmd; \ +}) + +#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \ +({ \ + unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ + pmd_t ___pmd; \ + \ + ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \ + mmu_notifier_invalidate_range(__mm, ___haddr, \ + ___haddr + HPAGE_PMD_SIZE); \ + \ + ___pmd; \ +}) + /* * set_pte_at_notify() sets the pte _after_ running the notifier. 
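/*
 * Editor's note (not part of the diff): sketch of a secondary-MMU driver
 * implementing only the new invalidate_range() callback (per its kerneldoc
 * earlier in this hunk, _start()/_end() can then be left out for
 * shared-page-table TLBs). Names are hypothetical and the device TLB flush
 * is left as a stub.
 */
#include <linux/mmu_notifier.h>

static void ex_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/* flush the external/IOMMU TLB for [start, end); must not sleep,
	 * this runs under the page-table spinlock */
}

static const struct mmu_notifier_ops ex_mmu_notifier_ops = {
	.invalidate_range = ex_invalidate_range,
};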
* This is safe to start by updating the secondary MMUs, because the primary MMU @@ -342,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, { } +static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + static inline void mmu_notifier_mm_init(struct mm_struct *mm) { } @@ -352,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define ptep_clear_flush_young_notify ptep_clear_flush_young #define pmdp_clear_flush_young_notify pmdp_clear_flush_young +#define ptep_clear_flush_notify ptep_clear_flush +#define pmdp_clear_flush_notify pmdp_clear_flush +#define pmdp_get_and_clear_notify pmdp_get_and_clear #define set_pte_at_notify set_pte_at #endif /* CONFIG_MMU_NOTIFIER */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ffe66e381c04..2f0856d14b21 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -722,8 +722,8 @@ typedef struct pglist_data { int nr_zones; #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ struct page *node_mem_map; -#ifdef CONFIG_MEMCG - struct page_cgroup *node_page_cgroup; +#ifdef CONFIG_PAGE_EXTENSION + struct page_ext *node_page_ext; #endif #endif #ifndef CONFIG_NO_BOOTMEM @@ -1078,7 +1078,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) struct page; -struct page_cgroup; +struct page_ext; struct mem_section { /* * This is, logically, a pointer to an array of struct @@ -1096,12 +1096,12 @@ struct mem_section { /* See declaration of similar field in struct zone */ unsigned long *pageblock_flags; -#ifdef CONFIG_MEMCG +#ifdef CONFIG_PAGE_EXTENSION /* - * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use - * section. (see memcontrol.h/page_cgroup.h about this.) + * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use + * section. (see page_ext.h about this.) */ - struct page_cgroup *page_cgroup; + struct page_ext *page_ext; unsigned long pad; #endif /* diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 44eeef0da186..745def862580 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -69,7 +69,7 @@ struct ieee1394_device_id { * @bDeviceClass: Class of device; numbers are assigned * by the USB forum. Products may choose to implement classes, * or be vendor-specific. Device classes specify behavior of all - * the interfaces on a devices. + * the interfaces on a device. * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. * @bInterfaceClass: Class of interface; numbers are assigned diff --git a/include/linux/module.h b/include/linux/module.h index 71f282a4e307..ebfb0e153c6a 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -210,20 +210,6 @@ enum module_state { MODULE_STATE_UNFORMED, /* Still setting it up. */ }; -/** - * struct module_ref - per cpu module reference counts - * @incs: number of module get on this cpu - * @decs: number of module put on this cpu - * - * We force an alignment on 8 or 16 bytes, so that alloc_percpu() - * put @incs/@decs in same cache line, with no extra memory cost, - * since alloc_percpu() is fine grained. - */ -struct module_ref { - unsigned long incs; - unsigned long decs; -} __attribute((aligned(2 * sizeof(unsigned long)))); - struct module { enum module_state state; @@ -367,7 +353,7 @@ struct module { /* Destruction function. 
*/ void (*exit)(void); - struct module_ref __percpu *refptr; + atomic_t refcnt; #endif #ifdef CONFIG_CONSTRUCTORS diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index e4d451e4600b..3d4ea7eb2b68 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -455,8 +455,21 @@ struct nand_hw_control { * be provided if an hardware ECC is available * @calculate: function for ECC calculation or readback from ECC hardware * @correct: function for ECC correction, matching to ECC generator (sw/hw) - * @read_page_raw: function to read a raw page without ECC - * @write_page_raw: function to write a raw page without ECC + * @read_page_raw: function to read a raw page without ECC. This function + * should hide the specific layout used by the ECC + * controller and always return contiguous in-band and + * out-of-band data even if they're not stored + * contiguously on the NAND chip (e.g. + * NAND_ECC_HW_SYNDROME interleaves in-band and + * out-of-band data). + * @write_page_raw: function to write a raw page without ECC. This function + * should hide the specific layout used by the ECC + * controller and consider the passed data as contiguous + * in-band and out-of-band data. ECC controller is + * responsible for doing the appropriate transformations + * to adapt to its specific layout (e.g. + * NAND_ECC_HW_SYNDROME interleaves in-band and + * out-of-band data). * @read_page: function to read a page according to the ECC generator * requirements; returns maximum number of bitflips corrected in * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error @@ -723,6 +736,7 @@ struct nand_chip { #define NAND_MFR_EON 0x92 #define NAND_MFR_SANDISK 0x45 #define NAND_MFR_INTEL 0x89 +#define NAND_MFR_ATO 0x9b /* The maximum expected count of bytes in the NAND ID sequence */ #define NAND_MAX_ID_LEN 8 diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 046a0a2e4c4e..63aeccf9ddc8 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -116,6 +116,10 @@ enum spi_nor_ops { SPI_NOR_OPS_UNLOCK, }; +enum spi_nor_option_flags { + SNOR_F_USE_FSR = BIT(0), +}; + /** * struct spi_nor - Structure for defining a the SPI NOR layer * @mtd: point to a mtd_info structure @@ -129,6 +133,7 @@ enum spi_nor_ops { * @program_opcode: the program opcode * @flash_read: the mode of the read * @sst_write_second: used by the SST write operation + * @flags: flag options for the current SPI-NOR (SNOR_F_*) * @cfg: used by the read_xfer/write_xfer * @cmd_buf: used by the write_reg * @prepare: [OPTIONAL] do some preparations for the @@ -139,9 +144,6 @@ enum spi_nor_ops { * @write_xfer: [OPTIONAL] the writefundamental primitive * @read_reg: [DRIVER-SPECIFIC] read out the register * @write_reg: [DRIVER-SPECIFIC] write data to the register - * @read_id: [REPLACEABLE] read out the ID data, and find - * the proper spi_device_id - * @wait_till_ready: [REPLACEABLE] wait till the NOR becomes ready * @read: [DRIVER-SPECIFIC] read data from the SPI NOR * @write: [DRIVER-SPECIFIC] write data to the SPI NOR * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR @@ -160,6 +162,7 @@ struct spi_nor { u8 program_opcode; enum read_mode flash_read; bool sst_write_second; + u32 flags; struct spi_nor_xfer_cfg cfg; u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; @@ -172,8 +175,6 @@ struct spi_nor { int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len, int write_enable); - const struct spi_device_id *(*read_id)(struct 
spi_nor *nor); - int (*wait_till_ready)(struct spi_nor *nor); int (*read)(struct spi_nor *nor, loff_t from, size_t len, size_t *retlen, u_char *read_buf); diff --git a/include/linux/namei.h b/include/linux/namei.h index 492de72560fa..c8990779f0c3 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -7,21 +7,10 @@ #include <linux/path.h> struct vfsmount; +struct nameidata; enum { MAX_NESTED_LINKS = 8 }; -struct nameidata { - struct path path; - struct qstr last; - struct path root; - struct inode *inode; /* path.dentry.d_inode */ - unsigned int flags; - unsigned seq, m_seq; - int last_type; - unsigned depth; - char *saved_names[MAX_NESTED_LINKS + 1]; -}; - /* * Type of the last component on LOOKUP_PARENT */ @@ -82,16 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *); extern void unlock_rename(struct dentry *, struct dentry *); extern void nd_jump_link(struct nameidata *nd, struct path *path); - -static inline void nd_set_link(struct nameidata *nd, char *path) -{ - nd->saved_names[nd->depth] = path; -} - -static inline char *nd_get_link(struct nameidata *nd) -{ - return nd->saved_names[nd->depth]; -} +extern void nd_set_link(struct nameidata *nd, char *path); +extern char *nd_get_link(struct nameidata *nd); static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) { diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index dcfdecbfa0b7..8e30685affeb 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -47,9 +47,9 @@ enum { NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ - NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ + NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... 
TUNNEL with TSO & REMCSUM */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_MPLS_BIT, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ @@ -118,7 +118,7 @@ enum { #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) -#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) +#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) @@ -181,7 +181,6 @@ enum { NETIF_F_GSO_IPIP | \ NETIF_F_GSO_SIT | \ NETIF_F_GSO_UDP_TUNNEL | \ - NETIF_F_GSO_UDP_TUNNEL_CSUM | \ - NETIF_F_GSO_MPLS) + NETIF_F_GSO_UDP_TUNNEL_CSUM) #endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 74fd5d37f15a..c31f74d76ebd 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -57,6 +57,8 @@ struct device; struct phy_device; /* 802.11 specific */ struct wireless_dev; +/* 802.15.4 specific */ +struct wpan_dev; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -314,6 +316,7 @@ struct napi_struct { struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; + struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; @@ -386,6 +389,7 @@ typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); void __napi_schedule(struct napi_struct *n); +void __napi_schedule_irqoff(struct napi_struct *n); static inline bool napi_disable_pending(struct napi_struct *n) { @@ -420,6 +424,18 @@ static inline void napi_schedule(struct napi_struct *n) __napi_schedule(n); } +/** + * napi_schedule_irqoff - schedule NAPI poll + * @n: napi context + * + * Variant of napi_schedule(), assuming hard irqs are masked. + */ +static inline void napi_schedule_irqoff(struct napi_struct *n) +{ + if (napi_schedule_prep(n)) + __napi_schedule_irqoff(n); +} + /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ static inline bool napi_reschedule(struct napi_struct *napi) { @@ -430,14 +446,19 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } +void __napi_complete(struct napi_struct *n); +void napi_complete_done(struct napi_struct *n, int work_done); /** * napi_complete - NAPI processing complete * @n: napi context * * Mark NAPI processing as complete. + * Consider using napi_complete_done() instead. */ -void __napi_complete(struct napi_struct *n); -void napi_complete(struct napi_struct *n); +static inline void napi_complete(struct napi_struct *n) +{ + return napi_complete_done(n, 0); +} /** * napi_by_id - lookup a NAPI by napi_id @@ -472,14 +493,7 @@ void napi_hash_del(struct napi_struct *napi); * Stop NAPI from being scheduled on this context. * Waits till any outstanding processing completes. 
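/*
 * Editor's note (not part of the diff): self-contained sketch of the pattern
 * the new NAPI helpers above enable: napi_schedule_irqoff() from a hard-IRQ
 * handler and napi_complete_done() reporting the work actually done. All
 * ex_* names are hypothetical and the RX cleanup is stubbed out.
 */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct ex_priv {
	struct napi_struct napi;
};

static int ex_clean_rx(struct napi_struct *napi, int budget)
{
	return 0;	/* would process up to @budget RX descriptors */
}

static irqreturn_t ex_isr(int irq, void *data)
{
	struct ex_priv *priv = data;

	napi_schedule_irqoff(&priv->napi);	/* hard IRQs are already masked */
	return IRQ_HANDLED;
}

static int ex_poll(struct napi_struct *napi, int budget)
{
	int done = ex_clean_rx(napi, budget);

	if (done < budget)
		napi_complete_done(napi, done);
	return done;
}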
*/ -static inline void napi_disable(struct napi_struct *n) -{ - might_sleep(); - set_bit(NAPI_STATE_DISABLE, &n->state); - while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) - msleep(1); - clear_bit(NAPI_STATE_DISABLE, &n->state); -} +void napi_disable(struct napi_struct *n); /** * napi_enable - enable NAPI scheduling @@ -740,13 +754,13 @@ struct netdev_fcoe_hbainfo { }; #endif -#define MAX_PHYS_PORT_ID_LEN 32 +#define MAX_PHYS_ITEM_ID_LEN 32 -/* This structure holds a unique identifier to identify the - * physical port used by a netdevice. +/* This structure holds a unique identifier to identify some + * physical item (port for example) used by a netdevice. */ -struct netdev_phys_port_id { - unsigned char id[MAX_PHYS_PORT_ID_LEN]; +struct netdev_phys_item_id { + unsigned char id[MAX_PHYS_ITEM_ID_LEN]; unsigned char id_len; }; @@ -937,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr, u16 flags) + * const unsigned char *addr, u16 vid, u16 flags) * Adds an FDB entry to dev for addr. * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr) + * const unsigned char *addr, u16 vid) * Deletes the FDB entry from dev coresponding to addr. * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, * struct net_device *dev, struct net_device *filter_dev, @@ -962,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. * * int (*ndo_get_phys_port_id)(struct net_device *dev, - * struct netdev_phys_port_id *ppid); + * struct netdev_phys_item_id *ppid); * Called to get ID of physical port of this device. If driver does * not implement this, it is assumed that the hw is not able to have * multiple net devices on single physical port. @@ -1004,6 +1018,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * performing GSO on a packet. The device returns true if it is * able to GSO the packet, false otherwise. If the return value is * false the stack will do software GSO. + * + * int (*ndo_switch_parent_id_get)(struct net_device *dev, + * struct netdev_phys_item_id *psid); + * Called to get an ID of the switch chip this port is part of. + * If driver implements this, it indicates that it represents a port + * of a switch chip. + * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); + * Called to notify switch device port of bridge port STP + * state change. 
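/*
 * Editor's note (not part of the diff): sketch of a driver exposing its
 * switch membership through the new ndo_switch_parent_id_get hook documented
 * above; the 4-byte ID and all ex_* names are made up, and the field only
 * exists under CONFIG_NET_SWITCHDEV as shown below.
 */
#ifdef CONFIG_NET_SWITCHDEV
#include <linux/netdevice.h>
#include <linux/string.h>

static int ex_swdev_parent_id_get(struct net_device *dev,
				  struct netdev_phys_item_id *psid)
{
	static const u8 ex_switch_id[4] = { 0xde, 0xad, 0xbe, 0xef };

	psid->id_len = sizeof(ex_switch_id);
	memcpy(psid->id, ex_switch_id, psid->id_len);
	return 0;
}

static const struct net_device_ops ex_netdev_ops = {
	.ndo_switch_parent_id_get = ex_swdev_parent_id_get,
};
#endif /* CONFIG_NET_SWITCHDEV */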
*/ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1114,11 +1137,13 @@ struct net_device_ops { struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, + u16 vid, u16 flags); int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr); + const unsigned char *addr, + u16 vid); int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, @@ -1136,7 +1161,7 @@ struct net_device_ops { int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, - struct netdev_phys_port_id *ppid); + struct netdev_phys_item_id *ppid); void (*ndo_add_vxlan_port)(struct net_device *dev, sa_family_t sa_family, __be16 port); @@ -1155,6 +1180,12 @@ struct net_device_ops { int (*ndo_get_lock_subclass)(struct net_device *dev); bool (*ndo_gso_check) (struct sk_buff *skb, struct net_device *dev); +#ifdef CONFIG_NET_SWITCHDEV + int (*ndo_switch_parent_id_get)(struct net_device *dev, + struct netdev_phys_item_id *psid); + int (*ndo_switch_port_stp_update)(struct net_device *dev, + u8 state); +#endif }; /** @@ -1216,6 +1247,8 @@ enum netdev_priv_flags { IFF_LIVE_ADDR_CHANGE = 1<<20, IFF_MACVLAN = 1<<21, IFF_XMIT_DST_RELEASE_PERM = 1<<22, + IFF_IPVLAN_MASTER = 1<<23, + IFF_IPVLAN_SLAVE = 1<<24, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1241,6 +1274,8 @@ enum netdev_priv_flags { #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE #define IFF_MACVLAN IFF_MACVLAN #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM +#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER +#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE /** * struct net_device - The DEVICE structure. @@ -1572,6 +1607,7 @@ struct net_device { struct inet6_dev __rcu *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; + struct wpan_dev *ieee802154_ptr; /* * Cache lines mostly used on receive path (including eth_type_trans()) @@ -1590,6 +1626,7 @@ struct net_device { #endif + unsigned long gro_flush_timeout; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; @@ -2316,10 +2353,7 @@ extern int netdev_flow_limit_table_len; * Incoming packets are placed on per-cpu queues */ struct softnet_data { - struct Qdisc *output_queue; - struct Qdisc **output_queue_tailp; struct list_head poll_list; - struct sk_buff *completion_queue; struct sk_buff_head process_queue; /* stats */ @@ -2327,10 +2361,17 @@ struct softnet_data { unsigned int time_squeeze; unsigned int cpu_collision; unsigned int received_rps; - #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; +#endif +#ifdef CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit __rcu *flow_limit; +#endif + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; +#ifdef CONFIG_RPS /* Elements below can be accessed between CPUs for RPS */ struct call_single_data csd ____cacheline_aligned_in_smp; struct softnet_data *rps_ipi_next; @@ -2342,9 +2383,6 @@ struct softnet_data { struct sk_buff_head input_pkt_queue; struct napi_struct backlog; -#ifdef CONFIG_NET_FLOW_LIMIT - struct sd_flow_limit __rcu *flow_limit; -#endif }; static inline void input_queue_head_incr(struct softnet_data *sd) @@ -2748,23 +2786,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, } #endif -static inline int netif_copy_real_num_queues(struct net_device *to_dev, - const struct net_device *from_dev) -{ - int err; - - err = netif_set_real_num_tx_queues(to_dev, - from_dev->real_num_tx_queues); - if (err) - 
return err; -#ifdef CONFIG_SYSFS - return netif_set_real_num_rx_queues(to_dev, - from_dev->real_num_rx_queues); -#else - return 0; -#endif -} - #ifdef CONFIG_SYSFS static inline unsigned int get_netdev_rx_queue_index( struct netdev_rx_queue *queue) @@ -2864,7 +2885,7 @@ void dev_set_group(struct net_device *, int); int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, - struct netdev_phys_port_id *ppid); + struct netdev_phys_item_id *ppid); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3425,6 +3446,12 @@ void netdev_upper_dev_unlink(struct net_device *dev, void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); + +/* RSS keys are 40 or 52 bytes long */ +#define NETDEV_RSS_KEY_LEN 52 +extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; +void netdev_rss_key_fill(void *buffer, size_t len); + int dev_get_nest_level(struct net_device *dev, bool (*type_check)(struct net_device *dev)); int skb_checksum_help(struct sk_buff *skb); @@ -3569,7 +3596,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } @@ -3614,6 +3641,21 @@ static inline bool netif_is_macvlan(struct net_device *dev) return dev->priv_flags & IFF_MACVLAN; } +static inline bool netif_is_macvlan_port(struct net_device *dev) +{ + return dev->priv_flags & IFF_MACVLAN_PORT; +} + +static inline bool netif_is_ipvlan(struct net_device *dev) +{ + return dev->priv_flags & IFF_IPVLAN_SLAVE; +} + +static inline bool netif_is_ipvlan_port(struct net_device *dev) +{ + return dev->priv_flags & IFF_IPVLAN_MASTER; +} + static inline bool netif_is_bond_master(struct net_device *dev) { return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 356acc2846fd..022b761dbf0a 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -490,6 +490,8 @@ enum { /* nfs42 */ NFSPROC4_CLNT_SEEK, + NFSPROC4_CLNT_ALLOCATE, + NFSPROC4_CLNT_DEALLOCATE, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c72d1ad41ad4..6d627b92df53 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -163,7 +163,7 @@ struct nfs_inode { */ __be32 cookieverf[2]; - unsigned long npages; + unsigned long nrequests; struct nfs_mds_commit_info commit_info; /* Open contexts for shared mmap writes */ @@ -520,7 +520,7 @@ extern void nfs_commit_free(struct nfs_commit_data *data); static inline int nfs_have_writebacks(struct inode *inode) { - return NFS_I(inode)->npages != 0; + return NFS_I(inode)->nrequests != 0; } /* diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index a32ba0d7a98f..1e37fbb78f7a 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -231,5 +231,7 @@ struct nfs_server { #define 
NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) #define NFS_CAP_SECURITY_LABEL (1U << 18) #define NFS_CAP_SEEK (1U << 19) +#define NFS_CAP_ALLOCATE (1U << 20) +#define NFS_CAP_DEALLOCATE (1U << 21) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 47ebb4fafd87..467c84efb596 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1243,6 +1243,20 @@ nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) #endif /* CONFIG_NFS_V4_1 */ #ifdef CONFIG_NFS_V4_2 +struct nfs42_falloc_args { + struct nfs4_sequence_args seq_args; + + struct nfs_fh *falloc_fh; + nfs4_stateid falloc_stateid; + u64 falloc_offset; + u64 falloc_length; +}; + +struct nfs42_falloc_res { + struct nfs4_sequence_res seq_res; + unsigned int status; +}; + struct nfs42_seek_args { struct nfs4_sequence_args seq_args; diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 20163b9a0eae..167342c2ce6b 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * */ #ifndef NL802154_H diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h new file mode 100644 index 000000000000..85a5c8c16be9 --- /dev/null +++ b/include/linux/ns_common.h @@ -0,0 +1,12 @@ +#ifndef _LINUX_NS_COMMON_H +#define _LINUX_NS_COMMON_H + +struct proc_ns_operations; + +struct ns_common { + atomic_long_t stashed; + const struct proc_ns_operations *ops; + unsigned int inum; +}; + +#endif diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 2bf403195c09..258945fcabf1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -19,6 +19,7 @@ #include <linux/pci.h> #include <linux/miscdevice.h> #include <linux/kref.h> +#include <linux/blk-mq.h> struct nvme_bar { __u64 cap; /* Controller Capabilities */ @@ -38,6 +39,7 @@ struct nvme_bar { #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) +#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) enum { NVME_CC_ENABLE = 1 << 0, @@ -70,8 +72,10 @@ extern unsigned char nvme_io_timeout; */ struct nvme_dev { struct list_head node; - struct nvme_queue __rcu **queues; - unsigned short __percpu *io_queue; + struct nvme_queue **queues; + struct request_queue *admin_q; + struct blk_mq_tag_set tagset; + struct blk_mq_tag_set admin_tagset; u32 __iomem *dbs; struct pci_dev *pci_dev; struct dma_pool *prp_page_pool; @@ -90,15 +94,16 @@ struct nvme_dev { struct miscdevice miscdev; work_func_t reset_workfn; struct work_struct reset_work; - struct work_struct cpu_work; char name[12]; char serial[20]; char model[40]; char firmware_rev[8]; u32 max_hw_sectors; u32 stripe_size; + u32 page_size; u16 oncs; u16 abort_limit; + u8 event_limit; u8 vwc; u8 initialized; }; @@ -132,7 +137,6 @@ struct nvme_iod { int offset; /* Of PRP list */ int nents; /* Used in scatterlist */ int length; /* Of data, in bytes */ - unsigned long start_time; dma_addr_t first_dma; struct list_head node; struct scatterlist sg[0]; @@ -150,12 +154,14 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) */ void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); -int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , 
gfp_t); +int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t); struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, unsigned long addr, unsigned length); void nvme_unmap_user_pages(struct nvme_dev *dev, int write, struct nvme_iod *iod); -int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *); +int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *, + struct nvme_command *, u32 *); +int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns); int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, u32 *result); int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, diff --git a/include/linux/of.h b/include/linux/of.h index c55b50018ac4..dfde07e77a63 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -23,6 +23,8 @@ #include <linux/spinlock.h> #include <linux/topology.h> #include <linux/notifier.h> +#include <linux/property.h> +#include <linux/list.h> #include <asm/byteorder.h> #include <asm/errno.h> @@ -49,14 +51,13 @@ struct device_node { const char *type; phandle phandle; const char *full_name; + struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; /* removed properties */ struct device_node *parent; struct device_node *child; struct device_node *sibling; - struct device_node *next; /* next device of same type */ - struct device_node *allnext; /* next in list of all nodes */ struct kobject kobj; unsigned long _flags; void *data; @@ -74,11 +75,18 @@ struct of_phandle_args { uint32_t args[MAX_PHANDLE_ARGS]; }; +struct of_reconfig_data { + struct device_node *dn; + struct property *prop; + struct property *old_prop; +}; + /* initialize a node */ extern struct kobj_type of_node_ktype; static inline void of_node_init(struct device_node *node) { kobject_init(&node->kobj, &of_node_ktype); + node->fwnode.type = FWNODE_OF; } /* true when node is initialized */ @@ -105,18 +113,27 @@ static inline struct device_node *of_node_get(struct device_node *node) static inline void of_node_put(struct device_node *node) { } #endif /* !CONFIG_OF_DYNAMIC */ -#ifdef CONFIG_OF - /* Pointer for first entry in chain of all nodes. */ -extern struct device_node *of_allnodes; +extern struct device_node *of_root; extern struct device_node *of_chosen; extern struct device_node *of_aliases; extern struct device_node *of_stdout; extern raw_spinlock_t devtree_lock; +#ifdef CONFIG_OF +static inline bool is_of_node(struct fwnode_handle *fwnode) +{ + return fwnode && fwnode->type == FWNODE_OF; +} + +static inline struct device_node *of_node(struct fwnode_handle *fwnode) +{ + return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; +} + static inline bool of_have_populated_dt(void) { - return of_allnodes != NULL; + return of_root != NULL; } static inline bool of_node_is_root(const struct device_node *node) @@ -160,6 +177,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag clear_bit(flag, &p->_flags); } +extern struct device_node *__of_find_all_nodes(struct device_node *prev); extern struct device_node *of_find_all_nodes(struct device_node *prev); /* @@ -215,8 +233,9 @@ static inline const char *of_node_full_name(const struct device_node *np) return np ? 
np->full_name : "<no-node>"; } -#define for_each_of_allnodes(dn) \ - for (dn = of_allnodes; dn; dn = dn->allnext) +#define for_each_of_allnodes_from(from, dn) \ + for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) +#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) extern struct device_node *of_find_node_by_name(struct device_node *from, const char *name); extern struct device_node *of_find_node_by_type(struct device_node *from, @@ -228,7 +247,13 @@ extern struct device_node *of_find_matching_node_and_match( const struct of_device_id *matches, const struct of_device_id **match); -extern struct device_node *of_find_node_by_path(const char *path); +extern struct device_node *of_find_node_opts_by_path(const char *path, + const char **opts); +static inline struct device_node *of_find_node_by_path(const char *path) +{ + return of_find_node_opts_by_path(path, NULL); +} + extern struct device_node *of_find_node_by_phandle(phandle handle); extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_parent(struct device_node *node); @@ -263,6 +288,10 @@ extern int of_property_read_u32_array(const struct device_node *np, size_t sz); extern int of_property_read_u64(const struct device_node *np, const char *propname, u64 *out_value); +extern int of_property_read_u64_array(const struct device_node *np, + const char *propname, + u64 *out_values, + size_t sz); extern int of_property_read_string(struct device_node *np, const char *propname, @@ -275,7 +304,7 @@ extern int of_property_read_string_helper(struct device_node *np, const char **out_strs, size_t sz, int index); extern int of_device_is_compatible(const struct device_node *device, const char *); -extern int of_device_is_available(const struct device_node *device); +extern bool of_device_is_available(const struct device_node *device); extern const void *of_get_property(const struct device_node *node, const char *name, int *lenp); @@ -317,16 +346,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop); #define OF_RECONFIG_REMOVE_PROPERTY 0x0004 #define OF_RECONFIG_UPDATE_PROPERTY 0x0005 -struct of_prop_reconfig { - struct device_node *dn; - struct property *prop; - struct property *old_prop; -}; - -extern int of_reconfig_notifier_register(struct notifier_block *); -extern int of_reconfig_notifier_unregister(struct notifier_block *); -extern int of_reconfig_notify(unsigned long, void *); - extern int of_attach_node(struct device_node *); extern int of_detach_node(struct device_node *); @@ -355,6 +374,16 @@ bool of_console_check(struct device_node *dn, char *name, int index); #else /* CONFIG_OF */ +static inline bool is_of_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct device_node *of_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + static inline const char* of_node_full_name(const struct device_node *np) { return "<no-node>"; @@ -385,6 +414,12 @@ static inline struct device_node *of_find_node_by_path(const char *path) return NULL; } +static inline struct device_node *of_find_node_opts_by_path(const char *path, + const char **opts) +{ + return NULL; +} + static inline struct device_node *of_get_parent(const struct device_node *node) { return NULL; @@ -426,9 +461,9 @@ static inline int of_device_is_compatible(const struct device_node *device, return 0; } -static inline int of_device_is_available(const struct device_node *device) +static inline bool of_device_is_available(const struct 
device_node *device) { - return 0; + return false; } static inline struct property *of_find_property(const struct device_node *np, @@ -477,6 +512,13 @@ static inline int of_property_read_u32_array(const struct device_node *np, return -ENOSYS; } +static inline int of_property_read_u64_array(const struct device_node *np, + const char *propname, + u64 *out_values, size_t sz) +{ + return -ENOSYS; +} + static inline int of_property_read_string(struct device_node *np, const char *propname, const char **out_string) @@ -760,6 +802,13 @@ static inline int of_property_read_u32(const struct device_node *np, return of_property_read_u32_array(np, propname, out_value, 1); } +static inline int of_property_read_s32(const struct device_node *np, + const char *propname, + s32 *out_value) +{ + return of_property_read_u32(np, propname, (u32*) out_value); +} + #define of_property_for_each_u32(np, propname, prop, p, u) \ for (prop = of_find_property(np, propname, NULL), \ p = of_prop_next_u32(prop, NULL, &u); \ @@ -828,7 +877,7 @@ static inline int of_get_available_child_count(const struct device_node *np) = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #else -#define _OF_DECLARE(table, name, compat, fn, fn_type) \ +#define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __attribute__((unused)) \ = { .compatible = compat, \ @@ -879,7 +928,19 @@ struct of_changeset { struct list_head entries; }; +enum of_reconfig_change { + OF_RECONFIG_NO_CHANGE = 0, + OF_RECONFIG_CHANGE_ADD, + OF_RECONFIG_CHANGE_REMOVE, +}; + #ifdef CONFIG_OF_DYNAMIC +extern int of_reconfig_notifier_register(struct notifier_block *); +extern int of_reconfig_notifier_unregister(struct notifier_block *); +extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd); +extern int of_reconfig_get_state_change(unsigned long action, + struct of_reconfig_data *arg); + extern void of_changeset_init(struct of_changeset *ocs); extern void of_changeset_destroy(struct of_changeset *ocs); extern int of_changeset_apply(struct of_changeset *ocs); @@ -917,7 +978,26 @@ static inline int of_changeset_update_property(struct of_changeset *ocs, { return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); } -#endif +#else /* CONFIG_OF_DYNAMIC */ +static inline int of_reconfig_notifier_register(struct notifier_block *nb) +{ + return -EINVAL; +} +static inline int of_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return -EINVAL; +} +static inline int of_reconfig_notify(unsigned long action, + struct of_reconfig_data *arg) +{ + return -EINVAL; +} +static inline int of_reconfig_get_state_change(unsigned long action, + struct of_reconfig_data *arg) +{ + return -EINVAL; +} +#endif /* CONFIG_OF_DYNAMIC */ /* CONFIG_OF_RESOLVE api */ extern int of_resolve_phandles(struct device_node *tree); @@ -933,4 +1013,34 @@ static inline bool of_device_is_system_power_controller(const struct device_node return of_property_read_bool(np, "system-power-controller"); } +/** + * Overlay support + */ + +#ifdef CONFIG_OF_OVERLAY + +/* ID based overlays; the API for external users */ +int of_overlay_create(struct device_node *tree); +int of_overlay_destroy(int id); +int of_overlay_destroy_all(void); + +#else + +static inline int of_overlay_create(struct device_node *tree) +{ + return -ENOTSUPP; +} + +static inline int of_overlay_destroy(int id) +{ + return -ENOTSUPP; +} + +static inline int of_overlay_destroy_all(void) +{ + return -ENOTSUPP; +} + +#endif + #endif /* _LINUX_OF_H */ 
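The include/linux/of.h hunks above add, among other things, of_find_node_opts_by_path() for path lookups carrying an options suffix, an of_property_read_u64_array() reader, an of_property_read_s32() convenience wrapper, and the of_overlay_create()/of_overlay_destroy() entry points. A minimal sketch of how a driver might consume the new property and path helpers follows; the node path, the ":fast" option string, and the "clock-ranges"/"trim-offset" property names are illustrative assumptions rather than values taken from this diff, and the ':'-separated option semantics are an inference about the new helper, not something spelled out in the hunks above.

/* Sketch only: exercises the new of.h helpers; all DT names are hypothetical. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/of.h>

static int example_read_timer_props(void)
{
        struct device_node *np;
        const char *opts = NULL;
        u64 range[2];
        s32 trim;
        int ret;

        /* Assumed ':'-suffix option syntax; opts would then point at "fast". */
        np = of_find_node_opts_by_path("/soc/timer@1000:fast", &opts);
        if (!np)
                return -ENODEV;

        /* New in this diff: read two u64 cells from a single property. */
        ret = of_property_read_u64_array(np, "clock-ranges", range,
                                         ARRAY_SIZE(range));
        if (ret)
                goto out;

        /* Also new: signed 32-bit wrapper around of_property_read_u32(). */
        ret = of_property_read_s32(np, "trim-offset", &trim);
        if (!ret)
                pr_info("timer trim %d, range %llu..%llu, opts=%s\n", trim,
                        (unsigned long long)range[0],
                        (unsigned long long)range[1],
                        opts ? opts : "none");
out:
        of_node_put(np);
        return ret;
}

The overlay calls appear to follow the same convention as the rest of the API: of_overlay_create() returns an id on success (or -ENOTSUPP when CONFIG_OF_OVERLAY is disabled, per the stubs above), which is later handed to of_overlay_destroy().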
diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 8cb14eb393d6..d88e81be6368 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -106,7 +106,7 @@ extern int of_address_to_resource(struct device_node *dev, int index, struct resource *r); void __iomem *of_iomap(struct device_node *node, int index); void __iomem *of_io_request_and_map(struct device_node *device, - int index, char *name); + int index, const char *name); #else #include <linux/io.h> @@ -123,7 +123,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) } static inline void __iomem *of_io_request_and_map(struct device_node *device, - int index, char *name) + int index, const char *name) { return IOMEM_ERR_PTR(-EINVAL); } diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index 51a560f34bca..16c75547d725 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -1,12 +1,19 @@ #ifndef __OF_IOMMU_H #define __OF_IOMMU_H +#include <linux/device.h> +#include <linux/iommu.h> +#include <linux/of.h> + #ifdef CONFIG_OF_IOMMU extern int of_get_dma_window(struct device_node *dn, const char *prefix, int index, unsigned long *busno, dma_addr_t *addr, size_t *size); +extern void of_iommu_init(void); +extern struct iommu_ops *of_iommu_configure(struct device *dev); + #else static inline int of_get_dma_window(struct device_node *dn, const char *prefix, @@ -16,6 +23,22 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, return -EINVAL; } +static inline void of_iommu_init(void) { } +static inline struct iommu_ops *of_iommu_configure(struct device *dev) +{ + return NULL; +} + #endif /* CONFIG_OF_IOMMU */ +void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops); +struct iommu_ops *of_iommu_get_ops(struct device_node *np); + +extern struct of_device_id __iommu_of_table; + +typedef int (*of_iommu_init_fn)(struct device_node *); + +#define IOMMU_OF_DECLARE(name, compat, fn) \ + _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn) + #endif /* __OF_IOMMU_H */ diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h index c65a18a0cfdf..7e09244bb679 100644 --- a/include/linux/of_pdt.h +++ b/include/linux/of_pdt.h @@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size); /* for building the device tree */ extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); -extern void (*of_pdt_build_more)(struct device_node *dp, - struct device_node ***nextp); +extern void (*of_pdt_build_more)(struct device_node *dp); #endif /* _LINUX_OF_PDT_H */ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index c2b0627a2317..8a860f096c35 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root, static inline void of_platform_depopulate(struct device *parent) { } #endif +#ifdef CONFIG_OF_DYNAMIC +extern void of_platform_register_reconfig_notifier(void); +#else +static inline void of_platform_register_reconfig_notifier(void) { } +#endif + #endif /* _LINUX_OF_PLATFORM_H */ diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index f8322d9cd235..587bbdd31f5a 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h @@ -10,20 +10,20 @@ #define OMAP_MAILBOX_H typedef u32 mbox_msg_t; -struct omap_mbox; typedef int __bitwise omap_mbox_irq_t; #define IRQ_TX ((__force omap_mbox_irq_t) 1) #define IRQ_RX ((__force omap_mbox_irq_t) 2) -int 
omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg); +struct mbox_chan; +struct mbox_client; -struct omap_mbox *omap_mbox_get(const char *, struct notifier_block *nb); -void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb); +struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, + const char *chan_name); -void omap_mbox_save_ctx(struct omap_mbox *mbox); -void omap_mbox_restore_ctx(struct omap_mbox *mbox); -void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); -void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); +void omap_mbox_save_ctx(struct mbox_chan *chan); +void omap_mbox_restore_ctx(struct mbox_chan *chan); +void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); +void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); #endif /* OMAP_MAILBOX_H */ diff --git a/include/linux/oom.h b/include/linux/oom.h index e8d6e1058723..853698c721f7 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -92,6 +92,17 @@ static inline bool oom_gfp_allowed(gfp_t gfp_mask) extern struct task_struct *find_lock_task_mm(struct task_struct *p); +static inline bool task_will_free_mem(struct task_struct *task) +{ + /* + * A coredumping process may sleep for an extended period in exit_mm(), + * so the oom killer cannot assume that the process will promptly exit + * and release memory. + */ + return (task->flags & PF_EXITING) && + !(task->signal->flags & SIGNAL_GROUP_COREDUMP); +} + /* sysctls */ extern int sysctl_oom_dump_tasks; extern int sysctl_oom_kill_allocating_task; diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h deleted file mode 100644 index 22691f614043..000000000000 --- a/include/linux/page-debug-flags.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef LINUX_PAGE_DEBUG_FLAGS_H -#define LINUX_PAGE_DEBUG_FLAGS_H - -/* - * page->debug_flags bits: - * - * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to - * implement generic debug pagealloc feature. The pages are filled with - * poison patterns and set this flag after free_pages(). The poisoned - * pages are verified whether the patterns are not corrupted and clear - * the flag before alloc_pages(). - */ - -enum page_debug_flags { - PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */ - PAGE_DEBUG_FLAG_GUARD, -}; - -/* - * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably - * gets turned off when no debug features are enabling it! - */ - -#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS -#if !defined(CONFIG_PAGE_POISONING) && \ - !defined(CONFIG_PAGE_GUARD) \ -/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */ -#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features! -#endif -#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */ - -#endif /* LINUX_PAGE_DEBUG_FLAGS_H */ diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h deleted file mode 100644 index 5c831f1eca79..000000000000 --- a/include/linux/page_cgroup.h +++ /dev/null @@ -1,105 +0,0 @@ -#ifndef __LINUX_PAGE_CGROUP_H -#define __LINUX_PAGE_CGROUP_H - -enum { - /* flags for mem_cgroup */ - PCG_USED = 0x01, /* This page is charged to a memcg */ - PCG_MEM = 0x02, /* This page holds a memory charge */ - PCG_MEMSW = 0x04, /* This page holds a memory+swap charge */ -}; - -struct pglist_data; - -#ifdef CONFIG_MEMCG -struct mem_cgroup; - -/* - * Page Cgroup can be considered as an extended mem_map. - * A page_cgroup page is associated with every page descriptor. 
The - * page_cgroup helps us identify information about the cgroup - * All page cgroups are allocated at boot or memory hotplug event, - * then the page cgroup for pfn always exists. - */ -struct page_cgroup { - unsigned long flags; - struct mem_cgroup *mem_cgroup; -}; - -extern void pgdat_page_cgroup_init(struct pglist_data *pgdat); - -#ifdef CONFIG_SPARSEMEM -static inline void page_cgroup_init_flatmem(void) -{ -} -extern void page_cgroup_init(void); -#else -extern void page_cgroup_init_flatmem(void); -static inline void page_cgroup_init(void) -{ -} -#endif - -struct page_cgroup *lookup_page_cgroup(struct page *page); - -static inline int PageCgroupUsed(struct page_cgroup *pc) -{ - return !!(pc->flags & PCG_USED); -} -#else /* !CONFIG_MEMCG */ -struct page_cgroup; - -static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat) -{ -} - -static inline struct page_cgroup *lookup_page_cgroup(struct page *page) -{ - return NULL; -} - -static inline void page_cgroup_init(void) -{ -} - -static inline void page_cgroup_init_flatmem(void) -{ -} -#endif /* CONFIG_MEMCG */ - -#include <linux/swap.h> - -#ifdef CONFIG_MEMCG_SWAP -extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new); -extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); -extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); -extern int swap_cgroup_swapon(int type, unsigned long max_pages); -extern void swap_cgroup_swapoff(int type); -#else - -static inline -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) -{ - return 0; -} - -static inline -unsigned short lookup_swap_cgroup_id(swp_entry_t ent) -{ - return 0; -} - -static inline int -swap_cgroup_swapon(int type, unsigned long max_pages) -{ - return 0; -} - -static inline void swap_cgroup_swapoff(int type) -{ - return; -} - -#endif /* CONFIG_MEMCG_SWAP */ - -#endif /* __LINUX_PAGE_CGROUP_H */ diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h new file mode 100644 index 000000000000..955421575d16 --- /dev/null +++ b/include/linux/page_counter.h @@ -0,0 +1,51 @@ +#ifndef _LINUX_PAGE_COUNTER_H +#define _LINUX_PAGE_COUNTER_H + +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <asm/page.h> + +struct page_counter { + atomic_long_t count; + unsigned long limit; + struct page_counter *parent; + + /* legacy */ + unsigned long watermark; + unsigned long failcnt; +}; + +#if BITS_PER_LONG == 32 +#define PAGE_COUNTER_MAX LONG_MAX +#else +#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) +#endif + +static inline void page_counter_init(struct page_counter *counter, + struct page_counter *parent) +{ + atomic_long_set(&counter->count, 0); + counter->limit = PAGE_COUNTER_MAX; + counter->parent = parent; +} + +static inline unsigned long page_counter_read(struct page_counter *counter) +{ + return atomic_long_read(&counter->count); +} + +void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); +void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); +int page_counter_try_charge(struct page_counter *counter, + unsigned long nr_pages, + struct page_counter **fail); +void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); +int page_counter_limit(struct page_counter *counter, unsigned long limit); +int page_counter_memparse(const char *buf, unsigned long *nr_pages); + +static inline void page_counter_reset_watermark(struct page_counter *counter) +{ + counter->watermark = page_counter_read(counter); 
+} + +#endif /* _LINUX_PAGE_COUNTER_H */ diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h new file mode 100644 index 000000000000..d2a2c84c72d0 --- /dev/null +++ b/include/linux/page_ext.h @@ -0,0 +1,84 @@ +#ifndef __LINUX_PAGE_EXT_H +#define __LINUX_PAGE_EXT_H + +#include <linux/types.h> +#include <linux/stacktrace.h> + +struct pglist_data; +struct page_ext_operations { + bool (*need)(void); + void (*init)(void); +}; + +#ifdef CONFIG_PAGE_EXTENSION + +/* + * page_ext->flags bits: + * + * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to + * implement generic debug pagealloc feature. The pages are filled with + * poison patterns and set this flag after free_pages(). The poisoned + * pages are verified whether the patterns are not corrupted and clear + * the flag before alloc_pages(). + */ + +enum page_ext_flags { + PAGE_EXT_DEBUG_POISON, /* Page is poisoned */ + PAGE_EXT_DEBUG_GUARD, + PAGE_EXT_OWNER, +}; + +/* + * Page Extension can be considered as an extended mem_map. + * A page_ext page is associated with every page descriptor. The + * page_ext helps us add more information about the page. + * All page_ext are allocated at boot or memory hotplug event, + * then the page_ext for pfn always exists. + */ +struct page_ext { + unsigned long flags; +#ifdef CONFIG_PAGE_OWNER + unsigned int order; + gfp_t gfp_mask; + struct stack_trace trace; + unsigned long trace_entries[8]; +#endif +}; + +extern void pgdat_page_ext_init(struct pglist_data *pgdat); + +#ifdef CONFIG_SPARSEMEM +static inline void page_ext_init_flatmem(void) +{ +} +extern void page_ext_init(void); +#else +extern void page_ext_init_flatmem(void); +static inline void page_ext_init(void) +{ +} +#endif + +struct page_ext *lookup_page_ext(struct page *page); + +#else /* !CONFIG_PAGE_EXTENSION */ +struct page_ext; + +static inline void pgdat_page_ext_init(struct pglist_data *pgdat) +{ +} + +static inline struct page_ext *lookup_page_ext(struct page *page) +{ + return NULL; +} + +static inline void page_ext_init(void) +{ +} + +static inline void page_ext_init_flatmem(void) +{ +} +#endif /* CONFIG_PAGE_EXTENSION */ +#endif /* __LINUX_PAGE_EXT_H */ diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h new file mode 100644 index 000000000000..b48c3471c254 --- /dev/null +++ b/include/linux/page_owner.h @@ -0,0 +1,38 @@ +#ifndef __LINUX_PAGE_OWNER_H +#define __LINUX_PAGE_OWNER_H + +#ifdef CONFIG_PAGE_OWNER +extern bool page_owner_inited; +extern struct page_ext_operations page_owner_ops; + +extern void __reset_page_owner(struct page *page, unsigned int order); +extern void __set_page_owner(struct page *page, + unsigned int order, gfp_t gfp_mask); + +static inline void reset_page_owner(struct page *page, unsigned int order) +{ + if (likely(!page_owner_inited)) + return; + + __reset_page_owner(page, order); +} + +static inline void set_page_owner(struct page *page, + unsigned int order, gfp_t gfp_mask) +{ + if (likely(!page_owner_inited)) + return; + + __set_page_owner(page, order, gfp_mask); +} +#else +static inline void reset_page_owner(struct page *page, unsigned int order) +{ +} +static inline void set_page_owner(struct page *page, + unsigned int order, gfp_t gfp_mask) +{ +} + +#endif /* CONFIG_PAGE_OWNER */ +#endif /* __LINUX_PAGE_OWNER_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index c888c5e4e225..360a966a97a5 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1005,6 +1005,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); int 
pci_save_state(struct pci_dev *dev); void pci_restore_state(struct pci_dev *dev); struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); +int pci_load_saved_state(struct pci_dev *dev, + struct pci_saved_state *state); int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state); struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 2706ee9a4327..8c7895061121 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -109,7 +109,6 @@ struct hotplug_slot { struct list_head slot_list; struct pci_slot *pci_slot; }; -#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 97fb9f69aaed..e63c02a93f6b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -564,6 +564,7 @@ #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 +#define PCI_DEVICE_ID_AMD_NL_USB 0x7912 #define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 420032d41d27..57f3a1c550dc 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -254,8 +254,6 @@ do { \ #endif /* CONFIG_SMP */ #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) -#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) -#define __get_cpu_var(var) (*this_cpu_ptr(&(var))) /* * Must be an lvalue. Since @var must be a simple identifier, diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 51ce60c35f4c..b4337646388b 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -128,10 +128,8 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) static inline bool __ref_is_percpu(struct percpu_ref *ref, unsigned long __percpu **percpu_countp) { - unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr); - /* paired with smp_store_release() in percpu_ref_reinit() */ - smp_read_barrier_depends(); + unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr); /* * Theoretically, the following could test just ATOMIC; however, @@ -147,28 +145,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, } /** - * percpu_ref_get - increment a percpu refcount + * percpu_ref_get_many - increment a percpu refcount * @ref: percpu_ref to get + * @nr: number of references to get * - * Analagous to atomic_long_inc(). + * Analogous to atomic_long_add(). * * This function is safe to call as long as @ref is between init and exit. */ -static inline void percpu_ref_get(struct percpu_ref *ref) +static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) - this_cpu_inc(*percpu_count); + this_cpu_add(*percpu_count, nr); else - atomic_long_inc(&ref->count); + atomic_long_add(nr, &ref->count); rcu_read_unlock_sched(); } /** + * percpu_ref_get - increment a percpu refcount + * @ref: percpu_ref to get + * + * Analagous to atomic_long_inc(). + * + * This function is safe to call as long as @ref is between init and exit. 
+ */ +static inline void percpu_ref_get(struct percpu_ref *ref) +{ + percpu_ref_get_many(ref, 1); +} + +/** * percpu_ref_tryget - try to increment a percpu refcount * @ref: percpu_ref to try-get * @@ -231,29 +243,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) } /** - * percpu_ref_put - decrement a percpu refcount + * percpu_ref_put_many - decrement a percpu refcount * @ref: percpu_ref to put + * @nr: number of references to put * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) * * This function is safe to call as long as @ref is between init and exit. */ -static inline void percpu_ref_put(struct percpu_ref *ref) +static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) - this_cpu_dec(*percpu_count); - else if (unlikely(atomic_long_dec_and_test(&ref->count))) + this_cpu_sub(*percpu_count, nr); + else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) ref->release(ref); rcu_read_unlock_sched(); } /** + * percpu_ref_put - decrement a percpu refcount + * @ref: percpu_ref to put + * + * Decrement the refcount, and if 0, call the release function (which was passed + * to percpu_ref_init()) + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline void percpu_ref_put(struct percpu_ref *ref) +{ + percpu_ref_put_many(ref, 1); +} + +/** * percpu_ref_is_zero - test whether a percpu refcount reached zero * @ref: percpu_ref to test * diff --git a/include/linux/percpu.h b/include/linux/percpu.h index a3aa63e47637..caebf2a758dc 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -5,6 +5,7 @@ #include <linux/preempt.h> #include <linux/smp.h> #include <linux/cpumask.h> +#include <linux/printk.h> #include <linux/pfn.h> #include <linux/init.h> @@ -134,4 +135,7 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) +/* To avoid include hell, as printk can not declare this, we declare it here */ +DECLARE_PER_CPU(printk_func_t, printk_func); + #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index d090cfcaa167..22af8f8f5802 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -433,6 +433,7 @@ struct phy_device { * by this PHY * flags: A bitfield defining certain other features this PHY * supports (like interrupts) + * driver_data: static driver data * * The drivers must implement config_aneg and read_status. All * other functions are optional. Note that none of these @@ -448,6 +449,7 @@ struct phy_driver { unsigned int phy_id_mask; u32 features; u32 flags; + const void *driver_data; /* * Called to issue a PHY software reset @@ -772,4 +774,28 @@ int __init mdio_bus_init(void); void mdio_bus_exit(void); extern struct bus_type mdio_bus_type; + +/** + * module_phy_driver() - Helper macro for registering PHY drivers + * @__phy_drivers: array of PHY drivers to register + * + * Helper macro for PHY drivers which do not do anything special in module + * init/exit. Each module may only use this macro once, and calling it + * replaces module_init() and module_exit(). 
+ */ +#define phy_module_driver(__phy_drivers, __count) \ +static int __init phy_module_init(void) \ +{ \ + return phy_drivers_register(__phy_drivers, __count); \ +} \ +module_init(phy_module_init); \ +static void __exit phy_module_exit(void) \ +{ \ + phy_drivers_unregister(__phy_drivers, __count); \ +} \ +module_exit(phy_module_exit) + +#define module_phy_driver(__phy_drivers) \ + phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) + #endif /* __PHY_H */ diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 8cb6f815475b..a0197fa1b116 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -61,7 +61,6 @@ struct phy { struct device dev; int id; const struct phy_ops *ops; - struct phy_init_data *init_data; struct mutex mutex; int init_count; int power_count; @@ -84,33 +83,14 @@ struct phy_provider { struct of_phandle_args *args); }; -/** - * struct phy_consumer - represents the phy consumer - * @dev_name: the device name of the controller that will use this PHY device - * @port: name given to the consumer port - */ -struct phy_consumer { - const char *dev_name; - const char *port; -}; - -/** - * struct phy_init_data - contains the list of PHY consumers - * @num_consumers: number of consumers for this PHY device - * @consumers: list of PHY consumers - */ -struct phy_init_data { - unsigned int num_consumers; - struct phy_consumer *consumers; +struct phy_lookup { + struct list_head node; + const char *dev_id; + const char *con_id; + struct phy *phy; }; -#define PHY_CONSUMER(_dev_name, _port) \ -{ \ - .dev_name = _dev_name, \ - .port = _port, \ -} - -#define to_phy(dev) (container_of((dev), struct phy, dev)) +#define to_phy(a) (container_of((a), struct phy, dev)) #define of_phy_provider_register(dev, xlate) \ __of_phy_provider_register((dev), THIS_MODULE, (xlate)) @@ -159,10 +139,9 @@ struct phy *of_phy_get(struct device_node *np, const char *con_id); struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args *args); struct phy *phy_create(struct device *dev, struct device_node *node, - const struct phy_ops *ops, - struct phy_init_data *init_data); + const struct phy_ops *ops); struct phy *devm_phy_create(struct device *dev, struct device_node *node, - const struct phy_ops *ops, struct phy_init_data *init_data); + const struct phy_ops *ops); void phy_destroy(struct phy *phy); void devm_phy_destroy(struct device *dev, struct phy *phy); struct phy_provider *__of_phy_provider_register(struct device *dev, @@ -174,6 +153,8 @@ struct phy_provider *__devm_of_phy_provider_register(struct device *dev, void of_phy_provider_unregister(struct phy_provider *phy_provider); void devm_of_phy_provider_unregister(struct device *dev, struct phy_provider *phy_provider); +int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id); +void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id); #else static inline int phy_pm_runtime_get(struct phy *phy) { @@ -301,16 +282,14 @@ static inline struct phy *of_phy_simple_xlate(struct device *dev, static inline struct phy *phy_create(struct device *dev, struct device_node *node, - const struct phy_ops *ops, - struct phy_init_data *init_data) + const struct phy_ops *ops) { return ERR_PTR(-ENOSYS); } static inline struct phy *devm_phy_create(struct device *dev, struct device_node *node, - const struct phy_ops *ops, - struct phy_init_data *init_data) + const struct phy_ops *ops) { return ERR_PTR(-ENOSYS); } @@ -345,6 +324,13 @@ static inline void devm_of_phy_provider_unregister(struct 
device *dev, struct phy_provider *phy_provider) { } +static inline int +phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id) +{ + return 0; +} +static inline void phy_remove_lookup(struct phy *phy, const char *con_id, + const char *dev_id) { } #endif #endif /* __DRIVERS_PHY_H */ diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index f2ca1b459377..7e75bfe37cc7 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -11,7 +11,7 @@ struct fixed_phy_status { struct device_node; -#ifdef CONFIG_FIXED_PHY +#if IS_ENABLED(CONFIG_FIXED_PHY) extern int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status); extern struct phy_device *fixed_phy_register(unsigned int irq, diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 1997ffc295a7..b9cf6c51b181 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -8,6 +8,7 @@ #include <linux/threads.h> #include <linux/nsproxy.h> #include <linux/kref.h> +#include <linux/ns_common.h> struct pidmap { atomic_t nr_free; @@ -43,7 +44,7 @@ struct pid_namespace { kgid_t pid_gid; int hide_pid; int reboot; /* group exit code if this pidns was rebooted */ - unsigned int proc_inum; + struct ns_common ns; }; extern struct pid_namespace init_pid_ns; diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index a6591c693ebb..5e0bc779e6c5 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h @@ -27,6 +27,7 @@ struct samsung_i2s { #define QUIRK_NO_MUXPSR (1 << 2) #define QUIRK_NEED_RSTCLR (1 << 3) #define QUIRK_SUPPORTS_TDM (1 << 4) +#define QUIRK_SUPPORTS_IDMA (1 << 5) /* Quirks of the I2S controller */ u32 quirks; dma_addr_t idma_addr; diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h new file mode 100644 index 000000000000..26af54321958 --- /dev/null +++ b/include/linux/platform_data/bcmgenet.h @@ -0,0 +1,18 @@ +#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__ +#define __LINUX_PLATFORM_DATA_BCMGENET_H__ + +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/phy.h> + +struct bcmgenet_platform_data { + bool mdio_enabled; + phy_interface_t phy_interface; + int phy_address; + int phy_speed; + int phy_duplex; + u8 mac_address[ETH_ALEN]; + int genet_version; +}; + +#endif diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 6a1357d31871..7d964e787299 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -41,6 +41,7 @@ enum sdma_peripheral_type { IMX_DMATYPE_ESAI, /* ESAI */ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ + IMX_DMATYPE_SAI, /* SAI */ }; enum imx_dma_prio { diff --git a/include/linux/platform_data/dwc3-exynos.h b/include/linux/platform_data/dwc3-exynos.h deleted file mode 100644 index 5eb7da9b3772..000000000000 --- a/include/linux/platform_data/dwc3-exynos.h +++ /dev/null @@ -1,24 +0,0 @@ -/** - * dwc3-exynos.h - Samsung EXYNOS DWC3 Specific Glue layer, header. - * - * Copyright (c) 2012 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * Author: Anton Tikhomirov <av.tikhomirov@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef _DWC3_EXYNOS_H_ -#define _DWC3_EXYNOS_H_ - -struct dwc3_exynos_data { - int phy_type; - int (*phy_init)(struct platform_device *pdev, int type); - int (*phy_exit)(struct platform_device *pdev, int type); -}; - -#endif /* _DWC3_EXYNOS_H_ */ diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h index 1b2ba24e4e03..9c7fd1efe495 100644 --- a/include/linux/platform_data/lp855x.h +++ b/include/linux/platform_data/lp855x.h @@ -136,6 +136,7 @@ struct lp855x_rom_data { Only valid when mode is PWM_BASED. * @size_program : total size of lp855x_rom_data * @rom_data : list of new eeprom/eprom registers + * @supply : regulator that supplies 3V input */ struct lp855x_platform_data { const char *name; @@ -144,6 +145,7 @@ struct lp855x_platform_data { unsigned int period_ns; int size_program; struct lp855x_rom_data *rom_data; + struct regulator *supply; }; #endif diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h deleted file mode 100644 index a5f045e1d8fe..000000000000 --- a/include/linux/platform_data/rcar-du.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * rcar_du.h -- R-Car Display Unit DRM driver - * - * Copyright (C) 2013 Renesas Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __RCAR_DU_H__ -#define __RCAR_DU_H__ - -#include <video/videomode.h> - -enum rcar_du_output { - RCAR_DU_OUTPUT_DPAD0, - RCAR_DU_OUTPUT_DPAD1, - RCAR_DU_OUTPUT_LVDS0, - RCAR_DU_OUTPUT_LVDS1, - RCAR_DU_OUTPUT_TCON, - RCAR_DU_OUTPUT_MAX, -}; - -enum rcar_du_encoder_type { - RCAR_DU_ENCODER_UNUSED = 0, - RCAR_DU_ENCODER_NONE, - RCAR_DU_ENCODER_VGA, - RCAR_DU_ENCODER_LVDS, -}; - -struct rcar_du_panel_data { - unsigned int width_mm; /* Panel width in mm */ - unsigned int height_mm; /* Panel height in mm */ - struct videomode mode; -}; - -struct rcar_du_connector_lvds_data { - struct rcar_du_panel_data panel; -}; - -struct rcar_du_connector_vga_data { - /* TODO: Add DDC information for EDID retrieval */ -}; - -/* - * struct rcar_du_encoder_data - Encoder platform data - * @type: the encoder type (RCAR_DU_ENCODER_*) - * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*) - * @connector.lvds: platform data for LVDS connectors - * @connector.vga: platform data for VGA connectors - * - * Encoder platform data describes an on-board encoder, its associated DU SoC - * output, and the connector. 
- */ -struct rcar_du_encoder_data { - enum rcar_du_encoder_type type; - enum rcar_du_output output; - - union { - struct rcar_du_connector_lvds_data lvds; - struct rcar_du_connector_vga_data vga; - } connector; -}; - -struct rcar_du_platform_data { - struct rcar_du_encoder_data *encoders; - unsigned int num_encoders; -}; - -#endif /* __RCAR_DU_H__ */ diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h index 1730312398ff..5087fff96d86 100644 --- a/include/linux/platform_data/st21nfca.h +++ b/include/linux/platform_data/st21nfca.h @@ -24,7 +24,6 @@ #define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" struct st21nfca_nfc_platform_data { - unsigned int gpio_irq; unsigned int gpio_ena; unsigned int irq_polarity; }; diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h index 2d11f1f5efab..c3b432f5b63e 100644 --- a/include/linux/platform_data/st21nfcb.h +++ b/include/linux/platform_data/st21nfcb.h @@ -24,7 +24,6 @@ #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" struct st21nfcb_nfc_platform_data { - unsigned int gpio_irq; unsigned int gpio_reset; unsigned int irq_polarity; }; diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 153d303af7eb..ae4882ca4a64 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -197,8 +197,10 @@ extern void platform_driver_unregister(struct platform_driver *); /* non-hotpluggable platform devices may use this so that probe() and * its support may live in __init sections, conserving runtime memory. */ -extern int platform_driver_probe(struct platform_driver *driver, - int (*probe)(struct platform_device *)); +#define platform_driver_probe(drv, probe) \ + __platform_driver_probe(drv, probe, THIS_MODULE) +extern int __platform_driver_probe(struct platform_driver *driver, + int (*probe)(struct platform_device *), struct module *module); static inline void *platform_get_drvdata(const struct platform_device *pdev) { @@ -238,10 +240,12 @@ static void __exit __platform_driver##_exit(void) \ } \ module_exit(__platform_driver##_exit); -extern struct platform_device *platform_create_bundle( +#define platform_create_bundle(driver, probe, res, n_res, data, size) \ + __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE) +extern struct platform_device *__platform_create_bundle( struct platform_driver *driver, int (*probe)(struct platform_device *), struct resource *res, unsigned int n_res, - const void *data, size_t size); + const void *data, size_t size, struct module *module); /* early platform driver interface */ struct early_platform_driver { diff --git a/include/linux/plist.h b/include/linux/plist.h index 8b6c970cff6c..97883604a3c5 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -176,7 +176,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); * plist_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop counter * @head: the head for your list - * @mem: the name of the list_struct within the struct + * @mem: the name of the list_head within the struct */ #define plist_for_each_entry(pos, head, mem) \ list_for_each_entry(pos, &(head)->node_list, mem.node_list) @@ -185,7 +185,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); * plist_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor * @head: the head for your list - * @m: the name of the list_struct 
within the struct + * @m: the name of the list_head within the struct * * Continue to iterate over list of given type, continuing after * the current position. @@ -198,7 +198,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); * @pos: the type * to use as a loop counter * @n: another type * to use as temporary storage * @head: the head for your list - * @m: the name of the list_struct within the struct + * @m: the name of the list_head within the struct * * Iterate over list of given type, safe against removal of list entry. */ @@ -229,7 +229,7 @@ static inline int plist_node_empty(const struct plist_node *node) * plist_first_entry - get the struct for the first entry * @head: the &struct plist_head pointer * @type: the type of the struct this is embedded in - * @member: the name of the list_struct within the struct + * @member: the name of the list_head within the struct */ #ifdef CONFIG_DEBUG_PI_LIST # define plist_first_entry(head, type, member) \ @@ -246,7 +246,7 @@ static inline int plist_node_empty(const struct plist_node *node) * plist_last_entry - get the struct for the last entry * @head: the &struct plist_head pointer * @type: the type of the struct this is embedded in - * @member: the name of the list_struct within the struct + * @member: the name of the list_head within the struct */ #ifdef CONFIG_DEBUG_PI_LIST # define plist_last_entry(head, type, member) \ diff --git a/include/linux/pm.h b/include/linux/pm.h index 383fd68aaee1..8b5976364619 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -342,7 +342,7 @@ struct dev_pm_ops { #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #endif -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ .runtime_suspend = suspend_fn, \ .runtime_resume = resume_fn, \ @@ -351,15 +351,6 @@ struct dev_pm_ops { #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) #endif -#ifdef CONFIG_PM -#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ - .runtime_suspend = suspend_fn, \ - .runtime_resume = resume_fn, \ - .runtime_idle = idle_fn, -#else -#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) -#endif - /* * Use this if you want to use the same suspend and resume callbacks for suspend * to RAM and hibernation. 
@@ -538,11 +529,7 @@ enum rpm_request { }; struct wakeup_source; - -struct pm_domain_data { - struct list_head list_node; - struct device *dev; -}; +struct pm_domain_data; struct pm_subsys_data { spinlock_t lock; @@ -576,7 +563,7 @@ struct dev_pm_info { #else unsigned int should_wakeup:1; #endif -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 8348866e7b05..0b0039634410 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -18,6 +18,8 @@ struct pm_clk_notifier_block { char *con_ids[]; }; +struct clk; + #ifdef CONFIG_PM_CLK static inline bool pm_clk_no_clocks(struct device *dev) { @@ -29,6 +31,7 @@ extern void pm_clk_init(struct device *dev); extern int pm_clk_create(struct device *dev); extern void pm_clk_destroy(struct device *dev); extern int pm_clk_add(struct device *dev, const char *con_id); +extern int pm_clk_add_clk(struct device *dev, struct clk *clk); extern void pm_clk_remove(struct device *dev, const char *con_id); extern int pm_clk_suspend(struct device *dev); extern int pm_clk_resume(struct device *dev); @@ -51,6 +54,11 @@ static inline int pm_clk_add(struct device *dev, const char *con_id) { return -EINVAL; } + +static inline int pm_clk_add_clk(struct device *dev, struct clk *clk) +{ + return -EINVAL; +} static inline void pm_clk_remove(struct device *dev, const char *con_id) { } diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 2e0e06daf8c0..6cd20d5e651b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -17,6 +17,9 @@ #include <linux/notifier.h> #include <linux/cpuidle.h> +/* Defines used for the flags field in the struct generic_pm_domain */ +#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ + enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ @@ -76,6 +79,7 @@ struct generic_pm_domain { struct device *dev); void (*detach_dev)(struct generic_pm_domain *domain, struct device *dev); + unsigned int flags; /* Bit field of configs for genpd */ }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) @@ -100,6 +104,11 @@ struct gpd_timing_data { bool cached_stop_ok; }; +struct pm_domain_data { + struct list_head list_node; + struct device *dev; +}; + struct generic_pm_domain_data { struct pm_domain_data base; struct gpd_timing_data td; @@ -147,6 +156,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd, extern int pm_genpd_poweron(struct generic_pm_domain *genpd); extern int pm_genpd_name_poweron(const char *domain_name); +extern void pm_genpd_poweroff_unused(void); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -221,6 +231,7 @@ static inline int pm_genpd_name_poweron(const char *domain_name) { return -ENOSYS; } +static inline void pm_genpd_poweroff_unused(void) {} #define simple_qos_governor NULL #define pm_domain_always_on_gov NULL #endif @@ -237,12 +248,6 @@ static inline int pm_genpd_name_add_device(const char *domain_name, return __pm_genpd_name_add_device(domain_name, dev, NULL); } -#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME -extern void pm_genpd_poweroff_unused(void); -#else -static inline void pm_genpd_poweroff_unused(void) {} -#endif - #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP extern void pm_genpd_syscore_poweroff(struct device *dev); extern void 
pm_genpd_syscore_poweron(struct device *dev); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 0330217abfad..cec2d4540914 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -21,7 +21,7 @@ struct dev_pm_opp; struct device; enum dev_pm_opp_event { - OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, + OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, }; #if defined(CONFIG_PM_OPP) @@ -44,6 +44,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt); +void dev_pm_opp_remove(struct device *dev, unsigned long freq); int dev_pm_opp_enable(struct device *dev, unsigned long freq); @@ -90,6 +91,10 @@ static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, return -EINVAL; } +static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) +{ +} + static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) { return 0; @@ -109,11 +114,16 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int of_init_opp_table(struct device *dev); +void of_free_opp_table(struct device *dev); #else static inline int of_init_opp_table(struct device *dev) { return -EINVAL; } + +static inline void of_free_opp_table(struct device *dev) +{ +} #endif #endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 636e82834506..7b3ae0cffc05 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -154,6 +154,23 @@ void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value); +int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); +void dev_pm_qos_hide_latency_limit(struct device *dev); +int dev_pm_qos_expose_flags(struct device *dev, s32 value); +void dev_pm_qos_hide_flags(struct device *dev); +int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); + +static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) +{ + return dev->power.qos->resume_latency_req->data.pnode.prio; +} + +static inline s32 dev_pm_qos_requested_flags(struct device *dev) +{ + return dev->power.qos->flags_req->data.flr.flags; +} #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) @@ -200,27 +217,6 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev, enum dev_pm_qos_req_type type, s32 value) { return 0; } -#endif - -#ifdef CONFIG_PM_RUNTIME -int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); -void dev_pm_qos_hide_latency_limit(struct device *dev); -int dev_pm_qos_expose_flags(struct device *dev, s32 value); -void dev_pm_qos_hide_flags(struct device *dev); -int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); -s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); -int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); - -static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) -{ - return dev->power.qos->resume_latency_req->data.pnode.prio; -} - -static inline s32 dev_pm_qos_requested_flags(struct device *dev) -{ - return dev->power.qos->flags_req->data.flr.flags; -} -#else static inline int 
dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) { return 0; } static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 367f49b9a1c9..30e84d48bfea 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -35,16 +35,6 @@ extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); extern int pm_runtime_force_resume(struct device *dev); -#else -static inline bool queue_pm_work(struct work_struct *work) { return false; } - -static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } -static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } -static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } -static inline int pm_runtime_force_resume(struct device *dev) { return 0; } -#endif - -#ifdef CONFIG_PM_RUNTIME extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); @@ -128,7 +118,19 @@ static inline void pm_runtime_mark_last_busy(struct device *dev) ACCESS_ONCE(dev->power.last_busy) = jiffies; } -#else /* !CONFIG_PM_RUNTIME */ +static inline bool pm_runtime_is_irq_safe(struct device *dev) +{ + return dev->power.irq_safe; +} + +#else /* !CONFIG_PM */ + +static inline bool queue_pm_work(struct work_struct *work) { return false; } + +static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } +static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } +static inline int pm_runtime_force_resume(struct device *dev) { return 0; } static inline int __pm_runtime_idle(struct device *dev, int rpmflags) { @@ -167,6 +169,7 @@ static inline bool pm_runtime_enabled(struct device *dev) { return false; } static inline void pm_runtime_no_callbacks(struct device *dev) {} static inline void pm_runtime_irq_safe(struct device *dev) {} +static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } static inline void pm_runtime_mark_last_busy(struct device *dev) {} @@ -179,7 +182,7 @@ static inline unsigned long pm_runtime_autosuspend_expiration( static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} -#endif /* !CONFIG_PM_RUNTIME */ +#endif /* !CONFIG_PM */ static inline int pm_runtime_idle(struct device *dev) { diff --git a/include/linux/printk.h b/include/linux/printk.h index d78125f73ac4..c8f170324e64 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -118,12 +118,13 @@ int no_printk(const char *fmt, ...) #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) void early_printk(const char *fmt, ...); -void early_vprintk(const char *fmt, va_list ap); #else static inline __printf(1, 2) __cold void early_printk(const char *s, ...) 
{ } #endif +typedef int(*printk_func_t)(const char *fmt, va_list args); + #ifdef CONFIG_PRINTK asmlinkage __printf(5, 0) int vprintk_emit(int facility, int level, diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 34a1e105bef4..42dfc615dbf8 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h @@ -4,21 +4,18 @@ #ifndef _LINUX_PROC_NS_H #define _LINUX_PROC_NS_H +#include <linux/ns_common.h> + struct pid_namespace; struct nsproxy; +struct path; struct proc_ns_operations { const char *name; int type; - void *(*get)(struct task_struct *task); - void (*put)(void *ns); - int (*install)(struct nsproxy *nsproxy, void *ns); - unsigned int (*inum)(void *ns); -}; - -struct proc_ns { - void *ns; - const struct proc_ns_operations *ns_ops; + struct ns_common *(*get)(struct task_struct *task); + void (*put)(struct ns_common *ns); + int (*install)(struct nsproxy *nsproxy, struct ns_common *ns); }; extern const struct proc_ns_operations netns_operations; @@ -43,32 +40,38 @@ enum { extern int pid_ns_prepare_proc(struct pid_namespace *ns); extern void pid_ns_release_proc(struct pid_namespace *ns); -extern struct file *proc_ns_fget(int fd); -extern struct proc_ns *get_proc_ns(struct inode *); extern int proc_alloc_inum(unsigned int *pino); extern void proc_free_inum(unsigned int inum); -extern bool proc_ns_inode(struct inode *inode); #else /* CONFIG_PROC_FS */ static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; } static inline void pid_ns_release_proc(struct pid_namespace *ns) {} -static inline struct file *proc_ns_fget(int fd) -{ - return ERR_PTR(-EINVAL); -} - -static inline struct proc_ns *get_proc_ns(struct inode *inode) { return NULL; } - static inline int proc_alloc_inum(unsigned int *inum) { *inum = 1; return 0; } static inline void proc_free_inum(unsigned int inum) {} -static inline bool proc_ns_inode(struct inode *inode) { return false; } #endif /* CONFIG_PROC_FS */ +static inline int ns_alloc_inum(struct ns_common *ns) +{ + atomic_long_set(&ns->stashed, 0); + return proc_alloc_inum(&ns->inum); +} + +#define ns_free_inum(ns) proc_free_inum((ns)->inum) + +extern struct file *proc_ns_fget(int fd); +#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) +extern void *ns_get_path(struct path *path, struct task_struct *task, + const struct proc_ns_operations *ns_ops); + +extern int ns_get_name(char *buf, size_t size, struct task_struct *task, + const struct proc_ns_operations *ns_ops); +extern void nsfs_init(void); + #endif /* _LINUX_PROC_NS_H */ diff --git a/include/linux/property.h b/include/linux/property.h new file mode 100644 index 000000000000..a6a3d98bd7e9 --- /dev/null +++ b/include/linux/property.h @@ -0,0 +1,143 @@ +/* + * property.h - Unified device property interface. + * + * Copyright (C) 2014, Intel Corporation + * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _LINUX_PROPERTY_H_ +#define _LINUX_PROPERTY_H_ + +#include <linux/types.h> + +struct device; + +enum dev_prop_type { + DEV_PROP_U8, + DEV_PROP_U16, + DEV_PROP_U32, + DEV_PROP_U64, + DEV_PROP_STRING, + DEV_PROP_MAX, +}; + +bool device_property_present(struct device *dev, const char *propname); +int device_property_read_u8_array(struct device *dev, const char *propname, + u8 *val, size_t nval); +int device_property_read_u16_array(struct device *dev, const char *propname, + u16 *val, size_t nval); +int device_property_read_u32_array(struct device *dev, const char *propname, + u32 *val, size_t nval); +int device_property_read_u64_array(struct device *dev, const char *propname, + u64 *val, size_t nval); +int device_property_read_string_array(struct device *dev, const char *propname, + const char **val, size_t nval); +int device_property_read_string(struct device *dev, const char *propname, + const char **val); + +enum fwnode_type { + FWNODE_INVALID = 0, + FWNODE_OF, + FWNODE_ACPI, +}; + +struct fwnode_handle { + enum fwnode_type type; +}; + +bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); +int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, + const char *propname, u8 *val, + size_t nval); +int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, + const char *propname, u16 *val, + size_t nval); +int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, + const char *propname, u32 *val, + size_t nval); +int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, + const char *propname, u64 *val, + size_t nval); +int fwnode_property_read_string_array(struct fwnode_handle *fwnode, + const char *propname, const char **val, + size_t nval); +int fwnode_property_read_string(struct fwnode_handle *fwnode, + const char *propname, const char **val); + +struct fwnode_handle *device_get_next_child_node(struct device *dev, + struct fwnode_handle *child); + +#define device_for_each_child_node(dev, child) \ + for (child = device_get_next_child_node(dev, NULL); child; \ + child = device_get_next_child_node(dev, child)) + +void fwnode_handle_put(struct fwnode_handle *fwnode); + +unsigned int device_get_child_node_count(struct device *dev); + +static inline bool device_property_read_bool(struct device *dev, + const char *propname) +{ + return device_property_present(dev, propname); +} + +static inline int device_property_read_u8(struct device *dev, + const char *propname, u8 *val) +{ + return device_property_read_u8_array(dev, propname, val, 1); +} + +static inline int device_property_read_u16(struct device *dev, + const char *propname, u16 *val) +{ + return device_property_read_u16_array(dev, propname, val, 1); +} + +static inline int device_property_read_u32(struct device *dev, + const char *propname, u32 *val) +{ + return device_property_read_u32_array(dev, propname, val, 1); +} + +static inline int device_property_read_u64(struct device *dev, + const char *propname, u64 *val) +{ + return device_property_read_u64_array(dev, propname, val, 1); +} + +static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode, + const char *propname) +{ + return fwnode_property_present(fwnode, propname); +} + +static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode, + const char *propname, u8 *val) +{ + return fwnode_property_read_u8_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode, + const char *propname, u16 *val) +{ + return 
fwnode_property_read_u16_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode, + const char *propname, u32 *val) +{ + return fwnode_property_read_u32_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode, + const char *propname, u64 *val) +{ + return fwnode_property_read_u64_array(fwnode, propname, val, 1); +} + +#endif /* _LINUX_PROPERTY_H_ */ diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9974975d40db..4af3fdc85b01 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -53,7 +53,8 @@ struct persistent_ram_zone { }; struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, - u32 sig, struct persistent_ram_ecc_info *ecc_info); + u32 sig, struct persistent_ram_ecc_info *ecc_info, + unsigned int memtype); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); @@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, struct ramoops_platform_data { unsigned long mem_size; unsigned long mem_address; + unsigned int mem_type; unsigned long record_size; unsigned long console_size; unsigned long ftrace_size; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index cc79eff4a1ad..987a73a40ef8 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -52,7 +52,7 @@ extern void ptrace_notify(int exit_code); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent); extern void __ptrace_unlink(struct task_struct *child); -extern void exit_ptrace(struct task_struct *tracer); +extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_READ 0x01 #define PTRACE_MODE_ATTACH 0x02 #define PTRACE_MODE_NOAUDIT 0x04 diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h index 18d75e795606..e1ab6e86cdb3 100644 --- a/include/linux/pxa168_eth.h +++ b/include/linux/pxa168_eth.h @@ -4,6 +4,8 @@ #ifndef __LINUX_PXA168_ETH_H #define __LINUX_PXA168_ETH_H +#include <linux/phy.h> + struct pxa168_eth_platform_data { int port_number; int phy_addr; @@ -13,6 +15,7 @@ struct pxa168_eth_platform_data { */ int speed; /* 0, SPEED_10, SPEED_100 */ int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ + phy_interface_t intf; /* * Override default RX/TX queue sizes if nonzero. 
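A minimal usage sketch for the unified device property interface declared in include/linux/property.h above, assuming a hypothetical driver: the foo_* identifiers and the "chan-count", "label" and "reg" property names are illustrative only, chosen to show how the single-value helpers wrap the *_array() readers and how device_for_each_child_node() walks firmware child nodes whether they come from DT or ACPI _DSD.

#include <linux/device.h>
#include <linux/property.h>

static int foo_probe(struct device *dev)
{
	struct fwnode_handle *child;
	const char *label;
	u32 nchan;
	int ret;

	/* Single-value read; wraps device_property_read_u32_array(..., 1). */
	ret = device_property_read_u32(dev, "chan-count", &nchan);
	if (ret)
		return ret;

	/* Optional string property with a fallback. */
	if (device_property_read_string(dev, "label", &label))
		label = "unnamed";

	/* Iterate child firmware nodes (DT nodes or ACPI _DSD children). */
	device_for_each_child_node(dev, child) {
		u32 reg;

		if (fwnode_property_read_u32(child, "reg", &reg))
			continue;
		/* ... register channel 'reg' ... */
	}

	return 0;
}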
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index f2b405116166..77aed9ea1d26 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -108,6 +108,25 @@ #define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ #endif +/* QUARK_X1000 SSCR0 bit definition */ +#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */ +#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ +#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ +#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ + +#define RX_THRESH_QUARK_X1000_DFLT 1 +#define TX_THRESH_QUARK_X1000_DFLT 16 + +#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */ +#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */ + +#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */ +#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ +#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ +#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ +#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ +#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ + /* extra bits in PXA255, PXA26x and PXA27x SSP ports */ #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ #define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ @@ -175,6 +194,7 @@ enum pxa_ssp_type { PXA910_SSP, CE4100_SSP, LPSS_SSP, + QUARK_X1000_SSP, }; struct ssp_device { diff --git a/include/linux/quota.h b/include/linux/quota.h index 80d345a3524c..50978b781a19 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -56,6 +56,11 @@ enum quota_type { PRJQUOTA = 2, /* element used for project quotas */ }; +/* Masks for quota types when used as a bitmask */ +#define QTYPE_MASK_USR (1 << USRQUOTA) +#define QTYPE_MASK_GRP (1 << GRPQUOTA) +#define QTYPE_MASK_PRJ (1 << PRJQUOTA) + typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ typedef long long qsize_t; /* Type in which we store sizes */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 1d3eee594cd6..f23538a6e411 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -64,10 +64,10 @@ void dquot_destroy(struct dquot *dquot); int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); void __dquot_free_space(struct inode *inode, qsize_t number, int flags); -int dquot_alloc_inode(const struct inode *inode); +int dquot_alloc_inode(struct inode *inode); int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); -void dquot_free_inode(const struct inode *inode); +void dquot_free_inode(struct inode *inode); void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); int dquot_disable(struct super_block *sb, int type, unsigned int flags); @@ -213,12 +213,12 @@ static inline void dquot_drop(struct inode *inode) { } -static inline int dquot_alloc_inode(const struct inode *inode) +static inline int dquot_alloc_inode(struct inode *inode) { return 0; } -static inline void dquot_free_inode(const struct inode *inode) +static inline void dquot_free_inode(struct inode *inode) { } diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 0a260d8a18bf..18102529254e 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h @@ -17,14 +17,20 @@ struct ratelimit_state { unsigned long begin; }; -#define 
DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ - \ - struct ratelimit_state name = { \ +#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ .interval = interval_init, \ .burst = burst_init, \ } +#define RATELIMIT_STATE_INIT_DISABLED \ + RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) + +#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ + \ + struct ratelimit_state name = \ + RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ + static inline void ratelimit_state_init(struct ratelimit_state *rs, int interval, int burst) { diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 372ad5e0dcb8..529bc946f450 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -241,7 +241,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_entry_rcu - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). @@ -278,7 +278,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_first_or_null_rcu - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. * @@ -296,7 +296,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() @@ -311,7 +311,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_for_each_entry_continue_rcu - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position. @@ -542,6 +542,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ typeof(*(pos)), member)) +/** + * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. 
+ */ +#define hlist_for_each_entry_from_rcu(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ + typeof(*(pos)), member)) #endif /* __KERNEL__ */ #endif diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h deleted file mode 100644 index 56b7bc32db4f..000000000000 --- a/include/linux/res_counter.h +++ /dev/null @@ -1,223 +0,0 @@ -#ifndef __RES_COUNTER_H__ -#define __RES_COUNTER_H__ - -/* - * Resource Counters - * Contain common data types and routines for resource accounting - * - * Copyright 2007 OpenVZ SWsoft Inc - * - * Author: Pavel Emelianov <xemul@openvz.org> - * - * See Documentation/cgroups/resource_counter.txt for more - * info about what this counter is. - */ - -#include <linux/spinlock.h> -#include <linux/errno.h> - -/* - * The core object. the cgroup that wishes to account for some - * resource may include this counter into its structures and use - * the helpers described beyond - */ - -struct res_counter { - /* - * the current resource consumption level - */ - unsigned long long usage; - /* - * the maximal value of the usage from the counter creation - */ - unsigned long long max_usage; - /* - * the limit that usage cannot exceed - */ - unsigned long long limit; - /* - * the limit that usage can be exceed - */ - unsigned long long soft_limit; - /* - * the number of unsuccessful attempts to consume the resource - */ - unsigned long long failcnt; - /* - * the lock to protect all of the above. - * the routines below consider this to be IRQ-safe - */ - spinlock_t lock; - /* - * Parent counter, used for hierarchial resource accounting - */ - struct res_counter *parent; -}; - -#define RES_COUNTER_MAX ULLONG_MAX - -/** - * Helpers to interact with userspace - * res_counter_read_u64() - returns the value of the specified member. - * res_counter_read/_write - put/get the specified fields from the - * res_counter struct to/from the user - * - * @counter: the counter in question - * @member: the field to work with (see RES_xxx below) - * @buf: the buffer to opeate on,... - * @nbytes: its size... - * @pos: and the offset. - */ - -u64 res_counter_read_u64(struct res_counter *counter, int member); - -ssize_t res_counter_read(struct res_counter *counter, int member, - const char __user *buf, size_t nbytes, loff_t *pos, - int (*read_strategy)(unsigned long long val, char *s)); - -int res_counter_memparse_write_strategy(const char *buf, - unsigned long long *res); - -/* - * the field descriptors. one for each member of res_counter - */ - -enum { - RES_USAGE, - RES_MAX_USAGE, - RES_LIMIT, - RES_FAILCNT, - RES_SOFT_LIMIT, -}; - -/* - * helpers for accounting - */ - -void res_counter_init(struct res_counter *counter, struct res_counter *parent); - -/* - * charge - try to consume more resource. - * - * @counter: the counter - * @val: the amount of the resource. each controller defines its own - * units, e.g. numbers, bytes, Kbytes, etc - * - * returns 0 on success and <0 if the counter->usage will exceed the - * counter->limit - * - * charge_nofail works the same, except that it charges the resource - * counter unconditionally, and returns < 0 if the after the current - * charge we are over limit. 
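A short sketch of the hlist_for_each_entry_from_rcu() iterator added to rculist.h above; the foo_entry structure, the hash chain and the key are made-up examples. The point is only that the macro resumes traversal from the current cursor (inclusive) instead of the chain head and, like the other _rcu iterators, must run inside an RCU read-side critical section.

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct foo_entry {
	int			key;
	struct hlist_node	node;	/* linked on a hypothetical hash chain */
};

/* Count the first entry matching @key and everything after it. */
static int foo_count_from_key(struct hlist_head *head, int key)
{
	struct foo_entry *pos;
	int n = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(pos, head, node)
		if (pos->key == key)
			break;

	/* Continue from the current position; pos may already be NULL. */
	hlist_for_each_entry_from_rcu(pos, node)
		n++;
	rcu_read_unlock();

	return n;
}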
- */ - -int __must_check res_counter_charge(struct res_counter *counter, - unsigned long val, struct res_counter **limit_fail_at); -int res_counter_charge_nofail(struct res_counter *counter, - unsigned long val, struct res_counter **limit_fail_at); - -/* - * uncharge - tell that some portion of the resource is released - * - * @counter: the counter - * @val: the amount of the resource - * - * these calls check for usage underflow and show a warning on the console - * - * returns the total charges still present in @counter. - */ - -u64 res_counter_uncharge(struct res_counter *counter, unsigned long val); - -u64 res_counter_uncharge_until(struct res_counter *counter, - struct res_counter *top, - unsigned long val); -/** - * res_counter_margin - calculate chargeable space of a counter - * @cnt: the counter - * - * Returns the difference between the hard limit and the current usage - * of resource counter @cnt. - */ -static inline unsigned long long res_counter_margin(struct res_counter *cnt) -{ - unsigned long long margin; - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - if (cnt->limit > cnt->usage) - margin = cnt->limit - cnt->usage; - else - margin = 0; - spin_unlock_irqrestore(&cnt->lock, flags); - return margin; -} - -/** - * Get the difference between the usage and the soft limit - * @cnt: The counter - * - * Returns 0 if usage is less than or equal to soft limit - * The difference between usage and soft limit, otherwise. - */ -static inline unsigned long long -res_counter_soft_limit_excess(struct res_counter *cnt) -{ - unsigned long long excess; - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - if (cnt->usage <= cnt->soft_limit) - excess = 0; - else - excess = cnt->usage - cnt->soft_limit; - spin_unlock_irqrestore(&cnt->lock, flags); - return excess; -} - -static inline void res_counter_reset_max(struct res_counter *cnt) -{ - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - cnt->max_usage = cnt->usage; - spin_unlock_irqrestore(&cnt->lock, flags); -} - -static inline void res_counter_reset_failcnt(struct res_counter *cnt) -{ - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - cnt->failcnt = 0; - spin_unlock_irqrestore(&cnt->lock, flags); -} - -static inline int res_counter_set_limit(struct res_counter *cnt, - unsigned long long limit) -{ - unsigned long flags; - int ret = -EBUSY; - - spin_lock_irqsave(&cnt->lock, flags); - if (cnt->usage <= limit) { - cnt->limit = limit; - ret = 0; - } - spin_unlock_irqrestore(&cnt->lock, flags); - return ret; -} - -static inline int -res_counter_set_soft_limit(struct res_counter *cnt, - unsigned long long soft_limit) -{ - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - cnt->soft_limit = soft_limit; - spin_unlock_irqrestore(&cnt->lock, flags); - return 0; -} - -#endif diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index fb298e9d6d3a..b93fd89b2e5e 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -65,7 +65,10 @@ struct rhashtable_params { size_t new_size); bool (*shrink_decision)(const struct rhashtable *ht, size_t new_size); - int (*mutex_is_held)(void); +#ifdef CONFIG_PROVE_LOCKING + int (*mutex_is_held)(void *parent); + void *parent; +#endif }; /** @@ -96,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); -void rhashtable_insert(struct 
rhashtable *ht, struct rhash_head *node, gfp_t); -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); +void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); +bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev, gfp_t flags); + struct rhash_head __rcu **pprev); bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); -int rhashtable_expand(struct rhashtable *ht, gfp_t flags); -int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); +int rhashtable_expand(struct rhashtable *ht); +int rhashtable_shrink(struct rhashtable *ht); void *rhashtable_lookup(const struct rhashtable *ht, const void *key); void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 6cacbce1a06c..5db76a32fcab 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -17,6 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, long expires, u32 error); void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); +struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, + unsigned change, gfp_t flags); +void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, + gfp_t flags); + /* RTNL is used as a global lock for all changes to network configuration */ extern void rtnl_lock(void); @@ -94,12 +99,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - u16 flags); + u16 vid, + u16 flags); extern int ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr); + const unsigned char *addr, + u16 vid); extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u16 mode); + struct net_device *dev, u16 mode, + u32 flags, u32 mask); #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 55f5ee7cc3d3..8db31ef98d2f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1364,6 +1364,10 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; +#ifdef CONFIG_MEMCG_KMEM + unsigned memcg_kmem_skip_account:1; +#endif + unsigned long atomic_flags; /* Flags needing atomic access. 
*/ pid_t pid; @@ -1679,8 +1683,7 @@ struct task_struct { /* bitmask and counter of trace recursion */ unsigned long trace_recursion; #endif /* CONFIG_TRACING */ -#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ - unsigned int memcg_kmem_skip_account; +#ifdef CONFIG_MEMCG struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; @@ -2482,6 +2485,10 @@ extern void do_group_exit(int); extern int do_execve(struct filename *, const char __user * const __user *, const char __user * const __user *); +extern int do_execveat(int, struct filename *, + const char __user * const __user *, + const char __user * const __user *, + int); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h new file mode 100644 index 000000000000..9aafe0e24c68 --- /dev/null +++ b/include/linux/seq_buf.h @@ -0,0 +1,136 @@ +#ifndef _LINUX_SEQ_BUF_H +#define _LINUX_SEQ_BUF_H + +#include <linux/fs.h> + +/* + * Trace sequences are used to allow a function to call several other functions + * to create a string of data to use. + */ + +/** + * seq_buf - seq buffer structure + * @buffer: pointer to the buffer + * @size: size of the buffer + * @len: the amount of data inside the buffer + * @readpos: The next position to read in the buffer. + */ +struct seq_buf { + char *buffer; + size_t size; + size_t len; + loff_t readpos; +}; + +static inline void seq_buf_clear(struct seq_buf *s) +{ + s->len = 0; + s->readpos = 0; +} + +static inline void +seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) +{ + s->buffer = buf; + s->size = size; + seq_buf_clear(s); +} + +/* + * seq_buf have a buffer that might overflow. When this happens + * the len and size are set to be equal. + */ +static inline bool +seq_buf_has_overflowed(struct seq_buf *s) +{ + return s->len > s->size; +} + +static inline void +seq_buf_set_overflow(struct seq_buf *s) +{ + s->len = s->size + 1; +} + +/* + * How much buffer is left on the seq_buf? + */ +static inline unsigned int +seq_buf_buffer_left(struct seq_buf *s) +{ + if (seq_buf_has_overflowed(s)) + return 0; + + return s->size - s->len; +} + +/* How much buffer was written? */ +static inline unsigned int seq_buf_used(struct seq_buf *s) +{ + return min(s->len, s->size); +} + +/** + * seq_buf_get_buf - get buffer to write arbitrary data to + * @s: the seq_buf handle + * @bufp: the beginning of the buffer is stored here + * + * Return the number of bytes available in the buffer, or zero if + * there's no space. + */ +static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp) +{ + WARN_ON(s->len > s->size + 1); + + if (s->len < s->size) { + *bufp = s->buffer + s->len; + return s->size - s->len; + } + + *bufp = NULL; + return 0; +} + +/** + * seq_buf_commit - commit data to the buffer + * @s: the seq_buf handle + * @num: the number of bytes to commit + * + * Commit @num bytes of data written to a buffer previously acquired + * by seq_buf_get. To signal an error condition, or that the data + * didn't fit in the available space, pass a negative @num value. 
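A minimal sketch of the seq_buf usage pattern documented above (none of it comes from the patch): initialize the descriptor over a caller-supplied buffer, print into it, optionally reserve raw space with seq_buf_get_buf()/seq_buf_commit(), and test for overflow at the end. The buffer size and the messages are arbitrary.

#include <linux/printk.h>
#include <linux/seq_buf.h>
#include <linux/string.h>

static void foo_seq_buf_demo(void)
{
	unsigned char buf[64];
	struct seq_buf s;
	size_t avail;
	char *p;

	seq_buf_init(&s, buf, sizeof(buf));
	seq_buf_printf(&s, "cpu=%d ", 0);
	seq_buf_puts(&s, "online ");

	/* Arbitrary-data path: reserve space, write, then commit it. */
	avail = seq_buf_get_buf(&s, &p);
	if (avail >= 4) {
		memcpy(p, "done", 4);
		seq_buf_commit(&s, 4);
	}

	/* len > size marks overflow; seq_buf_used() clamps to size. */
	if (seq_buf_has_overflowed(&s))
		pr_warn("seq_buf overflowed, %u bytes kept\n", seq_buf_used(&s));
}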
+ */ +static inline void seq_buf_commit(struct seq_buf *s, int num) +{ + if (num < 0) { + seq_buf_set_overflow(s); + } else { + /* num must be negative on overflow */ + BUG_ON(s->len + num > s->size); + s->len += num; + } +} + +extern __printf(2, 3) +int seq_buf_printf(struct seq_buf *s, const char *fmt, ...); +extern __printf(2, 0) +int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args); +extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s); +extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, + int cnt); +extern int seq_buf_puts(struct seq_buf *s, const char *str); +extern int seq_buf_putc(struct seq_buf *s, unsigned char c); +extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); +extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, + unsigned int len); +extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); + +extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp, + int nmaskbits); + +#ifdef CONFIG_BINARY_PRINTF +extern int +seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary); +#endif + +#endif /* _LINUX_SEQ_BUF_H */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 52e0097f61f0..cf6a9daaaf6d 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -43,6 +43,21 @@ struct seq_operations { #define SEQ_SKIP 1 /** + * seq_has_overflowed - check if the buffer has overflowed + * @m: the seq_file handle + * + * seq_files have a buffer which may overflow. When this happens a larger + * buffer is reallocated and all the data will be printed again. + * The overflow state is true when m->count == m->size. + * + * Returns true if the buffer received more than it can hold. + */ +static inline bool seq_has_overflowed(struct seq_file *m) +{ + return m->count == m->size; +} + +/** * seq_get_buf - get buffer to write arbitrary data to * @m: the seq_file handle * @bufp: the beginning of the buffer is stored here diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 3df10d5f154b..e02acf0a0ec9 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -97,13 +97,10 @@ struct uart_8250_port { unsigned char msr_saved_flags; struct uart_8250_dma *dma; - struct serial_rs485 rs485; /* 8250 specific callbacks */ int (*dl_read)(struct uart_8250_port *); void (*dl_write)(struct uart_8250_port *, int); - int (*rs485_config)(struct uart_8250_port *, - struct serial_rs485 *rs485); }; static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h index a80aa1a5bee2..570e964dc899 100644 --- a/include/linux/serial_bcm63xx.h +++ b/include/linux/serial_bcm63xx.h @@ -116,6 +116,4 @@ UART_FIFO_PARERR_MASK | \ UART_FIFO_BRKDET_MASK) -#define UART_REG_SIZE 24 - #endif /* _LINUX_SERIAL_BCM63XX_H */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 21c2e05c1bc3..057038cf2788 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -63,7 +63,7 @@ struct uart_ops { void (*flush_buffer)(struct uart_port *); void (*set_termios)(struct uart_port *, struct ktermios *new, struct ktermios *old); - void (*set_ldisc)(struct uart_port *, int new); + void (*set_ldisc)(struct uart_port *, struct ktermios *); void (*pm)(struct uart_port *, unsigned int state, unsigned int oldstate); @@ -131,6 +131,8 @@ struct uart_port { void (*pm)(struct uart_port *, unsigned int state, unsigned int 
old); void (*handle_break)(struct uart_port *); + int (*rs485_config)(struct uart_port *, + struct serial_rs485 *rs485); unsigned int irq; /* irq number */ unsigned long irqflags; /* irq flags */ unsigned int uartclk; /* base uart clock */ @@ -140,12 +142,13 @@ struct uart_port { unsigned char iotype; /* io access style */ unsigned char unused1; -#define UPIO_PORT (0) -#define UPIO_HUB6 (1) -#define UPIO_MEM (2) -#define UPIO_MEM32 (3) -#define UPIO_AU (4) /* Au1x00 and RT288x type IO */ -#define UPIO_TSI (5) /* Tsi108/109 type IO */ +#define UPIO_PORT (0) /* 8b I/O port access */ +#define UPIO_HUB6 (1) /* Hub6 ISA card */ +#define UPIO_MEM (2) /* 8b MMIO access */ +#define UPIO_MEM32 (3) /* 32b little endian */ +#define UPIO_MEM32BE (4) /* 32b big endian */ +#define UPIO_AU (5) /* Au1x00 and RT288x type IO */ +#define UPIO_TSI (6) /* Tsi108/109 type IO */ unsigned int read_status_mask; /* driver specific */ unsigned int ignore_status_mask; /* driver specific */ @@ -160,21 +163,33 @@ struct uart_port { /* flags must be updated while holding port mutex */ upf_t flags; -#define UPF_FOURPORT ((__force upf_t) (1 << 1)) -#define UPF_SAK ((__force upf_t) (1 << 2)) -#define UPF_SPD_MASK ((__force upf_t) (0x1030)) -#define UPF_SPD_HI ((__force upf_t) (0x0010)) -#define UPF_SPD_VHI ((__force upf_t) (0x0020)) -#define UPF_SPD_CUST ((__force upf_t) (0x0030)) -#define UPF_SPD_SHI ((__force upf_t) (0x1000)) -#define UPF_SPD_WARP ((__force upf_t) (0x1010)) -#define UPF_SKIP_TEST ((__force upf_t) (1 << 6)) -#define UPF_AUTO_IRQ ((__force upf_t) (1 << 7)) -#define UPF_HARDPPS_CD ((__force upf_t) (1 << 11)) -#define UPF_LOW_LATENCY ((__force upf_t) (1 << 13)) -#define UPF_BUGGY_UART ((__force upf_t) (1 << 14)) + /* + * These flags must be equivalent to the flags defined in + * include/uapi/linux/tty_flags.h which are the userspace definitions + * assigned from the serial_struct flags in uart_set_info() + * [for bit definitions in the UPF_CHANGE_MASK] + * + * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable + * except bit 15 (UPF_NO_TXEN_TEST) which is masked off. + * The remaining bits are serial-core specific and not modifiable by + * userspace. 
+ */ +#define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ ) +#define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ ) +#define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ ) +#define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ ) +#define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ ) +#define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ ) +#define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ ) +#define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ ) +#define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ ) +#define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ ) +#define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ ) +#define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ ) +#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ ) #define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15)) -#define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16)) +#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ ) + /* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */ #define UPF_HARD_FLOW ((__force upf_t) (1 << 21)) /* Port has hardware-assisted s/w flow control */ @@ -190,9 +205,14 @@ struct uart_port { #define UPF_DEAD ((__force upf_t) (1 << 30)) #define UPF_IOREMAP ((__force upf_t) (1 << 31)) -#define UPF_CHANGE_MASK ((__force upf_t) (0x17fff)) +#define __UPF_CHANGE_MASK 0x17fff +#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK) #define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) +#if __UPF_CHANGE_MASK > ASYNC_FLAGS +#error Change mask not equivalent to userspace-visible bit defines +#endif + /* status must be updated while holding port lock */ upstat_t status; @@ -214,6 +234,7 @@ struct uart_port { unsigned char unused[2]; struct attribute_group *attr_group; /* port specific attributes */ const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ + struct serial_rs485 rs485; void *private_data; /* generic platform data pointer */ }; @@ -367,7 +388,7 @@ static inline int uart_tx_stopped(struct uart_port *port) static inline bool uart_cts_enabled(struct uart_port *uport) { - return uport->status & UPSTAT_CTS_ENABLE; + return !!(uport->status & UPSTAT_CTS_ENABLE); } /* diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 68c097077ef0..f4aee75f00b1 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -18,8 +18,6 @@ struct shrink_control { */ unsigned long nr_to_scan; - /* shrink from these nodes */ - nodemask_t nodes_to_scan; /* current node being shrunk (for NUMA aware shrinkers) */ int nid; }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6c8b6f604e76..85ab7d72b54c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -20,6 +20,8 @@ #include <linux/time.h> #include <linux/bug.h> #include <linux/cache.h> +#include <linux/rbtree.h> +#include <linux/socket.h> #include <linux/atomic.h> #include <asm/types.h> @@ -148,6 +150,8 @@ struct net_device; struct scatterlist; struct pipe_inode_info; +struct iov_iter; +struct napi_struct; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -341,7 +345,6 @@ enum { SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ - SKB_FCLONE_FREE, /* this companion fclone skb is available */ }; enum { @@ -370,8 +373,7 @@ 
enum { SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, - SKB_GSO_MPLS = 1 << 12, - + SKB_GSO_TUNNEL_REMCSUM = 1 << 12, }; #if BITS_PER_LONG > 32 @@ -440,6 +442,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @next: Next buffer in list * @prev: Previous buffer in list * @tstamp: Time we arrived/left + * @rbnode: RB tree node, alternative to next/prev for netem/tcp * @sk: Socket we are owned by * @dev: Device we arrived on/are leaving by * @cb: Control buffer. Free for use by every layer. Put private vars here @@ -504,15 +507,19 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, */ struct sk_buff { - /* These two members must be first. */ - struct sk_buff *next; - struct sk_buff *prev; - union { - ktime_t tstamp; - struct skb_mstamp skb_mstamp; + struct { + /* These two members must be first. */ + struct sk_buff *next; + struct sk_buff *prev; + + union { + ktime_t tstamp; + struct skb_mstamp skb_mstamp; + }; + }; + struct rb_node rbnode; /* used in netem & tcp stack */ }; - struct sock *sk; struct net_device *dev; @@ -597,7 +604,8 @@ struct sk_buff { #endif __u8 ipvs_property:1; __u8 inner_protocol_type:1; - /* 4 or 6 bit hole */ + __u8 remcsum_offload:1; + /* 3 or 5 bit hole */ #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -666,6 +674,7 @@ struct sk_buff { #define SKB_ALLOC_FCLONE 0x01 #define SKB_ALLOC_RX 0x02 +#define SKB_ALLOC_NAPI 0x04 /* Returns true if the skb was allocated from PFMEMALLOC reserves */ static inline bool skb_pfmemalloc(const struct sk_buff *skb) @@ -710,9 +719,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) skb->_skb_refdst = (unsigned long)dst; } -void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, - bool force); - /** * skb_dst_set_noref - sets skb dst, hopefully, without taking reference * @skb: buffer @@ -725,24 +731,8 @@ void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, */ static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) { - __skb_dst_set_noref(skb, dst, false); -} - -/** - * skb_dst_set_noref_force - sets skb dst, without taking reference - * @skb: buffer - * @dst: dst entry - * - * Sets skb dst, assuming a reference was not taken on dst. - * No reference is taken and no dst_release will be called. While for - * cached dsts deferred reclaim is a basic feature, for entries that are - * not cached it is caller's job to guarantee that last dst_release for - * provided dst happens when nobody uses it, eg. after a RCU grace period. 
- */ -static inline void skb_dst_set_noref_force(struct sk_buff *skb, - struct dst_entry *dst) -{ - __skb_dst_set_noref(skb, dst, true); + WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; } /** @@ -810,7 +800,7 @@ static inline bool skb_fclone_busy(const struct sock *sk, fclones = container_of(skb, struct sk_buff_fclones, skb1); return skb->fclone == SKB_FCLONE_ORIG && - fclones->skb2.fclone == SKB_FCLONE_CLONE && + atomic_read(&fclones->fclone_ref) > 1 && fclones->skb2.sk == sk; } @@ -2176,47 +2166,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); } +void *napi_alloc_frag(unsigned int fragsz); +struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, + unsigned int length, gfp_t gfp_mask); +static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, + unsigned int length) +{ + return __napi_alloc_skb(napi, length, GFP_ATOMIC); +} + /** - * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data - * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX - * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used - * @order: size of the allocation + * __dev_alloc_pages - allocate page for network Rx + * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx + * @order: size of the allocation * - * Allocate a new page. + * Allocate a new page. * - * %NULL is returned if there is no free memory. + * %NULL is returned if there is no free memory. */ -static inline struct page *__skb_alloc_pages(gfp_t gfp_mask, - struct sk_buff *skb, - unsigned int order) -{ - struct page *page; - - gfp_mask |= __GFP_COLD; - - if (!(gfp_mask & __GFP_NOMEMALLOC)) - gfp_mask |= __GFP_MEMALLOC; +static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, + unsigned int order) +{ + /* This piece of code contains several assumptions. + * 1. This is for device Rx, therefor a cold page is preferred. + * 2. The expectation is the user wants a compound page. + * 3. If requesting a order 0 page it will not be compound + * due to the check to see if order has a value in prep_new_page + * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to + * code in gfp_to_alloc_flags that should be enforcing this. + */ + gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC; - page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); - if (skb && page && page->pfmemalloc) - skb->pfmemalloc = true; + return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); +} - return page; +static inline struct page *dev_alloc_pages(unsigned int order) +{ + return __dev_alloc_pages(GFP_ATOMIC, order); } /** - * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data - * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX - * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used + * __dev_alloc_page - allocate a page for network Rx + * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx * - * Allocate a new page. + * Allocate a new page. * - * %NULL is returned if there is no free memory. + * %NULL is returned if there is no free memory. 
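A hedged sketch of how a driver's receive path might use the helpers introduced above in place of the removed __skb_alloc_page()/__skb_alloc_pages(): dev_alloc_page() for ring pages and napi_alloc_skb() for small packets copied in the NAPI poll loop. The foo_* names and the one-page-per-slot layout are invented for illustration.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

struct foo_rx_slot {
	struct page *page;	/* hypothetical Rx descriptor backing page */
};

static int foo_refill_slot(struct foo_rx_slot *slot)
{
	if (slot->page)
		return 0;

	/* GFP_ATOMIC; __GFP_COLD/__GFP_COMP/__GFP_MEMALLOC added internally. */
	slot->page = dev_alloc_page();
	return slot->page ? 0 : -ENOMEM;
}

static struct sk_buff *foo_copy_small_packet(struct napi_struct *napi,
					     const void *data, unsigned int len)
{
	struct sk_buff *skb;

	/* Backed by the per-CPU NAPI page fragment cache. */
	skb = napi_alloc_skb(napi, len);
	if (!skb)
		return NULL;

	memcpy(__skb_put(skb, len), data, len);
	return skb;
}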
*/ -static inline struct page *__skb_alloc_page(gfp_t gfp_mask, - struct sk_buff *skb) +static inline struct page *__dev_alloc_page(gfp_t gfp_mask) +{ + return __dev_alloc_pages(gfp_mask, 0); +} + +static inline struct page *dev_alloc_page(void) { - return __skb_alloc_pages(gfp_mask, skb, 0); + return __dev_alloc_page(GFP_ATOMIC); } /** @@ -2448,7 +2452,6 @@ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. */ - static inline int skb_padto(struct sk_buff *skb, unsigned int len) { unsigned int size = skb->len; @@ -2457,6 +2460,29 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) return skb_pad(skb, len - size); } +/** + * skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. + */ +static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} + static inline int skb_add_data(struct sk_buff *skb, char __user *from, int copy) { @@ -2629,18 +2655,18 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); -int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, - struct iovec *to, int size); -int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, - struct iovec *iov); -int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, - const struct iovec *from, int from_offset, - int len); -int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm, - int offset, size_t count); -int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, - const struct iovec *to, int to_offset, - int size); +int skb_copy_datagram_iter(const struct sk_buff *from, int offset, + struct iov_iter *to, int size); +static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, + struct msghdr *msg, int size) +{ + return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); +} +int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, + struct msghdr *msg); +int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, + struct iov_iter *from, int len); +int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); void skb_free_datagram(struct sock *sk, struct sk_buff *skb); void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); @@ -2661,6 +2687,20 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet); unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); +int skb_ensure_writable(struct sk_buff *skb, int write_len); +int skb_vlan_pop(struct sk_buff *skb); +int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); + +static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) +{ + /* XXX: stripping const */ + return 
memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len); +} + +static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) +{ + return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; +} struct skb_checksum_ops { __wsum (*update)(const void *mem, int len, __wsum wsum); diff --git a/include/linux/slab.h b/include/linux/slab.h index c265bec6a57d..9a139b637069 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -493,7 +493,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) * @memcg: pointer to the memcg this cache belongs to * @list: list_head for the list of all caches in this memcg * @root_cache: pointer to the global, root cache, this cache was derived from - * @nr_pages: number of pages that belongs to this cache. */ struct memcg_cache_params { bool is_root_cache; @@ -506,17 +505,12 @@ struct memcg_cache_params { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; - atomic_t nr_pages; }; }; }; int memcg_update_all_caches(int num_memcgs); -struct seq_file; -int cache_show(struct kmem_cache *s, struct seq_file *m); -void print_slabinfo_header(struct seq_file *m); - /** * kmalloc_array - allocate memory for an array. * @n: number of elements. diff --git a/include/linux/socket.h b/include/linux/socket.h index bb9b83640070..6e49a14365dc 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -47,16 +47,25 @@ struct linger { struct msghdr { void *msg_name; /* ptr to socket address structure */ int msg_namelen; /* size of socket address structure */ - struct iovec *msg_iov; /* scatter/gather array */ - __kernel_size_t msg_iovlen; /* # elements in msg_iov */ + struct iov_iter msg_iter; /* data */ void *msg_control; /* ancillary data */ __kernel_size_t msg_controllen; /* ancillary data buffer length */ unsigned int msg_flags; /* flags on received message */ }; + +struct user_msghdr { + void __user *msg_name; /* ptr to socket address structure */ + int msg_namelen; /* size of socket address structure */ + struct iovec __user *msg_iov; /* scatter/gather array */ + __kernel_size_t msg_iovlen; /* # elements in msg_iov */ + void __user *msg_control; /* ancillary data */ + __kernel_size_t msg_controllen; /* ancillary data buffer length */ + unsigned int msg_flags; /* flags on received message */ +}; /* For recvmmsg/sendmmsg */ struct mmsghdr { - struct msghdr msg_hdr; + struct user_msghdr msg_hdr; unsigned int msg_len; }; @@ -94,6 +103,10 @@ struct cmsghdr { (cmsg)->cmsg_len <= (unsigned long) \ ((mhdr)->msg_controllen - \ ((char *)(cmsg) - (char *)(mhdr)->msg_control))) +#define for_each_cmsghdr(cmsg, msg) \ + for (cmsg = CMSG_FIRSTHDR(msg); \ + cmsg; \ + cmsg = CMSG_NXTHDR(msg, cmsg)) /* * Get the next cmsg header @@ -312,15 +325,14 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, extern unsigned long iov_pages(const struct iovec *iov, int offset, unsigned long nr_segs); -extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); struct timespec; /* The __sys_...msg variants allow MSG_CMSG_COMPAT */ -extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); -extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); +extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); +extern long 
__sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 46d188a9947c..a6ef2a8e6de4 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -1049,4 +1049,10 @@ spi_unregister_device(struct spi_device *spi) extern const struct spi_device_id * spi_get_device_id(const struct spi_device *sdev); +static inline bool +spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer) +{ + return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers); +} + #endif /* __LINUX_SPI_H */ diff --git a/include/linux/spmi.h b/include/linux/spmi.h index 91f5eab9e428..f84212cd3b7d 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h @@ -134,9 +134,6 @@ void spmi_controller_remove(struct spmi_controller *ctrl); * this structure. * @probe: binds this driver to a SPMI device. * @remove: unbinds this driver from the SPMI device. - * @shutdown: standard shutdown callback used during powerdown/halt. - * @suspend: standard suspend callback used during system suspend. - * @resume: standard resume callback used during system resume. * * If PM runtime support is desired for a slave, a device driver can call * pm_runtime_put() from their probe() routine (and a balancing diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 115b570e3bff..669045ab73f3 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -1,6 +1,8 @@ #ifndef __LINUX_STACKTRACE_H #define __LINUX_STACKTRACE_H +#include <linux/types.h> + struct task_struct; struct pt_regs; @@ -20,6 +22,8 @@ extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); extern void print_stack_trace(struct stack_trace *trace, int spaces); +extern int snprint_stack_trace(char *buf, size_t size, + struct stack_trace *trace, int spaces); #ifdef CONFIG_USER_STACKTRACE_SUPPORT extern void save_stack_trace_user(struct stack_trace *trace); @@ -32,6 +36,7 @@ extern void save_stack_trace_user(struct stack_trace *trace); # define save_stack_trace_tsk(tsk, trace) do { } while (0) # define save_stack_trace_user(trace) do { } while (0) # define print_stack_trace(trace, spaces) do { } while (0) +# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) #endif #endif diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 8e030075fe79..a7cbb570cc5c 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -53,7 +53,7 @@ struct rpc_cred { struct rcu_head cr_rcu; struct rpc_auth * cr_auth; const struct rpc_credops *cr_ops; -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) unsigned long cr_magic; /* 0x0f4aa4f0 */ #endif unsigned long cr_expire; /* when to gc */ diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 70736b98c721..d86acc63b25f 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -63,6 +63,9 @@ struct rpc_clnt { struct rpc_rtt cl_rtt_default; struct rpc_timeout cl_timeout_default; const struct rpc_program *cl_program; +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + struct dentry *cl_debugfs; /* debugfs directory */ +#endif }; /* @@ -176,5 +179,6 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); int 
rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); +const char *rpc_proc_name(const struct rpc_task *task); #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 9385bd74c860..c57d8ea0716c 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -10,22 +10,10 @@ #include <uapi/linux/sunrpc/debug.h> - -/* - * Enable RPC debugging/profiling. - */ -#ifdef CONFIG_SUNRPC_DEBUG -#define RPC_DEBUG -#endif -#ifdef CONFIG_TRACEPOINTS -#define RPC_TRACEPOINTS -#endif -/* #define RPC_PROFILE */ - /* * Debugging macros etc */ -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) extern unsigned int rpc_debug; extern unsigned int nfs_debug; extern unsigned int nfsd_debug; @@ -36,7 +24,7 @@ extern unsigned int nlm_debug; #define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) #undef ifdebug -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) # define dfprintk(fac, args...) \ @@ -65,9 +53,55 @@ extern unsigned int nlm_debug; /* * Sysctl interface for RPC debugging */ -#ifdef RPC_DEBUG + +struct rpc_clnt; +struct rpc_xprt; + +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) void rpc_register_sysctl(void); void rpc_unregister_sysctl(void); +int sunrpc_debugfs_init(void); +void sunrpc_debugfs_exit(void); +int rpc_clnt_debugfs_register(struct rpc_clnt *); +void rpc_clnt_debugfs_unregister(struct rpc_clnt *); +int rpc_xprt_debugfs_register(struct rpc_xprt *); +void rpc_xprt_debugfs_unregister(struct rpc_xprt *); +#else +static inline int +sunrpc_debugfs_init(void) +{ + return 0; +} + +static inline void +sunrpc_debugfs_exit(void) +{ + return; +} + +static inline int +rpc_clnt_debugfs_register(struct rpc_clnt *clnt) +{ + return 0; +} + +static inline void +rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) +{ + return; +} + +static inline int +rpc_xprt_debugfs_register(struct rpc_xprt *xprt) +{ + return 0; +} + +static inline void +rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt) +{ + return; +} #endif #endif /* _LINUX_SUNRPC_DEBUG_H_ */ diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 1565bbe86d51..eecb5a71e6c0 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h @@ -27,10 +27,13 @@ #include <linux/seq_file.h> #include <linux/ktime.h> +#include <linux/spinlock.h> #define RPC_IOSTATS_VERS "1.0" struct rpc_iostats { + spinlock_t om_lock; + /* * These counters give an idea about how many request * transmissions are required, on average, to complete that diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 1a8959944c5f..5f1e6bd4c316 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -79,7 +79,7 @@ struct rpc_task { unsigned short tk_flags; /* misc flags */ unsigned short tk_timeouts; /* maj timeouts */ -#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) unsigned short tk_pid; /* debugging aid */ #endif unsigned char tk_priority : 2,/* Task priority */ @@ -187,7 +187,7 @@ struct rpc_wait_queue { unsigned char nr; /* # tasks remaining for cookie */ unsigned short qlen; /* total # tasks waiting in queue */ struct rpc_timer timer_list; -#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) const char * name; #endif }; @@ -237,7 +237,7 @@ void rpc_free(void *); int rpciod_up(void); void 
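Because the !CONFIG_SUNRPC_DEBUG branch above supplies no-op stubs for the debugfs hooks, callers can register and unregister debugfs state unconditionally. A sketch of the intended calling pattern; example_setup() is illustrative, not a function from the patch.

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/debug.h>

static int example_setup(struct rpc_clnt *clnt)
{
        int err;

        /* Compiles down to "return 0" when SUNRPC debugging is disabled. */
        err = rpc_clnt_debugfs_register(clnt);
        if (err)
                return err;

        /* ... remainder of client setup ... */
        return 0;
}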
rpciod_down(void); int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct net; void rpc_show_tasks(struct net *); #endif @@ -251,7 +251,7 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task) return __rpc_wait_for_completion_task(task, NULL); } -#if defined(RPC_DEBUG) || defined (RPC_TRACEPOINTS) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) static inline const char * rpc_qname(const struct rpc_wait_queue *q) { return ((q && q->name) ? q->name : "unknown"); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 21678464883a..6f22cfeef5e3 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -26,10 +26,10 @@ typedef int (*svc_thread_fn)(void *); /* statistics for svc_pool structures */ struct svc_pool_stats { - unsigned long packets; + atomic_long_t packets; unsigned long sockets_queued; - unsigned long threads_woken; - unsigned long threads_timedout; + atomic_long_t threads_woken; + atomic_long_t threads_timedout; }; /* @@ -45,12 +45,13 @@ struct svc_pool_stats { struct svc_pool { unsigned int sp_id; /* pool id; also node id on NUMA */ spinlock_t sp_lock; /* protects all fields */ - struct list_head sp_threads; /* idle server threads */ struct list_head sp_sockets; /* pending sockets */ unsigned int sp_nrthreads; /* # of threads in pool */ struct list_head sp_all_threads; /* all server threads */ struct svc_pool_stats sp_stats; /* statistics on pool operation */ - int sp_task_pending;/* has pending task */ +#define SP_TASK_PENDING (0) /* still work to do even if no + * xprt is queued. */ + unsigned long sp_flags; } ____cacheline_aligned_in_smp; /* @@ -219,8 +220,8 @@ static inline void svc_putu32(struct kvec *iov, __be32 val) * processed. 
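With the pool statistics switched to atomic_long_t and the pending-work flag folded into sp_flags, hot paths can update both without taking sp_lock. A sketch of the resulting idiom; the helper name is made up.

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/sunrpc/svc.h>

static void example_note_pool_work(struct svc_pool *pool)
{
        /* Remember there is work even though no transport is queued. */
        set_bit(SP_TASK_PENDING, &pool->sp_flags);
        atomic_long_inc(&pool->sp_stats.packets);
}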
*/ struct svc_rqst { - struct list_head rq_list; /* idle list */ struct list_head rq_all; /* all threads list */ + struct rcu_head rq_rcu_head; /* for RCU deferred kfree */ struct svc_xprt * rq_xprt; /* transport ptr */ struct sockaddr_storage rq_addr; /* peer address */ @@ -236,7 +237,6 @@ struct svc_rqst { struct svc_cred rq_cred; /* auth info */ void * rq_xprt_ctxt; /* transport specific context ptr */ struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ - bool rq_usedeferral; /* use deferral */ size_t rq_xprt_hlen; /* xprt header len */ struct xdr_buf rq_arg; @@ -253,9 +253,17 @@ struct svc_rqst { u32 rq_vers; /* program version */ u32 rq_proc; /* procedure number */ u32 rq_prot; /* IP protocol */ - unsigned short - rq_secure : 1; /* secure port */ - unsigned short rq_local : 1; /* local request */ + int rq_cachetype; /* catering to nfsd */ +#define RQ_SECURE (0) /* secure port */ +#define RQ_LOCAL (1) /* local request */ +#define RQ_USEDEFERRAL (2) /* use deferral */ +#define RQ_DROPME (3) /* drop current reply */ +#define RQ_SPLICE_OK (4) /* turned off in gss privacy + * to prevent encrypting page + * cache pages */ +#define RQ_VICTIM (5) /* about to be shut down */ +#define RQ_BUSY (6) /* request is busy */ + unsigned long rq_flags; /* flags field */ void * rq_argp; /* decoded arguments */ void * rq_resp; /* xdr'd results */ @@ -271,16 +279,12 @@ struct svc_rqst { struct cache_req rq_chandle; /* handle passed to caches for * request delaying */ - bool rq_dropme; /* Catering to nfsd */ struct auth_domain * rq_client; /* RPC peer info */ struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ - int rq_cachetype; struct svc_cacherep * rq_cacherep; /* cache info */ - bool rq_splice_ok; /* turned off in gss privacy - * to prevent encrypting page - * cache pages */ struct task_struct *rq_task; /* service thread */ + spinlock_t rq_lock; /* per-request lock */ }; #define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net) diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index ce6e4182a5b2..79f6f8f3dc0a 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -63,10 +63,9 @@ struct svc_xprt { #define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */ #define XPT_DEFERRED 8 /* deferred request pending */ #define XPT_OLD 9 /* used for xprt aging mark+sweep */ -#define XPT_DETACHED 10 /* detached from tempsocks list */ -#define XPT_LISTENER 11 /* listening endpoint */ -#define XPT_CACHE_AUTH 12 /* cache auth info */ -#define XPT_LOCAL 13 /* connection from loopback interface */ +#define XPT_LISTENER 10 /* listening endpoint */ +#define XPT_CACHE_AUTH 11 /* cache auth info */ +#define XPT_LOCAL 12 /* connection from loopback interface */ struct svc_serv *xpt_server; /* service for transport */ atomic_t xpt_reserved; /* space on outq that is rsvd */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index cf391eef2e6d..9d27ac45b909 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -239,6 +239,9 @@ struct rpc_xprt { struct net *xprt_net; const char *servername; const char *address_strings[RPC_DISPLAY_MAX]; +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + struct dentry *debugfs; /* debugfs directory */ +#endif }; #if defined(CONFIG_SUNRPC_BACKCHANNEL) diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 1ad36cc25b2e..7591788e9fbf 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -17,6 +17,65 @@ void 
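The per-request booleans (rq_secure, rq_local, rq_usedeferral, rq_dropme, rq_splice_ok) become bits in the atomic rq_flags word above, so they are tested and changed with the standard bitops. A sketch of testing two of them together; the predicate itself is made up.

#include <linux/bitops.h>
#include <linux/sunrpc/svc.h>

static bool example_splice_allowed(const struct svc_rqst *rqstp)
{
        /* Splicing page-cache pages only when the client came in on a
         * privileged port and nothing has turned RQ_SPLICE_OK off. */
        return test_bit(RQ_SPLICE_OK, &rqstp->rq_flags) &&
               test_bit(RQ_SECURE, &rqstp->rq_flags);
}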
cleanup_socket_xprt(void); #define RPC_DEF_MIN_RESVPORT (665U) #define RPC_DEF_MAX_RESVPORT (1023U) +struct sock_xprt { + struct rpc_xprt xprt; + + /* + * Network layer + */ + struct socket * sock; + struct sock * inet; + + /* + * State of TCP reply receive + */ + __be32 tcp_fraghdr, + tcp_xid, + tcp_calldir; + + u32 tcp_offset, + tcp_reclen; + + unsigned long tcp_copied, + tcp_flags; + + /* + * Connection of transports + */ + struct delayed_work connect_worker; + struct sockaddr_storage srcaddr; + unsigned short srcport; + + /* + * UDP socket buffer size parameters + */ + size_t rcvsize, + sndsize; + + /* + * Saved socket callback addresses + */ + void (*old_data_ready)(struct sock *); + void (*old_state_change)(struct sock *); + void (*old_write_space)(struct sock *); + void (*old_error_report)(struct sock *); +}; + +/* + * TCP receive state flags + */ +#define TCP_RCV_LAST_FRAG (1UL << 0) +#define TCP_RCV_COPY_FRAGHDR (1UL << 1) +#define TCP_RCV_COPY_XID (1UL << 2) +#define TCP_RCV_COPY_DATA (1UL << 3) +#define TCP_RCV_READ_CALLDIR (1UL << 4) +#define TCP_RCV_COPY_CALLDIR (1UL << 5) + +/* + * TCP RPC flags + */ +#define TCP_RPC_REPLY (1UL << 6) + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 37a585beef5c..34e8b60ab973 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -102,14 +102,6 @@ union swap_header { } info; }; - /* A swap entry has to fit into a "unsigned long", as - * the entry is hidden in the "index" field of the - * swapper address space. - */ -typedef struct { - unsigned long val; -} swp_entry_t; - /* * current->reclaim_state points to one of these when a task is running * memory reclaim diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h new file mode 100644 index 000000000000..145306bdc92f --- /dev/null +++ b/include/linux/swap_cgroup.h @@ -0,0 +1,42 @@ +#ifndef __LINUX_SWAP_CGROUP_H +#define __LINUX_SWAP_CGROUP_H + +#include <linux/swap.h> + +#ifdef CONFIG_MEMCG_SWAP + +extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new); +extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); +extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); +extern int swap_cgroup_swapon(int type, unsigned long max_pages); +extern void swap_cgroup_swapoff(int type); + +#else + +static inline +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) +{ + return 0; +} + +static inline +unsigned short lookup_swap_cgroup_id(swp_entry_t ent) +{ + return 0; +} + +static inline int +swap_cgroup_swapon(int type, unsigned long max_pages) +{ + return 0; +} + +static inline void swap_cgroup_swapoff(int type) +{ + return; +} + +#endif /* CONFIG_MEMCG_SWAP */ + +#endif /* __LINUX_SWAP_CGROUP_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index bda9b81357cc..85893d744901 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -25,7 +25,7 @@ struct linux_dirent64; struct list_head; struct mmap_arg_struct; struct msgbuf; -struct msghdr; +struct user_msghdr; struct mmsghdr; struct msqid_ds; struct new_utsname; @@ -601,13 +601,13 @@ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); asmlinkage long sys_send(int, void __user *, size_t, unsigned); asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, struct sockaddr __user *, int); -asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); +asmlinkage long sys_sendmsg(int 
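Moving struct sock_xprt into xprtsock.h lets code outside net/sunrpc/xprtsock.c get from a generic rpc_xprt back to its socket state. A minimal sketch of that container_of() step; the helper is hypothetical.

#include <linux/kernel.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/xprtsock.h>

static struct socket *example_xprt_socket(struct rpc_xprt *xprt)
{
        struct sock_xprt *transport =
                container_of(xprt, struct sock_xprt, xprt);

        return transport->sock;
}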
fd, struct user_msghdr __user *msg, unsigned flags); asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags); asmlinkage long sys_recv(int, void __user *, size_t, unsigned); asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *); -asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); +asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags, struct timespec __user *timeout); @@ -877,4 +877,9 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, asmlinkage long sys_getrandom(char __user *buf, size_t count, unsigned int flags); asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); + +asmlinkage long sys_execveat(int dfd, const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp, int flags); + #endif diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index f97d0dbb59fa..ddad16148bd6 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -70,6 +70,8 @@ struct attribute_group { * for examples.. */ +#define SYSFS_PREALLOC 010000 + #define __ATTR(_name, _mode, _show, _store) { \ .attr = {.name = __stringify(_name), \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ @@ -77,6 +79,13 @@ struct attribute_group { .store = _store, \ } +#define __ATTR_PREALLOC(_name, _mode, _show, _store) { \ + .attr = {.name = __stringify(_name), \ + .mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(_mode) },\ + .show = _show, \ + .store = _store, \ +} + #define __ATTR_RO(_name) { \ .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ .show = _name##_show, \ diff --git a/include/linux/syslog.h b/include/linux/syslog.h index 98a3153c0f96..4b7b875a7ce1 100644 --- a/include/linux/syslog.h +++ b/include/linux/syslog.h @@ -49,4 +49,13 @@ int do_syslog(int type, char __user *buf, int count, bool from_file); +#ifdef CONFIG_PRINTK +int check_syslog_permissions(int type, bool from_file); +#else +static inline int check_syslog_permissions(int type, bool from_file) +{ + return 0; +} +#endif + #endif /* _LINUX_SYSLOG_H */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index c2dee7deefa8..67309ece0772 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -130,7 +130,7 @@ struct tcp_sock { /* inet_connection_sock has to be the first member of tcp_sock */ struct inet_connection_sock inet_conn; u16 tcp_header_len; /* Bytes of tcp header to send */ - u16 xmit_size_goal_segs; /* Goal for segmenting output packets */ + u16 gso_segs; /* Max number of segs per GSO packet */ /* * Header prediction flags @@ -162,7 +162,7 @@ struct tcp_sock { struct { struct sk_buff_head prequeue; struct task_struct *task; - struct iovec *iov; + struct msghdr *msg; int memory; int len; } ucopy; @@ -204,10 +204,10 @@ struct tcp_sock { u16 urg_data; /* Saved octet of OOB data and control flags */ u8 ecn_flags; /* ECN status bits. */ - u8 reordering; /* Packet reordering metric. */ + u8 keepalive_probes; /* num of allowed keep alive probes */ + u32 reordering; /* Packet reordering metric. */ u32 snd_up; /* Urgent pointer */ - u8 keepalive_probes; /* num of allowed keep alive probes */ /* * Options received (usually on last packet, some only on SYN packets). 
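SYSFS_PREALLOC or'd into the mode tells sysfs to set up the attribute's buffer when the file is created, so a later read cannot fail on allocation; __ATTR_PREALLOC is the matching initializer. A sketch of a kobject attribute using it; the name and contents are arbitrary.

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "ok\n");
}

static struct kobj_attribute example_state_attr =
        __ATTR_PREALLOC(state, 0444, state_show, NULL);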
*/ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index ef90838b36a0..c611a02fbc51 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -29,10 +29,10 @@ #include <linux/idr.h> #include <linux/device.h> #include <linux/workqueue.h> +#include <uapi/linux/thermal.h> #define THERMAL_TRIPS_NONE -1 #define THERMAL_MAX_TRIPS 12 -#define THERMAL_NAME_LENGTH 20 /* invalid cooling state */ #define THERMAL_CSTATE_INVALID -1UL @@ -49,11 +49,6 @@ #define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off)) #define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732) -/* Adding event notification support elements */ -#define THERMAL_GENL_FAMILY_NAME "thermal_event" -#define THERMAL_GENL_VERSION 0x01 -#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp" - /* Default Thermal Governor */ #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) #define DEFAULT_THERMAL_GOVERNOR "step_wise" @@ -86,30 +81,6 @@ enum thermal_trend { THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */ }; -/* Events supported by Thermal Netlink */ -enum events { - THERMAL_AUX0, - THERMAL_AUX1, - THERMAL_CRITICAL, - THERMAL_DEV_FAULT, -}; - -/* attributes of thermal_genl_family */ -enum { - THERMAL_GENL_ATTR_UNSPEC, - THERMAL_GENL_ATTR_EVENT, - __THERMAL_GENL_ATTR_MAX, -}; -#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) - -/* commands supported by the thermal_genl_family */ -enum { - THERMAL_GENL_CMD_UNSPEC, - THERMAL_GENL_CMD_EVENT, - __THERMAL_GENL_CMD_MAX, -}; -#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) - struct thermal_zone_device_ops { int (*bind) (struct thermal_zone_device *, struct thermal_cooling_device *); @@ -289,19 +260,49 @@ struct thermal_genl_event { enum events event; }; +/** + * struct thermal_zone_of_device_ops - scallbacks for handling DT based zones + * + * Mandatory: + * @get_temp: a pointer to a function that reads the sensor temperature. + * + * Optional: + * @get_trend: a pointer to a function that reads the sensor temperature trend. + * @set_emul_temp: a pointer to a function that sets sensor emulated + * temperature. 
+ */ +struct thermal_zone_of_device_ops { + int (*get_temp)(void *, long *); + int (*get_trend)(void *, long *); + int (*set_emul_temp)(void *, unsigned long); +}; + +/** + * struct thermal_trip - representation of a point in temperature domain + * @np: pointer to struct device_node that this trip point was created from + * @temperature: temperature value in miliCelsius + * @hysteresis: relative hysteresis in miliCelsius + * @type: trip point type + */ + +struct thermal_trip { + struct device_node *np; + unsigned long int temperature; + unsigned long int hysteresis; + enum thermal_trip_type type; +}; + /* Function declarations */ #ifdef CONFIG_THERMAL_OF struct thermal_zone_device * -thermal_zone_of_sensor_register(struct device *dev, int id, - void *data, int (*get_temp)(void *, long *), - int (*get_trend)(void *, long *)); +thermal_zone_of_sensor_register(struct device *dev, int id, void *data, + const struct thermal_zone_of_device_ops *ops); void thermal_zone_of_sensor_unregister(struct device *dev, struct thermal_zone_device *tz); #else static inline struct thermal_zone_device * -thermal_zone_of_sensor_register(struct device *dev, int id, - void *data, int (*get_temp)(void *, long *), - int (*get_trend)(void *, long *)) +thermal_zone_of_sensor_register(struct device *dev, int id, void *data, + const struct thermal_zone_of_device_ops *ops) { return NULL; } diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index ea6c9dea79e3..cfaf5a1d4bad 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -1,7 +1,7 @@ #ifndef _LINUX_TRACE_SEQ_H #define _LINUX_TRACE_SEQ_H -#include <linux/fs.h> +#include <linux/seq_buf.h> #include <asm/page.h> @@ -12,20 +12,36 @@ struct trace_seq { unsigned char buffer[PAGE_SIZE]; - unsigned int len; - unsigned int readpos; + struct seq_buf seq; int full; }; static inline void trace_seq_init(struct trace_seq *s) { - s->len = 0; - s->readpos = 0; + seq_buf_init(&s->seq, s->buffer, PAGE_SIZE); s->full = 0; } /** + * trace_seq_used - amount of actual data written to buffer + * @s: trace sequence descriptor + * + * Returns the amount of data written to the buffer. + * + * IMPORTANT! + * + * Use this instead of @s->seq.len if you need to pass the amount + * of data from the buffer to another buffer (userspace, or what not). + * The @s->seq.len on overflow is bigger than the buffer size and + * using it can cause access to undefined memory. + */ +static inline int trace_seq_used(struct trace_seq *s) +{ + return seq_buf_used(&s->seq); +} + +/** * trace_seq_buffer_ptr - return pointer to next location in buffer * @s: trace sequence descriptor * @@ -37,7 +53,19 @@ trace_seq_init(struct trace_seq *s) static inline unsigned char * trace_seq_buffer_ptr(struct trace_seq *s) { - return s->buffer + s->len; + return s->buffer + seq_buf_used(&s->seq); +} + +/** + * trace_seq_has_overflowed - return true if the trace_seq took too much + * @s: trace sequence descriptor + * + * Returns true if too much data was added to the trace_seq and it is + * now full and will not take anymore. 
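thermal_zone_of_sensor_register() now takes a thermal_zone_of_device_ops structure instead of bare callbacks, leaving room for the optional trend and emulation hooks. A sketch of a sensor driver registering with only the mandatory get_temp; the names and fixed reading are illustrative.

#include <linux/thermal.h>

static int example_get_temp(void *data, long *temp)
{
        *temp = 42000;          /* millidegrees Celsius */
        return 0;
}

static const struct thermal_zone_of_device_ops example_tz_ops = {
        .get_temp = example_get_temp,
};

/* In probe():
 *      tzd = thermal_zone_of_sensor_register(dev, 0, drvdata,
 *                                            &example_tz_ops);
 */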
+ */ +static inline bool trace_seq_has_overflowed(struct trace_seq *s) +{ + return s->full || seq_buf_has_overflowed(&s->seq); } /* @@ -45,40 +73,37 @@ trace_seq_buffer_ptr(struct trace_seq *s) */ #ifdef CONFIG_TRACING extern __printf(2, 3) -int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); +void trace_seq_printf(struct trace_seq *s, const char *fmt, ...); extern __printf(2, 0) -int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); -extern int +void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); +extern void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt); -extern int trace_seq_puts(struct trace_seq *s, const char *str); -extern int trace_seq_putc(struct trace_seq *s, unsigned char c); -extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); -extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, +extern void trace_seq_puts(struct trace_seq *s, const char *str); +extern void trace_seq_putc(struct trace_seq *s, unsigned char c); +extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); +extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, unsigned int len); extern int trace_seq_path(struct trace_seq *s, const struct path *path); -extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, +extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, int nmaskbits); #else /* CONFIG_TRACING */ -static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) +static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) 
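Since the trace_seq output helpers now return void, print routines check the sequence once at the end with trace_seq_has_overflowed() instead of propagating per-call return values. A sketch of that pattern; the event fields are made up.

#include <linux/trace_seq.h>
#include <linux/types.h>

static int example_print_event(struct trace_seq *s, u64 ts, const char *name)
{
        trace_seq_printf(s, "%llu: ", (unsigned long long)ts);
        trace_seq_puts(s, name);
        trace_seq_putc(s, '\n');

        /* Non-zero means everything fit in the PAGE_SIZE buffer. */
        return !trace_seq_has_overflowed(s);
}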
{ - return 0; } -static inline int +static inline void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) { - return 0; } -static inline int +static inline void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, int nmaskbits) { - return 0; } static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) @@ -90,23 +115,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, { return 0; } -static inline int trace_seq_puts(struct trace_seq *s, const char *str) +static inline void trace_seq_puts(struct trace_seq *s, const char *str) { - return 0; } -static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) +static inline void trace_seq_putc(struct trace_seq *s, unsigned char c) { - return 0; } -static inline int +static inline void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) { - return 0; } -static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, +static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, unsigned int len) { - return 0; } static inline int trace_seq_path(struct trace_seq *s, const struct path *path) { diff --git a/include/linux/tty.h b/include/linux/tty.h index 5171ef8f7b85..7d66ae508e5c 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -284,7 +284,7 @@ struct tty_struct { #define N_TTY_BUF_SIZE 4096 - unsigned char closing:1; + int closing; unsigned char *write_buf; int write_cnt; /* If the tty has a pending do_SAK, queue it here - akpm */ @@ -316,12 +316,10 @@ struct tty_file_private { #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ #define TTY_DEBUG 4 /* Debugging */ #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ -#define TTY_CLOSING 7 /* ->close() in progress */ #define TTY_LDISC_OPEN 11 /* Line discipline is open */ #define TTY_PTY_LOCK 16 /* pty private */ #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ #define TTY_HUPPED 18 /* Post driver->hangup() */ -#define TTY_HUPPING 21 /* ->hangup() in progress */ #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) @@ -437,14 +435,13 @@ extern int is_ignored(int sig); extern int tty_signal(int sig, struct tty_struct *tty); extern void tty_hangup(struct tty_struct *tty); extern void tty_vhangup(struct tty_struct *tty); -extern void tty_unhangup(struct file *filp); extern int tty_hung_up_p(struct file *filp); extern void do_SAK(struct tty_struct *tty); extern void __do_SAK(struct tty_struct *tty); extern void no_tty(void); extern void tty_flush_to_ldisc(struct tty_struct *tty); extern void tty_buffer_free_all(struct tty_port *port); -extern void tty_buffer_flush(struct tty_struct *tty); +extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); extern void tty_buffer_init(struct tty_port *port); extern speed_t tty_termios_baud_rate(struct ktermios *termios); extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); @@ -498,9 +495,6 @@ extern int tty_init_termios(struct tty_struct *tty); extern int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty); -extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); -extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); - extern struct mutex tty_mutex; extern spinlock_t tty_files_lock; @@ -562,7 +556,7 @@ extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); extern int tty_unregister_ldisc(int disc); extern int 
tty_set_ldisc(struct tty_struct *tty, int ldisc); extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); -extern void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty); +extern void tty_ldisc_release(struct tty_struct *tty); extern void tty_ldisc_init(struct tty_struct *tty); extern void tty_ldisc_deinit(struct tty_struct *tty); extern void tty_ldisc_begin(void); @@ -623,14 +617,6 @@ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); -/* serial.c */ - -extern void serial_console_init(void); - -/* pcxx.c */ - -extern int pcxe_open(struct tty_struct *tty, struct file *filp); - /* vt.c */ extern int vt_ioctl(struct tty_struct *tty, @@ -643,11 +629,9 @@ extern long vt_compat_ioctl(struct tty_struct *tty, /* functions for preparation of BKL removal */ extern void __lockfunc tty_lock(struct tty_struct *tty); extern void __lockfunc tty_unlock(struct tty_struct *tty); -extern void __lockfunc tty_lock_pair(struct tty_struct *tty, - struct tty_struct *tty2); -extern void __lockfunc tty_unlock_pair(struct tty_struct *tty, - struct tty_struct *tty2); - +extern void __lockfunc tty_lock_slave(struct tty_struct *tty); +extern void __lockfunc tty_unlock_slave(struct tty_struct *tty); +extern void tty_set_lock_subclass(struct tty_struct *tty); /* * this shall be called only from where BTM is held (like close) * diff --git a/include/linux/uio.h b/include/linux/uio.h index 9b1581414cd4..a41e252396c0 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -31,6 +31,7 @@ struct iov_iter { size_t count; union { const struct iovec *iov; + const struct kvec *kvec; const struct bio_vec *bvec; }; unsigned long nr_segs; @@ -82,10 +83,13 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); +size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); size_t iov_iter_zero(size_t bytes, struct iov_iter *); unsigned long iov_iter_alignment(const struct iov_iter *i); void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, unsigned long nr_segs, size_t count); +void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *iov, + unsigned long nr_segs, size_t count); ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start); ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, @@ -123,9 +127,10 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } +size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); -int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index baa81718d985..32c0e83d6239 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -35,7 +35,7 @@ struct uio_map; struct uio_mem { const char *name; phys_addr_t addr; - 
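iov_iter_kvec() lets kernel callers describe purely in-kernel buffers with the same iterator that msghdr now carries. A sketch of building such an iterator and draining it with copy_from_iter(), assuming the ITER_KVEC-tagged direction convention used by this series; the helper and its parameters are hypothetical.

#include <linux/fs.h>
#include <linux/uio.h>

static size_t example_gather(void *dst, size_t len,
                             struct kvec *vec, unsigned long nr_segs,
                             size_t total)
{
        struct iov_iter iter;

        /* WRITE: the iterator is a data source, as in a write syscall. */
        iov_iter_kvec(&iter, ITER_KVEC | WRITE, vec, nr_segs, total);

        return copy_from_iter(dst, len, &iter);
}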
unsigned long size; + resource_size_t size; int memtype; void __iomem *internal_addr; struct uio_map *map; diff --git a/include/linux/usb.h b/include/linux/usb.h index 447a7e2fc19b..f89c24a03bd9 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -637,7 +637,7 @@ static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index) #endif /* USB autosuspend and autoresume */ -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM extern void usb_enable_autosuspend(struct usb_device *udev); extern void usb_disable_autosuspend(struct usb_device *udev); diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index e14c09a45c5a..535997a6681b 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -13,11 +13,12 @@ struct ci_hdrc_platform_data { /* offset of the capability registers */ uintptr_t capoffset; unsigned power_budget; - struct usb_phy *phy; + struct phy *phy; + /* old usb_phy interface */ + struct usb_phy *usb_phy; enum usb_phy_interface phy_mode; unsigned long flags; #define CI_HDRC_REGS_SHARED BIT(0) -#define CI_HDRC_REQUIRE_TRANSCEIVER BIT(1) #define CI_HDRC_DISABLE_STREAMING BIT(3) /* * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1, diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index c330f5ef42cf..3d87defcc527 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -427,6 +427,8 @@ static inline struct usb_composite_driver *to_cdriver( * @b_vendor_code: bMS_VendorCode part of the OS string * @use_os_string: false by default, interested gadgets set it * @os_desc_config: the configuration to be used with OS descriptors + * @setup_pending: true when setup request is queued but not completed + * @os_desc_pending: true when os_desc request is queued but not completed * * One of these devices is allocated and initialized before the * associated device driver's bind() is called. @@ -488,6 +490,9 @@ struct usb_composite_dev { /* protects deactivations and delayed_status counts*/ spinlock_t lock; + + unsigned setup_pending:1; + unsigned os_desc_pending:1; }; extern int usb_string_id(struct usb_composite_dev *c); @@ -501,6 +506,8 @@ extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); extern void composite_disconnect(struct usb_gadget *gadget); extern int composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl); +extern void composite_suspend(struct usb_gadget *gadget); +extern void composite_resume(struct usb_gadget *gadget); /* * Some systems will need runtime overrides for the product identifiers diff --git a/include/linux/usb/ehci-dbgp.h b/include/linux/usb/ehci-dbgp.h new file mode 100644 index 000000000000..7344d9e591cc --- /dev/null +++ b/include/linux/usb/ehci-dbgp.h @@ -0,0 +1,83 @@ +/* + * Standalone EHCI usb debug driver + * + * Originally written by: + * Eric W. Biederman" <ebiederm@xmission.com> and + * Yinghai Lu <yhlu.kernel@gmail.com> + * + * Changes for early/late printk and HW errata: + * Jason Wessel <jason.wessel@windriver.com> + * Copyright (C) 2009 Wind River Systems, Inc. + * + */ + +#ifndef __LINUX_USB_EHCI_DBGP_H +#define __LINUX_USB_EHCI_DBGP_H + +#include <linux/console.h> +#include <linux/types.h> + +/* Appendix C, Debug port ... intended for use with special "debug devices" + * that can help if there's no serial console. (nonstandard enumeration.) 
+ */ +struct ehci_dbg_port { + u32 control; +#define DBGP_OWNER (1<<30) +#define DBGP_ENABLED (1<<28) +#define DBGP_DONE (1<<16) +#define DBGP_INUSE (1<<10) +#define DBGP_ERRCODE(x) (((x)>>7)&0x07) +# define DBGP_ERR_BAD 1 +# define DBGP_ERR_SIGNAL 2 +#define DBGP_ERROR (1<<6) +#define DBGP_GO (1<<5) +#define DBGP_OUT (1<<4) +#define DBGP_LEN(x) (((x)>>0)&0x0f) + u32 pids; +#define DBGP_PID_GET(x) (((x)>>16)&0xff) +#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) + u32 data03; + u32 data47; + u32 address; +#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) +}; + +#ifdef CONFIG_EARLY_PRINTK_DBGP +extern int early_dbgp_init(char *s); +extern struct console early_dbgp_console; +#endif /* CONFIG_EARLY_PRINTK_DBGP */ + +struct usb_hcd; + +#ifdef CONFIG_XEN_DOM0 +extern int xen_dbgp_reset_prep(struct usb_hcd *); +extern int xen_dbgp_external_startup(struct usb_hcd *); +#else +static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd) +{ + return 1; /* Shouldn't this be 0? */ +} + +static inline int xen_dbgp_external_startup(struct usb_hcd *hcd) +{ + return -1; +} +#endif + +#ifdef CONFIG_EARLY_PRINTK_DBGP +/* Call backs from ehci host driver to ehci debug driver */ +extern int dbgp_external_startup(struct usb_hcd *); +extern int dbgp_reset_prep(struct usb_hcd *); +#else +static inline int dbgp_reset_prep(struct usb_hcd *hcd) +{ + return xen_dbgp_reset_prep(hcd); +} + +static inline int dbgp_external_startup(struct usb_hcd *hcd) +{ + return xen_dbgp_external_startup(hcd); +} +#endif + +#endif /* __LINUX_USB_EHCI_DBGP_H */ diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index daec99af5d54..966889a20ea3 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -19,6 +19,8 @@ #ifndef __LINUX_USB_EHCI_DEF_H #define __LINUX_USB_EHCI_DEF_H +#include <linux/usb/ehci-dbgp.h> + /* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ /* Section 2.2 Host Controller Capability Registers */ @@ -190,67 +192,4 @@ struct ehci_regs { #define USBMODE_EX_HC (3<<0) /* host controller mode */ }; -/* Appendix C, Debug port ... intended for use with special "debug devices" - * that can help if there's no serial console. (nonstandard enumeration.) - */ -struct ehci_dbg_port { - u32 control; -#define DBGP_OWNER (1<<30) -#define DBGP_ENABLED (1<<28) -#define DBGP_DONE (1<<16) -#define DBGP_INUSE (1<<10) -#define DBGP_ERRCODE(x) (((x)>>7)&0x07) -# define DBGP_ERR_BAD 1 -# define DBGP_ERR_SIGNAL 2 -#define DBGP_ERROR (1<<6) -#define DBGP_GO (1<<5) -#define DBGP_OUT (1<<4) -#define DBGP_LEN(x) (((x)>>0)&0x0f) - u32 pids; -#define DBGP_PID_GET(x) (((x)>>16)&0xff) -#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok)) - u32 data03; - u32 data47; - u32 address; -#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep)) -}; - -#ifdef CONFIG_EARLY_PRINTK_DBGP -#include <linux/init.h> -extern int __init early_dbgp_init(char *s); -extern struct console early_dbgp_console; -#endif /* CONFIG_EARLY_PRINTK_DBGP */ - -struct usb_hcd; - -#ifdef CONFIG_XEN_DOM0 -extern int xen_dbgp_reset_prep(struct usb_hcd *); -extern int xen_dbgp_external_startup(struct usb_hcd *); -#else -static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd) -{ - return 1; /* Shouldn't this be 0? 
*/ -} - -static inline int xen_dbgp_external_startup(struct usb_hcd *hcd) -{ - return -1; -} -#endif - -#ifdef CONFIG_EARLY_PRINTK_DBGP -/* Call backs from ehci host driver to ehci debug driver */ -extern int dbgp_external_startup(struct usb_hcd *); -extern int dbgp_reset_prep(struct usb_hcd *hcd); -#else -static inline int dbgp_reset_prep(struct usb_hcd *hcd) -{ - return xen_dbgp_reset_prep(hcd); -} -static inline int dbgp_external_startup(struct usb_hcd *hcd) -{ - return xen_dbgp_external_startup(hcd); -} -#endif - #endif /* __LINUX_USB_EHCI_DEF_H */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 522cafe26790..70ddb3943b62 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -490,8 +490,7 @@ struct usb_gadget_ops { void (*get_config_params)(struct usb_dcd_config_params *); int (*udc_start)(struct usb_gadget *, struct usb_gadget_driver *); - int (*udc_stop)(struct usb_gadget *, - struct usb_gadget_driver *); + int (*udc_stop)(struct usb_gadget *); }; /** @@ -925,7 +924,7 @@ extern int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)); extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); extern void usb_del_gadget_udc(struct usb_gadget *gadget); -extern int udc_attach_driver(const char *name, +extern int usb_udc_attach_driver(const char *name, struct usb_gadget_driver *driver); /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index cd96a2bc3388..086bf13307e6 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -93,7 +93,7 @@ struct usb_hcd { struct timer_list rh_timer; /* drives root-hub polling */ struct urb *status_urb; /* the current status urb */ -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM struct work_struct wakeup_work; /* for remote wakeup */ #endif @@ -379,6 +379,9 @@ struct hc_driver { int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state state); int (*find_raw_port_number)(struct usb_hcd *, int); + /* Call for power on/off the port if necessary */ + int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable); + }; static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) @@ -625,16 +628,13 @@ extern int usb_find_interface_driver(struct usb_device *dev, extern void usb_root_hub_lost_power(struct usb_device *rhdev); extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); -#endif /* CONFIG_PM */ - -#ifdef CONFIG_PM_RUNTIME extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); #else static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) { return; } -#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 154332b7c8c0..52661c5da690 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -9,15 +9,20 @@ #ifndef __LINUX_USB_OTG_H #define __LINUX_USB_OTG_H +#include <linux/phy/phy.h> #include <linux/usb/phy.h> struct usb_otg { u8 default_a; - struct usb_phy *phy; + struct phy *phy; + /* old usb_phy interface */ + struct usb_phy *usb_phy; struct usb_bus *host; struct usb_gadget *gadget; + enum usb_otg_state state; + /* bind/unbind the host controller */ int (*set_host)(struct usb_otg *otg, struct usb_bus *host); diff --git 
a/include/linux/usb/phy.h b/include/linux/usb/phy.h index 353053a33f21..f499c23e6342 100644 --- a/include/linux/usb/phy.h +++ b/include/linux/usb/phy.h @@ -77,7 +77,6 @@ struct usb_phy { unsigned int flags; enum usb_phy_type type; - enum usb_otg_state state; enum usb_phy_events last_event; struct usb_otg *otg; @@ -210,6 +209,7 @@ extern void usb_put_phy(struct usb_phy *); extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); extern int usb_bind_phy(const char *dev_name, u8 index, const char *phy_dev_name); +extern void usb_phy_set_event(struct usb_phy *x, unsigned long event); #else static inline struct usb_phy *usb_get_phy(enum usb_phy_type type) { @@ -251,6 +251,10 @@ static inline int usb_bind_phy(const char *dev_name, u8 index, { return -EOPNOTSUPP; } + +static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event) +{ +} #endif static inline int diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index d5952bb66752..9fd9e481ea98 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -145,6 +145,10 @@ struct renesas_usbhs_driver_param { int d0_rx_id; int d1_tx_id; int d1_rx_id; + int d2_tx_id; + int d2_rx_id; + int d3_tx_id; + int d3_rx_id; /* * option: diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index e95372654f09..8297e5b341d8 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -3,6 +3,7 @@ #include <linux/kref.h> #include <linux/nsproxy.h> +#include <linux/ns_common.h> #include <linux/sched.h> #include <linux/err.h> @@ -17,6 +18,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */ } extent[UID_GID_MAP_MAX_EXTENTS]; }; +#define USERNS_SETGROUPS_ALLOWED 1UL + +#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED + struct user_namespace { struct uid_gid_map uid_map; struct uid_gid_map gid_map; @@ -26,7 +31,8 @@ struct user_namespace { int level; kuid_t owner; kgid_t group; - unsigned int proc_inum; + struct ns_common ns; + unsigned long flags; /* Register of per-UID persistent keyrings for this namespace */ #ifdef CONFIG_PERSISTENT_KEYRINGS @@ -63,6 +69,9 @@ extern const struct seq_operations proc_projid_seq_operations; extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *); +extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *); +extern int proc_setgroups_show(struct seq_file *m, void *v); +extern bool userns_may_setgroups(const struct user_namespace *ns); #else static inline struct user_namespace *get_user_ns(struct user_namespace *ns) @@ -87,6 +96,10 @@ static inline void put_user_ns(struct user_namespace *ns) { } +static inline bool userns_may_setgroups(const struct user_namespace *ns) +{ + return true; +} #endif #endif /* _LINUX_USER_H */ diff --git a/include/linux/utsname.h b/include/linux/utsname.h index 239e27733d6c..5093f58ae192 100644 --- a/include/linux/utsname.h +++ b/include/linux/utsname.h @@ -5,6 +5,7 @@ #include <linux/sched.h> #include <linux/kref.h> #include <linux/nsproxy.h> +#include <linux/ns_common.h> #include <linux/err.h> #include <uapi/linux/utsname.h> @@ -23,7 +24,7 @@ struct uts_namespace { struct kref kref; struct new_utsname name; struct user_namespace *user_ns; - unsigned int proc_inum; + struct ns_common ns; }; extern struct uts_namespace 
init_uts_ns; diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 65261a7244fc..28f0e65b9a11 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -75,10 +75,13 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq); bool virtqueue_is_broken(struct virtqueue *vq); +void *virtqueue_get_avail(struct virtqueue *vq); +void *virtqueue_get_used(struct virtqueue *vq); + /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus - * @failed: saved value for CONFIG_S_FAILED bit (for restore) + * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore) * @config_enabled: configuration change reporting enabled * @config_change_pending: configuration change reported while disabled * @config_lock: protects configuration change reporting @@ -101,11 +104,12 @@ struct virtio_device { const struct virtio_config_ops *config; const struct vringh_config_ops *vringh_config; struct list_head vqs; - /* Note that this is a Linux set_bit-style bitmap. */ - unsigned long features[1]; + u64 features; void *priv; }; +bool virtio_device_is_legacy_only(struct virtio_device_id id); + static inline struct virtio_device *dev_to_virtio(struct device *_dev) { return container_of(_dev, struct virtio_device, dev); @@ -128,6 +132,8 @@ int virtio_device_restore(struct virtio_device *dev); * @id_table: the ids serviced by this driver. * @feature_table: an array of feature numbers supported by this driver. * @feature_table_size: number of entries in the feature table array. + * @feature_table_legacy: same as feature_table but when working in legacy mode. + * @feature_table_size_legacy: number of entries in feature table legacy array. * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function to call when a device is removed. * @config_changed: optional function to call when the device configuration @@ -138,6 +144,8 @@ struct virtio_driver { const struct virtio_device_id *id_table; const unsigned int *feature_table; unsigned int feature_table_size; + const unsigned int *feature_table_legacy; + unsigned int feature_table_size_legacy; int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); void (*remove)(struct virtio_device *dev); diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h new file mode 100644 index 000000000000..51865d05b267 --- /dev/null +++ b/include/linux/virtio_byteorder.h @@ -0,0 +1,59 @@ +#ifndef _LINUX_VIRTIO_BYTEORDER_H +#define _LINUX_VIRTIO_BYTEORDER_H +#include <linux/types.h> +#include <uapi/linux/virtio_types.h> + +/* + * Low-level memory accessors for handling virtio in modern little endian and in + * compatibility native endian format. 
+ */ + +static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val) +{ + if (little_endian) + return le16_to_cpu((__force __le16)val); + else + return (__force u16)val; +} + +static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val) +{ + if (little_endian) + return (__force __virtio16)cpu_to_le16(val); + else + return (__force __virtio16)val; +} + +static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val) +{ + if (little_endian) + return le32_to_cpu((__force __le32)val); + else + return (__force u32)val; +} + +static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val) +{ + if (little_endian) + return (__force __virtio32)cpu_to_le32(val); + else + return (__force __virtio32)val; +} + +static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val) +{ + if (little_endian) + return le64_to_cpu((__force __le64)val); + else + return (__force u64)val; +} + +static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val) +{ + if (little_endian) + return (__force __virtio64)cpu_to_le64(val); + else + return (__force __virtio64)val; +} + +#endif /* _LINUX_VIRTIO_BYTEORDER */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 7f4ef66873ef..ca3ed78e5ec7 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -4,6 +4,7 @@ #include <linux/err.h> #include <linux/bug.h> #include <linux/virtio.h> +#include <linux/virtio_byteorder.h> #include <uapi/linux/virtio_config.h> /** @@ -18,6 +19,9 @@ * offset: the offset of the configuration field * buf: the buffer to read the field value from. * len: the length of the buffer + * @generation: config generation counter + * vdev: the virtio_device + * Returns the config generation counter * @get_status: read the status byte * vdev: the virtio_device * Returns the status byte @@ -46,6 +50,7 @@ * vdev: the virtio_device * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. + * Returns 0 on success or error status * @bus_name: return the bus name associated with the device * vdev: the virtio_device * This returns a pointer to the bus name a la pci_name from which @@ -58,6 +63,7 @@ struct virtio_config_ops { void *buf, unsigned len); void (*set)(struct virtio_device *vdev, unsigned offset, const void *buf, unsigned len); + u32 (*generation)(struct virtio_device *vdev); u8 (*get_status)(struct virtio_device *vdev); void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); @@ -66,8 +72,8 @@ struct virtio_config_ops { vq_callback_t *callbacks[], const char *names[]); void (*del_vqs)(struct virtio_device *); - u32 (*get_features)(struct virtio_device *vdev); - void (*finalize_features)(struct virtio_device *vdev); + u64 (*get_features)(struct virtio_device *vdev); + int (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); int (*set_vq_affinity)(struct virtqueue *vq, int cpu); }; @@ -77,23 +83,70 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit); /** - * virtio_has_feature - helper to determine if this device has this feature. + * __virtio_test_bit - helper to test feature bits. For use by transports. + * Devices should normally use virtio_has_feature, + * which includes more checks. 
* @vdev: the device * @fbit: the feature bit */ -static inline bool virtio_has_feature(const struct virtio_device *vdev, +static inline bool __virtio_test_bit(const struct virtio_device *vdev, + unsigned int fbit) +{ + /* Did you forget to fix assumptions on max features? */ + if (__builtin_constant_p(fbit)) + BUILD_BUG_ON(fbit >= 64); + else + BUG_ON(fbit >= 64); + + return vdev->features & BIT_ULL(fbit); +} + +/** + * __virtio_set_bit - helper to set feature bits. For use by transports. + * @vdev: the device + * @fbit: the feature bit + */ +static inline void __virtio_set_bit(struct virtio_device *vdev, + unsigned int fbit) +{ + /* Did you forget to fix assumptions on max features? */ + if (__builtin_constant_p(fbit)) + BUILD_BUG_ON(fbit >= 64); + else + BUG_ON(fbit >= 64); + + vdev->features |= BIT_ULL(fbit); +} + +/** + * __virtio_clear_bit - helper to clear feature bits. For use by transports. + * @vdev: the device + * @fbit: the feature bit + */ +static inline void __virtio_clear_bit(struct virtio_device *vdev, unsigned int fbit) { /* Did you forget to fix assumptions on max features? */ if (__builtin_constant_p(fbit)) - BUILD_BUG_ON(fbit >= 32); + BUILD_BUG_ON(fbit >= 64); else - BUG_ON(fbit >= 32); + BUG_ON(fbit >= 64); + + vdev->features &= ~BIT_ULL(fbit); +} +/** + * virtio_has_feature - helper to determine if this device has this feature. + * @vdev: the device + * @fbit: the feature bit + */ +static inline bool virtio_has_feature(const struct virtio_device *vdev, + unsigned int fbit) +{ if (fbit < VIRTIO_TRANSPORT_F_START) virtio_check_driver_offered_feature(vdev, fbit); - return test_bit(fbit, vdev->features); + return __virtio_test_bit(vdev, fbit); } static inline @@ -152,6 +205,37 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu) return 0; } +/* Memory accessors */ +static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val) +{ + return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val) +{ + return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val) +{ + return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val) +{ + return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val) +{ + return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) +{ + return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + /* Config space accessors. */ #define virtio_cread(vdev, structname, member, ptr) \ do { \ @@ -221,11 +305,33 @@ static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset) return ret; } +/* Read @count fields, @bytes each. */ +static inline void __virtio_cread_many(struct virtio_device *vdev, + unsigned int offset, + void *buf, size_t count, size_t bytes) +{ + u32 old, gen = vdev->config->generation ? + vdev->config->generation(vdev) : 0; + int i; + + do { + old = gen; + + for (i = 0; i < count; i++) + vdev->config->get(vdev, offset + bytes * i, + buf + i * bytes, bytes); + + gen = vdev->config->generation ? 
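The feature word is now a plain u64, and transports manipulate individual bits through __virtio_test_bit()/__virtio_set_bit()/__virtio_clear_bit() rather than the old set_bit-style bitmap. A sketch of a transport-side check; the helper is hypothetical.

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static bool example_device_is_modern(struct virtio_device *vdev)
{
        /* VIRTIO_F_VERSION_1 comes from the uapi virtio_config.h
         * introduced in this same cycle. */
        return __virtio_test_bit(vdev, VIRTIO_F_VERSION_1);
}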
+ vdev->config->generation(vdev) : 0; + } while (gen != old); +} + + static inline void virtio_cread_bytes(struct virtio_device *vdev, unsigned int offset, void *buf, size_t len) { - vdev->config->get(vdev, offset, buf, len); + __virtio_cread_many(vdev, offset, buf, len, 1); } static inline void virtio_cwrite8(struct virtio_device *vdev, @@ -239,12 +345,13 @@ static inline u16 virtio_cread16(struct virtio_device *vdev, { u16 ret; vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return ret; + return virtio16_to_cpu(vdev, (__force __virtio16)ret); } static inline void virtio_cwrite16(struct virtio_device *vdev, unsigned int offset, u16 val) { + val = (__force u16)cpu_to_virtio16(vdev, val); vdev->config->set(vdev, offset, &val, sizeof(val)); } @@ -253,12 +360,13 @@ static inline u32 virtio_cread32(struct virtio_device *vdev, { u32 ret; vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return ret; + return virtio32_to_cpu(vdev, (__force __virtio32)ret); } static inline void virtio_cwrite32(struct virtio_device *vdev, unsigned int offset, u32 val) { + val = (__force u32)cpu_to_virtio32(vdev, val); vdev->config->set(vdev, offset, &val, sizeof(val)); } @@ -267,12 +375,14 @@ static inline u64 virtio_cread64(struct virtio_device *vdev, { u64 ret; vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return ret; + __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret)); + return virtio64_to_cpu(vdev, (__force __virtio64)ret); } static inline void virtio_cwrite64(struct virtio_device *vdev, unsigned int offset, u64 val) { + val = (__force u64)cpu_to_virtio64(vdev, val); vdev->config->set(vdev, offset, &val, sizeof(val)); } diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 730334cdf037..9246d32dc973 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -90,6 +90,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_DEBUG_VM_VMACACHE VMACACHE_FIND_CALLS, VMACACHE_FIND_HITS, + VMACACHE_FULL_FLUSHES, #endif NR_VM_EVENT_ITEMS }; diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h index 023430e265fe..5691f752ce8f 100644 --- a/include/linux/vmw_vmci_api.h +++ b/include/linux/vmw_vmci_api.h @@ -24,6 +24,7 @@ #define VMCI_KERNEL_API_VERSION_2 2 #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 +struct msghdr; typedef void (vmci_device_shutdown_fn) (void *device_registration, void *user_data); @@ -75,8 +76,8 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size, ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, void *iov, size_t iov_size, int mode); ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, - void *iov, size_t iov_size, int mode); -ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size, + struct msghdr *msg, size_t iov_size, int mode); +ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, int mode); #endif /* !__VMW_VMCI_API_H__ */ diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 749cde28728b..a3fa537e717a 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -24,12 +24,16 @@ #ifndef _LINUX_VRINGH_H #define _LINUX_VRINGH_H #include <uapi/linux/virtio_ring.h> +#include <linux/virtio_byteorder.h> #include <linux/uio.h> #include <linux/slab.h> #include <asm/barrier.h> /* virtio_ring with information needed for host access. */ struct vringh { + /* Everything is little endian */ + bool little_endian; + /* Guest publishes used event idx (note: we always do). 
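virtio_cread64() and the other config readers now retry when the device's generation counter changes mid-read and byte-swap for VIRTIO 1.0 devices, so drivers simply ask for the field. A short sketch; the offset and the "capacity" field are hypothetical.

#include <linux/virtio_config.h>

static u64 example_read_capacity(struct virtio_device *vdev)
{
        /* Offset 0 stands in for a real config-space layout. */
        return virtio_cread64(vdev, 0);
}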
*/ bool event_indices; @@ -105,7 +109,7 @@ struct vringh_kiov { #define VRINGH_IOV_ALLOCATED 0x8000000 /* Helpers for userspace vrings. */ -int vringh_init_user(struct vringh *vrh, u32 features, +int vringh_init_user(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, struct vring_desc __user *desc, struct vring_avail __user *avail, @@ -167,7 +171,7 @@ bool vringh_notify_enable_user(struct vringh *vrh); void vringh_notify_disable_user(struct vringh *vrh); /* Helpers for kernelspace vrings. */ -int vringh_init_kern(struct vringh *vrh, u32 features, +int vringh_init_kern(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, struct vring_desc *desc, struct vring_avail *avail, @@ -222,4 +226,33 @@ static inline void vringh_notify(struct vringh *vrh) vrh->notify(vrh); } +static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val) +{ + return __virtio16_to_cpu(vrh->little_endian, val); +} + +static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val) +{ + return __cpu_to_virtio16(vrh->little_endian, val); +} + +static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val) +{ + return __virtio32_to_cpu(vrh->little_endian, val); +} + +static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val) +{ + return __cpu_to_virtio32(vrh->little_endian, val); +} + +static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val) +{ + return __virtio64_to_cpu(vrh->little_endian, val); +} + +static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) +{ + return __cpu_to_virtio64(vrh->little_endian, val); +} #endif /* _LINUX_VRINGH_H */ diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h index 57585c7004a4..4376beeb28c2 100644 --- a/include/media/davinci/vpbe.h +++ b/include/media/davinci/vpbe.h @@ -63,7 +63,7 @@ struct vpbe_output { * output basis. 
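The host-side vring helpers gain a per-ring little_endian flag and matching accessors, mirroring the driver-side virtio16/32/64 helpers. A sketch of converting one descriptor field, assuming the uapi vring_desc fields carry the __virtio32 typing from this series; the helper is illustrative.

#include <linux/vringh.h>

static u32 example_desc_len(const struct vringh *vrh,
                            const struct vring_desc *desc)
{
        /* desc->len is stored in ring byte order; convert for the host. */
        return vringh32_to_cpu(vrh, desc->len);
}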
If per mode is needed, we may have to move this to * mode_info structure */ - enum v4l2_mbus_pixelcode if_params; + u32 if_params; }; /* encoder configuration info */ diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h index 637749a91432..fa0247ad815f 100644 --- a/include/media/davinci/vpbe_display.h +++ b/include/media/davinci/vpbe_display.h @@ -70,8 +70,6 @@ struct vpbe_disp_buffer { /* vpbe display object structure */ struct vpbe_layer { - /* number of buffers in fbuffers */ - unsigned int numbuffers; /* Pointer to the vpbe_display */ struct vpbe_display *disp_dev; /* Pointer pointing to current v4l2_buffer */ @@ -91,10 +89,6 @@ struct vpbe_layer { /* V4l2 specific parameters */ /* Identifies video device for this layer */ struct video_device video_dev; - /* This field keeps track of type of buffer exchange mechanism user - * has selected - */ - enum v4l2_memory memory; /* Used to store pixel format */ struct v4l2_pix_format pix_fmt; enum v4l2_field buf_field; @@ -106,12 +100,8 @@ struct vpbe_layer { unsigned char window_enable; /* number of open instances of the layer */ unsigned int usrs; - /* number of users performing IO */ - unsigned int io_usrs; /* Indicates id of the field which is being displayed */ unsigned int field_id; - /* Indicates whether streaming started */ - unsigned char started; /* Identifies device object */ enum vpbe_display_device_id device_id; /* facilitation of ioctl ops lock by v4l2*/ @@ -131,17 +121,6 @@ struct vpbe_display { struct osd_state *osd_device; }; -/* File handle structure */ -struct vpbe_fh { - struct v4l2_fh fh; - /* vpbe device structure */ - struct vpbe_display *disp_dev; - /* pointer to layer object for opened device */ - struct vpbe_layer *layer; - /* Indicates whether this file handle is doing IO */ - unsigned char io_allowed; -}; - struct buf_config_params { unsigned char min_numbuffers; unsigned char numbuffers[VPBE_DISPLAY_MAX_DEVICES]; diff --git a/include/media/davinci/vpbe_venc.h b/include/media/davinci/vpbe_venc.h index 476fafc2f522..3dbd20026107 100644 --- a/include/media/davinci/vpbe_venc.h +++ b/include/media/davinci/vpbe_venc.h @@ -30,11 +30,10 @@ #define VENC_SECOND_FIELD BIT(2) struct venc_platform_data { - int (*setup_pinmux)(enum v4l2_mbus_pixelcode if_type, - int field); + int (*setup_pinmux)(u32 if_type, int field); int (*setup_clock)(enum vpbe_enc_timings_type type, unsigned int pixclock); - int (*setup_if_config)(enum v4l2_mbus_pixelcode pixcode); + int (*setup_if_config)(u32 pixcode); /* Number of LCD outputs supported */ int num_lcd_outputs; struct vpbe_if_params *lcd_if_params; diff --git a/include/media/exynos-fimc.h b/include/media/exynos-fimc.h index aa44660e2041..69bcd2a07d5c 100644 --- a/include/media/exynos-fimc.h +++ b/include/media/exynos-fimc.h @@ -101,7 +101,7 @@ struct fimc_source_info { * @flags: flags indicating which operation mode format applies to */ struct fimc_fmt { - enum v4l2_mbus_pixelcode mbus_code; + u32 mbus_code; char *name; u32 fourcc; u32 color; diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h index 78f0637ca68d..05e7ad5d2c8b 100644 --- a/include/media/lirc_dev.h +++ b/include/media/lirc_dev.h @@ -29,14 +29,13 @@ struct lirc_buffer { /* Using chunks instead of bytes pretends to simplify boundary checking * And should allow for some performance fine tunning later */ struct kfifo fifo; - u8 fifo_initialized; }; static inline void lirc_buffer_clear(struct lirc_buffer *buf) { unsigned long flags; - if (buf->fifo_initialized) { + if 
(kfifo_initialized(&buf->fifo)) { spin_lock_irqsave(&buf->fifo_lock, flags); kfifo_reset(&buf->fifo); spin_unlock_irqrestore(&buf->fifo_lock, flags); @@ -56,17 +55,14 @@ static inline int lirc_buffer_init(struct lirc_buffer *buf, buf->chunk_size = chunk_size; buf->size = size; ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL); - if (ret == 0) - buf->fifo_initialized = 1; return ret; } static inline void lirc_buffer_free(struct lirc_buffer *buf) { - if (buf->fifo_initialized) { + if (kfifo_initialized(&buf->fifo)) { kfifo_free(&buf->fifo); - buf->fifo_initialized = 0; } else WARN(1, "calling %s on an uninitialized lirc_buffer\n", __func__); diff --git a/include/media/radio-si4713.h b/include/media/radio-si4713.h deleted file mode 100644 index f6aae29c7741..000000000000 --- a/include/media/radio-si4713.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * include/media/radio-si4713.h - * - * Board related data definitions for Si4713 radio transmitter chip. - * - * Copyright (c) 2009 Nokia Corporation - * Contact: Eduardo Valentin <eduardo.valentin@nokia.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - * - */ - -#ifndef RADIO_SI4713_H -#define RADIO_SI4713_H - -#include <linux/i2c.h> - -#define SI4713_NAME "radio-si4713" - -/* - * Platform dependent definition - */ -struct radio_si4713_platform_data { - int i2c_bus; - struct i2c_board_info *subdev_board_info; -}; - -#endif /* ifndef RADIO_SI4713_H*/ diff --git a/include/media/si4713.h b/include/media/si4713.h index f98a0a7af61c..be4f58e2440b 100644 --- a/include/media/si4713.h +++ b/include/media/si4713.h @@ -23,9 +23,7 @@ * Platform dependent definition */ struct si4713_platform_data { - const char * const *supply_names; - unsigned supplies; - int gpio_reset; /* < 0 if not used */ + bool is_platform_device; }; /* diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 865246b00127..2f6261f3e570 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -296,7 +296,7 @@ const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc( * format setup. 
*/ struct soc_camera_format_xlate { - enum v4l2_mbus_pixelcode code; + u32 code; const struct soc_mbus_pixelfmt *host_fmt; }; diff --git a/include/media/soc_mediabus.h b/include/media/soc_mediabus.h index d33f6d059692..2ff773785fb6 100644 --- a/include/media/soc_mediabus.h +++ b/include/media/soc_mediabus.h @@ -91,16 +91,16 @@ struct soc_mbus_pixelfmt { * @fmt: pixel format description */ struct soc_mbus_lookup { - enum v4l2_mbus_pixelcode code; + u32 code; struct soc_mbus_pixelfmt fmt; }; const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc( - enum v4l2_mbus_pixelcode code, + u32 code, const struct soc_mbus_lookup *lookup, int n); const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc( - enum v4l2_mbus_pixelcode code); + u32 code); s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf); s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, u32 bytes_per_line, u32 height); diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 48f974866f13..1cc0c5ba16b3 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -80,24 +80,9 @@ /* ------------------------------------------------------------------------- */ -/* Control helper functions */ +/* Control helper function */ -int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl, - const char * const *menu_items); -const char *v4l2_ctrl_get_name(u32 id); -const char * const *v4l2_ctrl_get_menu(u32 id); -const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len); int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def); -int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, - struct v4l2_queryctrl *qctrl, const char * const *menu_items); -#define V4L2_CTRL_MENU_IDS_END (0xffffffff) -int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids); - -/* Note: ctrl_classes points to an array of u32 pointers. Each u32 array is a - 0-terminated array of control IDs. Each array must be sorted low to high - and belong to the same control class. The array of u32 pointers must also - be sorted, from low class IDs to high class IDs. */ -u32 v4l2_ctrl_next(const u32 * const *ctrl_classes, u32 id); /* ------------------------------------------------------------------------- */ diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index b7cd7a665e35..911f3e542834 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -670,6 +670,31 @@ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, */ void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv); +/** v4l2_ctrl_get_name() - Get the name of the control + * @id: The control ID. + * + * This function returns the name of the given control ID or NULL if it isn't + * a known control. + */ +const char *v4l2_ctrl_get_name(u32 id); + +/** v4l2_ctrl_get_menu() - Get the menu string array of the control + * @id: The control ID. + * + * This function returns the NULL-terminated menu string array of the + * given control ID or NULL if it isn't a known menu control. + */ +const char * const *v4l2_ctrl_get_menu(u32 id); + +/** v4l2_ctrl_get_int_menu() - Get the integer menu array of the control + * @id: The control ID. + * @len: The size of the integer array. + * + * This function returns the integer array of the given control ID or NULL if it + * isn't a known integer menu control.
+ */ +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len); + /** v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver. * @ctrl: The control. * diff --git a/include/media/v4l2-image-sizes.h b/include/media/v4l2-image-sizes.h index 10daf92ff1ab..a07d7a683bd9 100644 --- a/include/media/v4l2-image-sizes.h +++ b/include/media/v4l2-image-sizes.h @@ -25,10 +25,19 @@ #define QVGA_WIDTH 320 #define QVGA_HEIGHT 240 +#define SVGA_WIDTH 800 +#define SVGA_HEIGHT 600 + #define SXGA_WIDTH 1280 #define SXGA_HEIGHT 1024 #define VGA_WIDTH 640 #define VGA_HEIGHT 480 +#define UXGA_WIDTH 1600 +#define UXGA_HEIGHT 1200 + +#define XGA_WIDTH 1024 +#define XGA_HEIGHT 768 + #endif /* _IMAGE_SIZES_H */ diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index 395c4a95a42a..38d960d8dccd 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -94,16 +94,20 @@ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, pix_fmt->height = mbus_fmt->height; pix_fmt->field = mbus_fmt->field; pix_fmt->colorspace = mbus_fmt->colorspace; + pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; + pix_fmt->quantization = mbus_fmt->quantization; } static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, const struct v4l2_pix_format *pix_fmt, - enum v4l2_mbus_pixelcode code) + u32 code) { mbus_fmt->width = pix_fmt->width; mbus_fmt->height = pix_fmt->height; mbus_fmt->field = pix_fmt->field; mbus_fmt->colorspace = pix_fmt->colorspace; + mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc; + mbus_fmt->quantization = pix_fmt->quantization; mbus_fmt->code = code; } diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index d7465725773d..5860292d42eb 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -341,7 +341,7 @@ struct v4l2_subdev_video_ops { int (*query_dv_timings)(struct v4l2_subdev *sd, struct v4l2_dv_timings *timings); int (*enum_mbus_fmt)(struct v4l2_subdev *sd, unsigned int index, - enum v4l2_mbus_pixelcode *code); + u32 *code); int (*enum_mbus_fsizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize); int (*g_mbus_fmt)(struct v4l2_subdev *sd, diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 6ef2d01197da..bd2cec2d6c3d 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -82,19 +82,23 @@ struct vb2_threadio_data; * unmap_dmabuf. */ struct vb2_mem_ops { - void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags); + void *(*alloc)(void *alloc_ctx, unsigned long size, + enum dma_data_direction dma_dir, + gfp_t gfp_flags); void (*put)(void *buf_priv); struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags); void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, - unsigned long size, int write); + unsigned long size, + enum dma_data_direction dma_dir); void (*put_userptr)(void *buf_priv); void (*prepare)(void *buf_priv); void (*finish)(void *buf_priv); void *(*attach_dmabuf)(void *alloc_ctx, struct dma_buf *dbuf, - unsigned long size, int write); + unsigned long size, + enum dma_data_direction dma_dir); void (*detach_dmabuf)(void *buf_priv); int (*map_dmabuf)(void *buf_priv); void (*unmap_dmabuf)(void *buf_priv); @@ -270,22 +274,24 @@ struct vb2_buffer { * queue setup from completing successfully; optional. 
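With the vb2_mem_ops changes above, allocators receive an enum dma_data_direction instead of a bare "write" flag. A hedged sketch of how a caller might derive that direction from the queue type (the helper name is illustrative, not part of this patch):

#include <linux/dma-direction.h>
#include <linux/videodev2.h>

static enum dma_data_direction example_dma_dir(enum v4l2_buf_type type)
{
        /* OUTPUT queues move data towards the device; CAPTURE queues
         * move data from the device back to memory.
         */
        return V4L2_TYPE_IS_OUTPUT(type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}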
* @buf_prepare: called every time the buffer is queued from userspace * and from the VIDIOC_PREPARE_BUF ioctl; drivers may - * perform any initialization required before each hardware - * operation in this callback; drivers that support - * VIDIOC_CREATE_BUFS must also validate the buffer size; - * if an error is returned, the buffer will not be queued - * in driver; optional. + * perform any initialization required before each + * hardware operation in this callback; drivers can + * access/modify the buffer here as it is still synced for + * the CPU; drivers that support VIDIOC_CREATE_BUFS must + * also validate the buffer size; if an error is returned, + * the buffer will not be queued in driver; optional. * @buf_finish: called before every dequeue of the buffer back to - * userspace; drivers may perform any operations required - * before userspace accesses the buffer; optional. The - * buffer state can be one of the following: DONE and - * ERROR occur while streaming is in progress, and the - * PREPARED state occurs when the queue has been canceled - * and all pending buffers are being returned to their - * default DEQUEUED state. Typically you only have to do - * something if the state is VB2_BUF_STATE_DONE, since in - * all other cases the buffer contents will be ignored - * anyway. + * userspace; the buffer is synced for the CPU, so drivers + * can access/modify the buffer contents; drivers may + * perform any operations required before userspace + * accesses the buffer; optional. The buffer state can be + * one of the following: DONE and ERROR occur while + * streaming is in progress, and the PREPARED state occurs + * when the queue has been canceled and all pending + * buffers are being returned to their default DEQUEUED + * state. Typically you only have to do something if the + * state is VB2_BUF_STATE_DONE, since in all other cases + * the buffer contents will be ignored anyway. * @buf_cleanup: called once before the buffer is freed; drivers may * perform any additional cleanup; optional. 
* @start_streaming: called once to enter 'streaming' state; the driver may diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h index 7b89852779af..14ce3068b642 100644 --- a/include/media/videobuf2-dma-sg.h +++ b/include/media/videobuf2-dma-sg.h @@ -21,6 +21,9 @@ static inline struct sg_table *vb2_dma_sg_plane_desc( return (struct sg_table *)vb2_plane_cookie(vb, plane_no); } +void *vb2_dma_sg_init_ctx(struct device *dev); +void vb2_dma_sg_cleanup_ctx(void *alloc_ctx); + extern const struct vb2_mem_ops vb2_dma_sg_memops; #endif diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index d184df1d0d41..dc03d77ad23b 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -372,12 +372,12 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset) return skb->len + uncomp_header - ret; } -typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev); - -int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, - const u8 *saddr, const u8 saddr_type, const u8 saddr_len, - const u8 *daddr, const u8 daddr_type, const u8 daddr_len, - u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver); +int +lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev, + const u8 *saddr, const u8 saddr_type, + const u8 saddr_len, const u8 *daddr, + const u8 daddr_type, const u8 daddr_len, + u8 iphc0, u8 iphc1); int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, const void *_saddr, unsigned int len); diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h index 085940f7eeec..7d38e2ffd256 100644 --- a/include/net/af_ieee802154.h +++ b/include/net/af_ieee802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Sergey Lapin <slapin@ossfans.org> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index 428277869400..0d87674fb775 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -103,14 +103,14 @@ struct vsock_transport { int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags); int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, - struct iovec *, size_t len); + struct msghdr *, size_t len); bool (*dgram_allow)(u32 cid, u32 port); /* STREAM. */ /* TODO: stream_bind() */ - ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *, + ssize_t (*stream_dequeue)(struct vsock_sock *, struct msghdr *, size_t len, int flags); - ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *, + ssize_t (*stream_enqueue)(struct vsock_sock *, struct msghdr *, size_t len); s64 (*stream_has_data)(struct vsock_sock *); s64 (*stream_has_space)(struct vsock_sock *); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 6e8f24967308..40129b3838b2 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -129,6 +129,15 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_INVALID_BDADDR, + + /* When this quirk is set, the duplicate filtering during + * scanning is based on Bluetooth devices addresses. To allow + * RSSI based updates, restart scanning if needed. 
+ * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. + */ + HCI_QUIRK_STRICT_DUPLICATE_FILTER, }; /* HCI device flags */ @@ -154,6 +163,7 @@ enum { enum { HCI_DUT_MODE, HCI_FORCE_SC, + HCI_FORCE_LESC, HCI_FORCE_STATIC_ADDR, }; @@ -265,6 +275,7 @@ enum { /* Low Energy links do not have defined link type. Use invented one */ #define LE_LINK 0x80 #define AMP_LINK 0x81 +#define INVALID_LINK 0xff /* LMP features */ #define LMP_3SLOT 0x01 @@ -332,6 +343,7 @@ enum { #define HCI_LE_ENCRYPTION 0x01 #define HCI_LE_CONN_PARAM_REQ_PROC 0x02 #define HCI_LE_PING 0x10 +#define HCI_LE_EXT_SCAN_POLICY 0x80 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 @@ -401,6 +413,7 @@ enum { /* The core spec defines 127 as the "not available" value */ #define HCI_TX_POWER_INVALID 127 +#define HCI_RSSI_INVALID 127 #define HCI_ROLE_MASTER 0x00 #define HCI_ROLE_SLAVE 0x01 @@ -629,7 +642,7 @@ struct hci_cp_user_passkey_reply { struct hci_cp_remote_oob_data_reply { bdaddr_t bdaddr; __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; #define HCI_OP_REMOTE_OOB_DATA_NEG_REPLY 0x0433 @@ -721,9 +734,9 @@ struct hci_rp_set_csb { struct hci_cp_remote_oob_ext_data_reply { bdaddr_t bdaddr; __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define HCI_OP_SNIFF_MODE 0x0803 @@ -930,7 +943,7 @@ struct hci_cp_write_ssp_mode { struct hci_rp_read_local_oob_data { __u8 status; __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; #define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58 @@ -1014,9 +1027,9 @@ struct hci_cp_write_sc_support { struct hci_rp_read_local_oob_ext_data { __u8 status; __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define HCI_OP_READ_LOCAL_VERSION 0x1001 @@ -1463,6 +1476,11 @@ struct hci_ev_cmd_status { __le16 opcode; } __packed; +#define HCI_EV_HARDWARE_ERROR 0x10 +struct hci_ev_hardware_error { + __u8 code; +} __packed; + #define HCI_EV_ROLE_CHANGE 0x12 struct hci_ev_role_change { __u8 status; @@ -1734,6 +1752,25 @@ struct hci_ev_le_conn_complete { __u8 clk_accurancy; } __packed; +/* Advertising report event types */ +#define LE_ADV_IND 0x00 +#define LE_ADV_DIRECT_IND 0x01 +#define LE_ADV_SCAN_IND 0x02 +#define LE_ADV_NONCONN_IND 0x03 +#define LE_ADV_SCAN_RSP 0x04 + +#define ADDR_LE_DEV_PUBLIC 0x00 +#define ADDR_LE_DEV_RANDOM 0x01 + +#define HCI_EV_LE_ADVERTISING_REPORT 0x02 +struct hci_ev_le_advertising_info { + __u8 evt_type; + __u8 bdaddr_type; + bdaddr_t bdaddr; + __u8 length; + __u8 data[0]; +} __packed; + #define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03 struct hci_ev_le_conn_update_complete { __u8 status; @@ -1759,23 +1796,14 @@ struct hci_ev_le_remote_conn_param_req { __le16 timeout; } __packed; -/* Advertising report event types */ -#define LE_ADV_IND 0x00 -#define LE_ADV_DIRECT_IND 0x01 -#define LE_ADV_SCAN_IND 0x02 -#define LE_ADV_NONCONN_IND 0x03 -#define LE_ADV_SCAN_RSP 0x04 - -#define ADDR_LE_DEV_PUBLIC 0x00 -#define ADDR_LE_DEV_RANDOM 0x01 - -#define HCI_EV_LE_ADVERTISING_REPORT 0x02 -struct hci_ev_le_advertising_info { +#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B +struct hci_ev_le_direct_adv_info { __u8 evt_type; __u8 bdaddr_type; bdaddr_t bdaddr; - __u8 length; - __u8 data[0]; + __u8 direct_addr_type; + bdaddr_t direct_addr; + __s8 rssi; } __packed; /* Internal events generated by Bluetooth stack */ diff --git a/include/net/bluetooth/hci_core.h 
b/include/net/bluetooth/hci_core.h index 37ff1aef0845..3c7827005c25 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -75,6 +75,10 @@ struct discovery_state { u32 last_adv_flags; u8 last_adv_data[HCI_MAX_AD_LENGTH]; u8 last_adv_data_len; + bool report_invalid_rssi; + s8 rssi; + u16 uuid_count; + u8 (*uuids)[16]; }; struct hci_conn_hash { @@ -108,6 +112,7 @@ struct smp_csrk { struct smp_ltk { struct list_head list; + struct rcu_head rcu; bdaddr_t bdaddr; u8 bdaddr_type; u8 authenticated; @@ -120,6 +125,7 @@ struct smp_ltk { struct smp_irk { struct list_head list; + struct rcu_head rcu; bdaddr_t rpa; bdaddr_t bdaddr; u8 addr_type; @@ -128,6 +134,7 @@ struct smp_irk { struct link_key { struct list_head list; + struct rcu_head rcu; bdaddr_t bdaddr; u8 type; u8 val[HCI_LINK_KEY_SIZE]; @@ -137,10 +144,11 @@ struct link_key { struct oob_data { struct list_head list; bdaddr_t bdaddr; + u8 bdaddr_type; u8 hash192[16]; - u8 randomizer192[16]; + u8 rand192[16]; u8 hash256[16]; - u8 randomizer256[16]; + u8 rand256[16]; }; #define HCI_MAX_SHORT_NAME_LENGTH 10 @@ -303,6 +311,7 @@ struct hci_dev { __u32 req_result; void *smp_data; + void *smp_bredr_data; struct discovery_state discovery; struct hci_conn_hash conn_hash; @@ -398,6 +407,8 @@ struct hci_conn { __u16 le_conn_interval; __u16 le_conn_latency; __u16 le_supv_timeout; + __u8 le_adv_data[HCI_MAX_AD_LENGTH]; + __u8 le_adv_data_len; __s8 rssi; __s8 tx_power; __s8 max_tx_power; @@ -496,6 +507,17 @@ static inline void discovery_init(struct hci_dev *hdev) INIT_LIST_HEAD(&hdev->discovery.all); INIT_LIST_HEAD(&hdev->discovery.unknown); INIT_LIST_HEAD(&hdev->discovery.resolve); + hdev->discovery.report_invalid_rssi = true; + hdev->discovery.rssi = HCI_RSSI_INVALID; +} + +static inline void hci_discovery_filter_clear(struct hci_dev *hdev) +{ + hdev->discovery.report_invalid_rssi = true; + hdev->discovery.rssi = HCI_RSSI_INVALID; + hdev->discovery.uuid_count = 0; + kfree(hdev->discovery.uuids); + hdev->discovery.uuids = NULL; } bool hci_discovery_active(struct hci_dev *hdev); @@ -553,6 +575,8 @@ enum { HCI_CONN_STK_ENCRYPT, HCI_CONN_AUTH_INITIATOR, HCI_CONN_DROP, + HCI_CONN_PARAM_REMOVAL_PEND, + HCI_CONN_NEW_LINK_KEY, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) @@ -643,6 +667,26 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev) return c->acl_num + c->amp_num + c->sco_num + c->le_num; } +static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + __u8 type = INVALID_LINK; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->handle == handle) { + type = c->type; + break; + } + } + + rcu_read_unlock(); + + return type; +} + static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, __u16 handle) { @@ -853,6 +897,7 @@ int hci_register_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); +int hci_reset_dev(struct hci_dev *hdev); int hci_dev_open(__u16 dev); int hci_dev_close(__u16 dev); int hci_dev_reset(__u16 dev); @@ -894,13 +939,11 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len, bool *persistent); -struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, - u8 role); 
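hci_find_ltk() below replaces the old EDIV/Rand based lookup (and the former hci_find_ltk_by_addr()) with a lookup by remote address, address type and role. A hypothetical caller, assuming an existing LE connection object:

static struct smp_ltk *example_get_ltk(struct hci_dev *hdev,
                                       struct hci_conn *conn)
{
        /* Keys are now indexed by remote address, address type and role. */
        return hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
}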
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand); -struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 addr_type, u8 role); +struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 role); int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); void hci_smp_ltks_clear(struct hci_dev *hdev); int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); @@ -915,13 +958,12 @@ void hci_smp_irks_clear(struct hci_dev *hdev); void hci_remote_oob_data_clear(struct hci_dev *hdev); struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, - bdaddr_t *bdaddr); + bdaddr_t *bdaddr, u8 bdaddr_type); int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 *hash, u8 *randomizer); -int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 *hash192, u8 *randomizer192, - u8 *hash256, u8 *randomizer256); -int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr); + u8 bdaddr_type, u8 *hash192, u8 *rand192, + u8 *hash256, u8 *rand256); +int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); @@ -972,6 +1014,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ !test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) +#define bredr_sc_enabled(dev) ((lmp_sc_capable(dev) || \ + test_bit(HCI_FORCE_SC, &(dev)->dbg_flags)) && \ + test_bit(HCI_SC_ENABLED, &(dev)->dev_flags)) /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 @@ -1310,9 +1355,8 @@ int mgmt_update_adv_data(struct hci_dev *hdev); void mgmt_discoverable_timeout(struct hci_dev *hdev); void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent); -void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u32 flags, u8 *name, u8 name_len, - u8 *dev_class); +void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, + u32 flags, u8 *name, u8 name_len); void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected); @@ -1349,8 +1393,8 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status); void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, - u8 *randomizer192, u8 *hash256, - u8 *randomizer256, u8 status); + u8 *rand192, u8 *hash256, u8 *rand256, + u8 status); void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len); diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index ead99f032f7a..d1bb342d083f 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -28,6 +28,7 @@ #define __L2CAP_H #include <asm/unaligned.h> +#include <linux/atomic.h> /* L2CAP defaults */ #define L2CAP_DEFAULT_MTU 672 @@ -140,6 +141,7 @@ struct l2cap_conninfo { #define L2CAP_FC_ATT 0x10 #define L2CAP_FC_SIG_LE 0x20 #define L2CAP_FC_SMP_LE 0x40 +#define L2CAP_FC_SMP_BREDR 0x80 /* L2CAP Control Field bit masks */ #define L2CAP_CTRL_SAR 0xC000 @@ -254,6 +256,7 @@ struct l2cap_conn_rsp { #define L2CAP_CID_ATT 0x0004 #define L2CAP_CID_LE_SIGNALING 
0x0005 #define L2CAP_CID_SMP 0x0006 +#define L2CAP_CID_SMP_BREDR 0x0007 #define L2CAP_CID_DYN_START 0x0040 #define L2CAP_CID_DYN_END 0xffff #define L2CAP_CID_LE_DYN_END 0x007f @@ -481,6 +484,7 @@ struct l2cap_chan { struct hci_conn *hs_hcon; struct hci_chan *hs_hchan; struct kref kref; + atomic_t nesting; __u8 state; @@ -604,10 +608,6 @@ struct l2cap_ops { struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb); - int (*memcpy_fromiovec) (struct l2cap_chan *chan, - unsigned char *kdata, - struct iovec *iov, - int len); }; struct l2cap_conn { @@ -617,8 +617,8 @@ struct l2cap_conn { unsigned int mtu; __u32 feat_mask; - __u8 fixed_chan_mask; - bool hs_enabled; + __u8 remote_fixed_chan; + __u8 local_fixed_chan; __u8 info_state; __u8 info_ident; @@ -713,6 +713,17 @@ enum { FLAG_HOLD_HCI_CONN, }; +/* Lock nesting levels for L2CAP channels. We need these because lockdep + * otherwise considers all channels equal and will e.g. complain about a + * connection oriented channel triggering SMP procedures or a listening + * channel creating and locking a child channel. + */ +enum { + L2CAP_NESTING_SMP, + L2CAP_NESTING_NORMAL, + L2CAP_NESTING_PARENT, +}; + enum { L2CAP_TX_STATE_XMIT, L2CAP_TX_STATE_WAIT_F, @@ -778,7 +789,7 @@ void l2cap_chan_put(struct l2cap_chan *c); static inline void l2cap_chan_lock(struct l2cap_chan *chan) { - mutex_lock(&chan->lock); + mutex_lock_nested(&chan->lock, atomic_read(&chan->nesting)); } static inline void l2cap_chan_unlock(struct l2cap_chan *chan) @@ -890,31 +901,6 @@ static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan) return 0; } -static inline int l2cap_chan_no_memcpy_fromiovec(struct l2cap_chan *chan, - unsigned char *kdata, - struct iovec *iov, - int len) -{ - /* Following is safe since for compiler definitions of kvec and - * iovec are identical, yielding the same in-core layout and alignment - */ - struct kvec *vec = (struct kvec *)iov; - - while (len > 0) { - if (vec->iov_len) { - int copy = min_t(unsigned int, len, vec->iov_len); - memcpy(kdata, vec->iov_base, copy); - len -= copy; - kdata += copy; - vec->iov_base += copy; - vec->iov_len -= copy; - } - vec++; - } - - return 0; -} - extern bool disable_ertm; int l2cap_init_sockets(void); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 414cd2f9a437..95c34d5180fa 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -184,6 +184,9 @@ struct mgmt_cp_load_link_keys { #define MGMT_LTK_UNAUTHENTICATED 0x00 #define MGMT_LTK_AUTHENTICATED 0x01 +#define MGMT_LTK_P256_UNAUTH 0x02 +#define MGMT_LTK_P256_AUTH 0x03 +#define MGMT_LTK_P256_DEBUG 0x04 struct mgmt_ltk_info { struct mgmt_addr_info addr; @@ -299,28 +302,28 @@ struct mgmt_cp_user_passkey_neg_reply { #define MGMT_READ_LOCAL_OOB_DATA_SIZE 0 struct mgmt_rp_read_local_oob_data { __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; struct mgmt_rp_read_local_oob_ext_data { __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021 struct mgmt_cp_add_remote_oob_data { struct mgmt_addr_info addr; __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; #define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32) struct mgmt_cp_add_remote_oob_ext_data { struct mgmt_addr_info addr; __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 
rand256[16]; } __packed; #define MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 64) @@ -495,6 +498,15 @@ struct mgmt_cp_set_public_address { } __packed; #define MGMT_SET_PUBLIC_ADDRESS_SIZE 6 +#define MGMT_OP_START_SERVICE_DISCOVERY 0x003A +struct mgmt_cp_start_service_discovery { + __u8 type; + __s8 rssi; + __le16 uuid_count; + __u8 uuids[0][16]; +} __packed; +#define MGMT_START_SERVICE_DISCOVERY_SIZE 4 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h new file mode 100644 index 000000000000..e01d903633ef --- /dev/null +++ b/include/net/bond_3ad.h @@ -0,0 +1,283 @@ +/* + * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + */ + +#ifndef _NET_BOND_3AD_H +#define _NET_BOND_3AD_H + +#include <asm/byteorder.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/if_ether.h> + +/* General definitions */ +#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW) +#define AD_TIMER_INTERVAL 100 /*msec*/ + +#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} + +#define AD_LACP_SLOW 0 +#define AD_LACP_FAST 1 + +typedef struct mac_addr { + u8 mac_addr_value[ETH_ALEN]; +} __packed mac_addr_t; + +enum { + BOND_AD_STABLE = 0, + BOND_AD_BANDWIDTH = 1, + BOND_AD_COUNT = 2, +}; + +/* rx machine states(43.4.11 in the 802.3ad standard) */ +typedef enum { + AD_RX_DUMMY, + AD_RX_INITIALIZE, /* rx Machine */ + AD_RX_PORT_DISABLED, /* rx Machine */ + AD_RX_LACP_DISABLED, /* rx Machine */ + AD_RX_EXPIRED, /* rx Machine */ + AD_RX_DEFAULTED, /* rx Machine */ + AD_RX_CURRENT /* rx Machine */ +} rx_states_t; + +/* periodic machine states(43.4.12 in the 802.3ad standard) */ +typedef enum { + AD_PERIODIC_DUMMY, + AD_NO_PERIODIC, /* periodic machine */ + AD_FAST_PERIODIC, /* periodic machine */ + AD_SLOW_PERIODIC, /* periodic machine */ + AD_PERIODIC_TX /* periodic machine */ +} periodic_states_t; + +/* mux machine states(43.4.13 in the 802.3ad standard) */ +typedef enum { + AD_MUX_DUMMY, + AD_MUX_DETACHED, /* mux machine */ + AD_MUX_WAITING, /* mux machine */ + AD_MUX_ATTACHED, /* mux machine */ + AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */ +} mux_states_t; + +/* tx machine states(43.4.15 in the 802.3ad standard) */ +typedef enum { + AD_TX_DUMMY, + AD_TRANSMIT /* tx Machine */ +} tx_states_t; + +/* rx indication types */ +typedef enum { + AD_TYPE_LACPDU = 1, /* type lacpdu */ + AD_TYPE_MARKER /* type marker */ +} pdu_type_t; + +/* rx marker indication types */ +typedef enum { + AD_MARKER_INFORMATION_SUBTYPE = 1, /* marker imformation subtype */ + AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */ +} bond_marker_subtype_t; + +/* timers 
types(43.4.9 in the 802.3ad standard) */ +typedef enum { + AD_CURRENT_WHILE_TIMER, + AD_ACTOR_CHURN_TIMER, + AD_PERIODIC_TIMER, + AD_PARTNER_CHURN_TIMER, + AD_WAIT_WHILE_TIMER +} ad_timers_t; + +#pragma pack(1) + +/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */ +typedef struct lacpdu { + u8 subtype; /* = LACP(= 0x01) */ + u8 version_number; + u8 tlv_type_actor_info; /* = actor information(type/length/value) */ + u8 actor_information_length; /* = 20 */ + __be16 actor_system_priority; + struct mac_addr actor_system; + __be16 actor_key; + __be16 actor_port_priority; + __be16 actor_port; + u8 actor_state; + u8 reserved_3_1[3]; /* = 0 */ + u8 tlv_type_partner_info; /* = partner information */ + u8 partner_information_length; /* = 20 */ + __be16 partner_system_priority; + struct mac_addr partner_system; + __be16 partner_key; + __be16 partner_port_priority; + __be16 partner_port; + u8 partner_state; + u8 reserved_3_2[3]; /* = 0 */ + u8 tlv_type_collector_info; /* = collector information */ + u8 collector_information_length;/* = 16 */ + __be16 collector_max_delay; + u8 reserved_12[12]; + u8 tlv_type_terminator; /* = terminator */ + u8 terminator_length; /* = 0 */ + u8 reserved_50[50]; /* = 0 */ +} __packed lacpdu_t; + +typedef struct lacpdu_header { + struct ethhdr hdr; + struct lacpdu lacpdu; +} __packed lacpdu_header_t; + +/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */ +typedef struct bond_marker { + u8 subtype; /* = 0x02 (marker PDU) */ + u8 version_number; /* = 0x01 */ + u8 tlv_type; /* = 0x01 (marker information) */ + /* = 0x02 (marker response information) */ + u8 marker_length; /* = 0x16 */ + u16 requester_port; /* The number assigned to the port by the requester */ + struct mac_addr requester_system; /* The requester's system id */ + u32 requester_transaction_id; /* The transaction id allocated by the requester, */ + u16 pad; /* = 0 */ + u8 tlv_type_terminator; /* = 0x00 */ + u8 terminator_length; /* = 0x00 */ + u8 reserved_90[90]; /* = 0 */ +} __packed bond_marker_t; + +typedef struct bond_marker_header { + struct ethhdr hdr; + struct bond_marker marker; +} __packed bond_marker_header_t; + +#pragma pack() + +struct slave; +struct bonding; +struct ad_info; +struct port; + +#ifdef __ia64__ +#pragma pack(8) +#endif + +/* aggregator structure(43.4.5 in the 802.3ad standard) */ +typedef struct aggregator { + struct mac_addr aggregator_mac_address; + u16 aggregator_identifier; + bool is_individual; + u16 actor_admin_aggregator_key; + u16 actor_oper_aggregator_key; + struct mac_addr partner_system; + u16 partner_system_priority; + u16 partner_oper_aggregator_key; + u16 receive_state; /* BOOLEAN */ + u16 transmit_state; /* BOOLEAN */ + struct port *lag_ports; + /* ****** PRIVATE PARAMETERS ****** */ + struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */ + u16 is_active; /* BOOLEAN. 
Indicates if this aggregator is active */ + u16 num_of_ports; +} aggregator_t; + +struct port_params { + struct mac_addr system; + u16 system_priority; + u16 key; + u16 port_number; + u16 port_priority; + u16 port_state; +}; + +/* port structure(43.4.6 in the 802.3ad standard) */ +typedef struct port { + u16 actor_port_number; + u16 actor_port_priority; + struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */ + u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */ + u16 actor_port_aggregator_identifier; + bool ntt; + u16 actor_admin_port_key; + u16 actor_oper_port_key; + u8 actor_admin_port_state; + u8 actor_oper_port_state; + + struct port_params partner_admin; + struct port_params partner_oper; + + bool is_enabled; + + /* ****** PRIVATE PARAMETERS ****** */ + u16 sm_vars; /* all state machines variables for this port */ + rx_states_t sm_rx_state; /* state machine rx state */ + u16 sm_rx_timer_counter; /* state machine rx timer counter */ + periodic_states_t sm_periodic_state; /* state machine periodic state */ + u16 sm_periodic_timer_counter; /* state machine periodic timer counter */ + mux_states_t sm_mux_state; /* state machine mux state */ + u16 sm_mux_timer_counter; /* state machine mux timer counter */ + tx_states_t sm_tx_state; /* state machine tx state */ + u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */ + struct slave *slave; /* pointer to the bond slave that this port belongs to */ + struct aggregator *aggregator; /* pointer to an aggregator that this port related to */ + struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */ + u32 transaction_id; /* continuous number for identification of Marker PDU's; */ + struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */ +} port_t; + +/* system structure */ +struct ad_system { + u16 sys_priority; + struct mac_addr sys_mac_addr; +}; + +#ifdef __ia64__ +#pragma pack() +#endif + +/* ========== AD Exported structures to the main bonding code ========== */ +#define BOND_AD_INFO(bond) ((bond)->ad_info) +#define SLAVE_AD_INFO(slave) ((slave)->ad_info) + +struct ad_bond_info { + struct ad_system system; /* 802.3ad system structure */ + u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */ + u16 aggregator_identifier; +}; + +struct ad_slave_info { + struct aggregator aggregator; /* 802.3ad aggregator structure */ + struct port port; /* 802.3ad port structure */ + u16 id; +}; + +/* ========== AD Exported functions to the main bonding code ========== */ +void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution); +void bond_3ad_bind_slave(struct slave *slave); +void bond_3ad_unbind_slave(struct slave *slave); +void bond_3ad_state_machine_handler(struct work_struct *); +void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout); +void bond_3ad_adapter_speed_changed(struct slave *slave); +void bond_3ad_adapter_duplex_changed(struct slave *slave); +void bond_3ad_handle_link_change(struct slave *slave, char link); +int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); +int __bond_3ad_get_active_agg_info(struct bonding *bond, + struct ad_info *ad_info); +int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); +int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding 
*bond, + struct slave *slave); +int bond_3ad_set_carrier(struct bonding *bond); +void bond_3ad_update_lacp_rate(struct bonding *bond); +#endif /* _NET_BOND_3AD_H */ + diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h new file mode 100644 index 000000000000..313a8d3b3069 --- /dev/null +++ b/include/net/bond_alb.h @@ -0,0 +1,181 @@ +/* + * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + */ + +#ifndef _NET_BOND_ALB_H +#define _NET_BOND_ALB_H + +#include <linux/if_ether.h> + +struct bonding; +struct slave; + +#define BOND_ALB_INFO(bond) ((bond)->alb_info) +#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info) + +#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */ +#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing. + * Used for division - never set + * to zero !!! + */ +#define BOND_ALB_DEFAULT_LP_INTERVAL 1 +#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of + * learning packets to the switch + */ + +#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \ + * ALB_TIMER_TICKS_PER_SEC) + +#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \ + * ALB_TIMER_TICKS_PER_SEC) + +#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table. + * Note that this value MUST NOT be smaller + * because the key hash table is BYTE wide ! + */ + + +#define TLB_NULL_INDEX 0xffffffff + +/* rlb defs */ +#define RLB_HASH_TABLE_SIZE 256 +#define RLB_NULL_INDEX 0xffffffff +#define RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC) /* 2 seconds */ +#define RLB_ARP_BURST_SIZE 2 +#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb + * rebalance interval (5 min). + */ +/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is + * promiscuous after failover + */ +#define RLB_PROMISC_TIMEOUT (10*ALB_TIMER_TICKS_PER_SEC) + + +struct tlb_client_info { + struct slave *tx_slave; /* A pointer to slave used for transmiting + * packets to a Client that the Hash function + * gave this entry index. + */ + u32 tx_bytes; /* Each Client accumulates the BytesTx that + * were transmitted to it, and after each + * CallBack the LoadHistory is divided + * by the balance interval + */ + u32 load_history; /* This field contains the amount of Bytes + * that were transmitted to this client by + * the server on the previous balance + * interval in Bps. + */ + u32 next; /* The next Hash table entry index, assigned + * to use the same adapter for transmit. + */ + u32 prev; /* The previous Hash table entry index, + * assigned to use the same + */ +}; + +/* ------------------------------------------------------------------------- + * struct rlb_client_info contains all info related to a specific rx client + * connection. 
This is the Clients Hash Table entry struct. + * Note that this is not a proper hash table; if a new client's IP address + * hash collides with an existing client entry, the old entry is replaced. + * + * There is a linked list (linked by the used_next and used_prev members) + * linking all the used entries of the hash table. This allows updating + * all the clients without walking over all the unused elements of the table. + * + * There are also linked lists of entries with identical hash(ip_src). These + * allow cleaning up the table from ip_src<->mac_src associations that have + * become outdated and would cause sending out invalid ARP updates to the + * network. These are linked by the (src_next and src_prev members). + * ------------------------------------------------------------------------- + */ +struct rlb_client_info { + __be32 ip_src; /* the server IP address */ + __be32 ip_dst; /* the client IP address */ + u8 mac_src[ETH_ALEN]; /* the server MAC address */ + u8 mac_dst[ETH_ALEN]; /* the client MAC address */ + + /* list of used hash table entries, starting at rx_hashtbl_used_head */ + u32 used_next; + u32 used_prev; + + /* ip_src based hashing */ + u32 src_next; /* next entry with same hash(ip_src) */ + u32 src_prev; /* prev entry with same hash(ip_src) */ + u32 src_first; /* first entry with hash(ip_src) == this entry's index */ + + u8 assigned; /* checking whether this entry is assigned */ + u8 ntt; /* flag - need to transmit client info */ + struct slave *slave; /* the slave assigned to this client */ + unsigned short vlan_id; /* VLAN tag associated with IP address */ +}; + +struct tlb_slave_info { + u32 head; /* Index to the head of the bi-directional clients + * hash table entries list. The entries in the list + * are the entries that were assigned to use this + * slave for transmit. 
+ */ + u32 load; /* Each slave sums the loadHistory of all clients + * assigned to it + */ +}; + +struct alb_bond_info { + struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ + u32 unbalanced_load; + int tx_rebalance_counter; + int lp_counter; + /* -------- rlb parameters -------- */ + int rlb_enabled; + struct rlb_client_info *rx_hashtbl; /* Receive hash table */ + u32 rx_hashtbl_used_head; + u8 rx_ntt; /* flag - need to transmit + * to all rx clients + */ + struct slave *rx_slave;/* last slave to xmit from */ + u8 primary_is_promisc; /* boolean */ + u32 rlb_promisc_timeout_counter;/* counts primary + * promiscuity time + */ + u32 rlb_update_delay_counter; + u32 rlb_update_retry_counter;/* counter of retries + * of client update + */ + u8 rlb_rebalance; /* flag - indicates that the + * rx traffic should be + * rebalanced + */ +}; + +int bond_alb_initialize(struct bonding *bond, int rlb_enabled); +void bond_alb_deinitialize(struct bonding *bond); +int bond_alb_init_slave(struct bonding *bond, struct slave *slave); +void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave); +void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link); +void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave); +int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev); +int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev); +void bond_alb_monitor(struct work_struct *); +int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr); +void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id); +#endif /* _NET_BOND_ALB_H */ + diff --git a/include/net/bond_options.h b/include/net/bond_options.h new file mode 100644 index 000000000000..ea6546d2c946 --- /dev/null +++ b/include/net/bond_options.h @@ -0,0 +1,130 @@ +/* + * drivers/net/bond/bond_options.h - bonding options + * Copyright (c) 2013 Nikolay Aleksandrov <nikolay@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _NET_BOND_OPTIONS_H +#define _NET_BOND_OPTIONS_H + +#define BOND_OPT_MAX_NAMELEN 32 +#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST) +#define BOND_MODE_ALL_EX(x) (~(x)) + +/* Option flags: + * BOND_OPTFLAG_NOSLAVES - check if the bond device is empty before setting + * BOND_OPTFLAG_IFDOWN - check if the bond device is down before setting + * BOND_OPTFLAG_RAWVAL - the option parses the value itself + */ +enum { + BOND_OPTFLAG_NOSLAVES = BIT(0), + BOND_OPTFLAG_IFDOWN = BIT(1), + BOND_OPTFLAG_RAWVAL = BIT(2) +}; + +/* Value type flags: + * BOND_VALFLAG_DEFAULT - mark the value as default + * BOND_VALFLAG_(MIN|MAX) - mark the value as min/max + */ +enum { + BOND_VALFLAG_DEFAULT = BIT(0), + BOND_VALFLAG_MIN = BIT(1), + BOND_VALFLAG_MAX = BIT(2) +}; + +/* Option IDs, their bit positions correspond to their IDs */ +enum { + BOND_OPT_MODE, + BOND_OPT_PACKETS_PER_SLAVE, + BOND_OPT_XMIT_HASH, + BOND_OPT_ARP_VALIDATE, + BOND_OPT_ARP_ALL_TARGETS, + BOND_OPT_FAIL_OVER_MAC, + BOND_OPT_ARP_INTERVAL, + BOND_OPT_ARP_TARGETS, + BOND_OPT_DOWNDELAY, + BOND_OPT_UPDELAY, + BOND_OPT_LACP_RATE, + BOND_OPT_MINLINKS, + BOND_OPT_AD_SELECT, + BOND_OPT_NUM_PEER_NOTIF, + BOND_OPT_MIIMON, + BOND_OPT_PRIMARY, + BOND_OPT_PRIMARY_RESELECT, + BOND_OPT_USE_CARRIER, + BOND_OPT_ACTIVE_SLAVE, + BOND_OPT_QUEUE_ID, + BOND_OPT_ALL_SLAVES_ACTIVE, + BOND_OPT_RESEND_IGMP, + BOND_OPT_LP_INTERVAL, + BOND_OPT_SLAVES, + BOND_OPT_TLB_DYNAMIC_LB, + BOND_OPT_LAST +}; + +/* This structure is used for storing option values and for passing option + * values when changing an option. The logic when used as an arg is as follows: + * - if string != NULL -> parse it, if the opt is RAW type then return it, else + * return the parse result + * - if string == NULL -> parse value + */ +struct bond_opt_value { + char *string; + u64 value; + u32 flags; +}; + +struct bonding; + +struct bond_option { + int id; + const char *name; + const char *desc; + u32 flags; + + /* unsuppmodes is used to denote modes in which the option isn't + * supported. + */ + unsigned long unsuppmodes; + /* supported values which this option can have, can be a subset of + * BOND_OPTVAL_RANGE's value range + */ + const struct bond_opt_value *values; + + int (*set)(struct bonding *bond, const struct bond_opt_value *val); +}; + +int __bond_opt_set(struct bonding *bond, unsigned int option, + struct bond_opt_value *val); +int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf); + +const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt, + struct bond_opt_value *val); +const struct bond_option *bond_opt_get(unsigned int option); +const struct bond_option *bond_opt_get_by_name(const char *name); +const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val); + +/* This helper is used to initialize a bond_opt_value structure for parameter + * passing. There should be either a valid string or value, but not both. + * When value is ULLONG_MAX then string will be used. 
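A usage sketch for the option API above, built on the __bond_opt_init() helper and the bond_opt_initval()/bond_opt_initstr() macros defined just below (the calling function is hypothetical and assumes RTNL is held):

static int example_set_options(struct bonding *bond)
{
        struct bond_opt_value newval;
        int err;

        bond_opt_initstr(&newval, "802.3ad");   /* parsed from a string */
        err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
        if (err)
                return err;

        bond_opt_initval(&newval, 100);         /* numeric value */
        return __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
}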
+ */ +static inline void __bond_opt_init(struct bond_opt_value *optval, + char *string, u64 value) +{ + memset(optval, 0, sizeof(*optval)); + optval->value = ULLONG_MAX; + if (value == ULLONG_MAX) + optval->string = string; + else + optval->value = value; +} +#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value) +#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX) + +void bond_option_arp_ip_targets_clear(struct bonding *bond); + +#endif /* _NET_BOND_OPTIONS_H */ diff --git a/include/net/bonding.h b/include/net/bonding.h new file mode 100644 index 000000000000..983a94b86b95 --- /dev/null +++ b/include/net/bonding.h @@ -0,0 +1,654 @@ +/* + * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'. + * + * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes + * NCM: Network and Communications Management, Inc. + * + * BUT, I'm the one who modified it for ethernet, so: + * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov + * + * This software may be used and distributed according to the terms + * of the GNU Public License, incorporated herein by reference. + * + */ + +#ifndef _NET_BONDING_H +#define _NET_BONDING_H + +#include <linux/timer.h> +#include <linux/proc_fs.h> +#include <linux/if_bonding.h> +#include <linux/cpumask.h> +#include <linux/in6.h> +#include <linux/netpoll.h> +#include <linux/inetdevice.h> +#include <linux/etherdevice.h> +#include <linux/reciprocal_div.h> +#include <linux/if_link.h> + +#include <net/bond_3ad.h> +#include <net/bond_alb.h> +#include <net/bond_options.h> + +#define DRV_VERSION "3.7.1" +#define DRV_RELDATE "April 27, 2011" +#define DRV_NAME "bonding" +#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" + +#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" + +#define BOND_MAX_ARP_TARGETS 16 + +#define BOND_DEFAULT_MIIMON 100 + +/* + * Less bad way to call ioctl from within the kernel; this needs to be + * done some other way to get the call out of interrupt context. + * Needs "ioctl" variable to be supplied by calling context. + */ +#define IOCTL(dev, arg, cmd) ({ \ + int res = 0; \ + mm_segment_t fs = get_fs(); \ + set_fs(get_ds()); \ + res = ioctl(dev, arg, cmd); \ + set_fs(fs); \ + res; }) + +#define BOND_MODE(bond) ((bond)->params.mode) + +/* slave list primitives */ +#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower) + +#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond)) + +/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */ +#define bond_first_slave(bond) \ + (bond_has_slaves(bond) ? \ + netdev_adjacent_get_private(bond_slave_list(bond)->next) : \ + NULL) +#define bond_last_slave(bond) \ + (bond_has_slaves(bond) ? 
\ + netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \ + NULL) + +/* Caller must have rcu_read_lock */ +#define bond_first_slave_rcu(bond) \ + netdev_lower_get_first_private_rcu(bond->dev) + +#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond)) +#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond)) + +/** + * bond_for_each_slave - iterate over all slaves + * @bond: the bond holding this list + * @pos: current slave + * @iter: list_head * iterator + * + * Caller must hold RTNL + */ +#define bond_for_each_slave(bond, pos, iter) \ + netdev_for_each_lower_private((bond)->dev, pos, iter) + +/* Caller must have rcu_read_lock */ +#define bond_for_each_slave_rcu(bond, pos, iter) \ + netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) + +#ifdef CONFIG_NET_POLL_CONTROLLER +extern atomic_t netpoll_block_tx; + +static inline void block_netpoll_tx(void) +{ + atomic_inc(&netpoll_block_tx); +} + +static inline void unblock_netpoll_tx(void) +{ + atomic_dec(&netpoll_block_tx); +} + +static inline int is_netpoll_tx_blocked(struct net_device *dev) +{ + if (unlikely(netpoll_tx_running(dev))) + return atomic_read(&netpoll_block_tx); + return 0; +} +#else +#define block_netpoll_tx() +#define unblock_netpoll_tx() +#define is_netpoll_tx_blocked(dev) (0) +#endif + +struct bond_params { + int mode; + int xmit_policy; + int miimon; + u8 num_peer_notif; + int arp_interval; + int arp_validate; + int arp_all_targets; + int use_carrier; + int fail_over_mac; + int updelay; + int downdelay; + int lacp_fast; + unsigned int min_links; + int ad_select; + char primary[IFNAMSIZ]; + int primary_reselect; + __be32 arp_targets[BOND_MAX_ARP_TARGETS]; + int tx_queues; + int all_slaves_active; + int resend_igmp; + int lp_interval; + int packets_per_slave; + int tlb_dynamic_lb; + struct reciprocal_value reciprocal_packets_per_slave; +}; + +struct bond_parm_tbl { + char *modename; + int mode; +}; + +struct slave { + struct net_device *dev; /* first - useful for panic debug */ + struct bonding *bond; /* our master */ + int delay; + /* all three in jiffies */ + unsigned long last_link_up; + unsigned long last_rx; + unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS]; + s8 link; /* one of BOND_LINK_XXXX */ + s8 new_link; + u8 backup:1, /* indicates backup slave. Value corresponds with + BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ + inactive:1, /* indicates inactive slave */ + should_notify:1; /* indicateds whether the state changed */ + u8 duplex; + u32 original_mtu; + u32 link_failure_count; + u32 speed; + u16 queue_id; + u8 perm_hwaddr[ETH_ALEN]; + struct ad_slave_info *ad_info; + struct tlb_slave_info tlb_info; +#ifdef CONFIG_NET_POLL_CONTROLLER + struct netpoll *np; +#endif + struct kobject kobj; + struct rtnl_link_stats64 slave_stats; +}; + +struct bond_up_slave { + unsigned int count; + struct rcu_head rcu; + struct slave *arr[0]; +}; + +/* + * Link pseudo-state only used internally by monitors + */ +#define BOND_LINK_NOCHANGE -1 + +/* + * Here are the locking policies for the two bonding locks: + * Get rcu_read_lock when reading or RTNL when writing slave list. 
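A short sketch of the read-side rule stated above: walk the slave list under rcu_read_lock() with the RCU iterator (the counting helper itself is illustrative, not part of this patch):

static int example_count_up_slaves(struct bonding *bond)
{
        struct list_head *iter;
        struct slave *slave;
        int up = 0;

        rcu_read_lock();
        bond_for_each_slave_rcu(bond, slave, iter) {
                if (bond_slave_is_up(slave))
                        up++;
        }
        rcu_read_unlock();

        return up;
}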
+ */ +struct bonding { + struct net_device *dev; /* first - useful for panic debug */ + struct slave __rcu *curr_active_slave; + struct slave __rcu *current_arp_slave; + struct slave __rcu *primary_slave; + struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */ + bool force_primary; + s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ + int (*recv_probe)(const struct sk_buff *, struct bonding *, + struct slave *); + /* mode_lock is used for mode-specific locking needs, currently used by: + * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and + * bond_3ad_state_machine_handler() concurrently and also + * the access to the state machine shared variables. + * TLB mode (5) - to sync the use and modifications of its hash table + * ALB mode (6) - to sync the use and modifications of its hash table + */ + spinlock_t mode_lock; + u8 send_peer_notif; + u8 igmp_retrans; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_entry; + char proc_file_name[IFNAMSIZ]; +#endif /* CONFIG_PROC_FS */ + struct list_head bond_list; + u32 rr_tx_counter; + struct ad_bond_info ad_info; + struct alb_bond_info alb_info; + struct bond_params params; + struct workqueue_struct *wq; + struct delayed_work mii_work; + struct delayed_work arp_work; + struct delayed_work alb_work; + struct delayed_work ad_work; + struct delayed_work mcast_work; + struct delayed_work slave_arr_work; +#ifdef CONFIG_DEBUG_FS + /* debugging support via debugfs */ + struct dentry *debug_dir; +#endif /* CONFIG_DEBUG_FS */ + struct rtnl_link_stats64 bond_stats; +}; + +#define bond_slave_get_rcu(dev) \ + ((struct slave *) rcu_dereference(dev->rx_handler_data)) + +#define bond_slave_get_rtnl(dev) \ + ((struct slave *) rtnl_dereference(dev->rx_handler_data)) + +struct bond_vlan_tag { + __be16 vlan_proto; + unsigned short vlan_id; +}; + +/** + * Returns NULL if the net_device does not belong to any of the bond's slaves + * + * Caller must hold bond lock for read + */ +static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, + struct net_device *slave_dev) +{ + return netdev_lower_dev_get_private(bond->dev, slave_dev); +} + +static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) +{ + return slave->bond; +} + +static inline bool bond_should_override_tx_queue(struct bonding *bond) +{ + return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || + BOND_MODE(bond) == BOND_MODE_ROUNDROBIN; +} + +static inline bool bond_is_lb(const struct bonding *bond) +{ + return BOND_MODE(bond) == BOND_MODE_TLB || + BOND_MODE(bond) == BOND_MODE_ALB; +} + +static inline bool bond_is_nondyn_tlb(const struct bonding *bond) +{ + return (BOND_MODE(bond) == BOND_MODE_TLB) && + (bond->params.tlb_dynamic_lb == 0); +} + +static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond) +{ + return (BOND_MODE(bond) == BOND_MODE_8023AD || + BOND_MODE(bond) == BOND_MODE_XOR || + bond_is_nondyn_tlb(bond)); +} + +static inline bool bond_mode_uses_arp(int mode) +{ + return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB && + mode != BOND_MODE_ALB; +} + +static inline bool bond_mode_uses_primary(int mode) +{ + return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB || + mode == BOND_MODE_ALB; +} + +static inline bool bond_uses_primary(struct bonding *bond) +{ + return bond_mode_uses_primary(BOND_MODE(bond)); +} + +static inline bool bond_slave_is_up(struct slave *slave) +{ + return netif_running(slave->dev) && netif_carrier_ok(slave->dev); +} + +static inline void 
bond_set_active_slave(struct slave *slave) +{ + if (slave->backup) { + slave->backup = 0; + rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); + } +} + +static inline void bond_set_backup_slave(struct slave *slave) +{ + if (!slave->backup) { + slave->backup = 1; + rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); + } +} + +static inline void bond_set_slave_state(struct slave *slave, + int slave_state, bool notify) +{ + if (slave->backup == slave_state) + return; + + slave->backup = slave_state; + if (notify) { + rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); + slave->should_notify = 0; + } else { + if (slave->should_notify) + slave->should_notify = 0; + else + slave->should_notify = 1; + } +} + +static inline void bond_slave_state_change(struct bonding *bond) +{ + struct list_head *iter; + struct slave *tmp; + + bond_for_each_slave(bond, tmp, iter) { + if (tmp->link == BOND_LINK_UP) + bond_set_active_slave(tmp); + else if (tmp->link == BOND_LINK_DOWN) + bond_set_backup_slave(tmp); + } +} + +static inline void bond_slave_state_notify(struct bonding *bond) +{ + struct list_head *iter; + struct slave *tmp; + + bond_for_each_slave(bond, tmp, iter) { + if (tmp->should_notify) { + rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC); + tmp->should_notify = 0; + } + } +} + +static inline int bond_slave_state(struct slave *slave) +{ + return slave->backup; +} + +static inline bool bond_is_active_slave(struct slave *slave) +{ + return !bond_slave_state(slave); +} + +static inline bool bond_slave_can_tx(struct slave *slave) +{ + return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP && + bond_is_active_slave(slave); +} + +#define BOND_PRI_RESELECT_ALWAYS 0 +#define BOND_PRI_RESELECT_BETTER 1 +#define BOND_PRI_RESELECT_FAILURE 2 + +#define BOND_FOM_NONE 0 +#define BOND_FOM_ACTIVE 1 +#define BOND_FOM_FOLLOW 2 + +#define BOND_ARP_TARGETS_ANY 0 +#define BOND_ARP_TARGETS_ALL 1 + +#define BOND_ARP_VALIDATE_NONE 0 +#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE) +#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP) +#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ + BOND_ARP_VALIDATE_BACKUP) +#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1) +#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \ + BOND_ARP_FILTER) +#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \ + BOND_ARP_FILTER) + +#define BOND_SLAVE_NOTIFY_NOW true +#define BOND_SLAVE_NOTIFY_LATER false + +static inline int slave_do_arp_validate(struct bonding *bond, + struct slave *slave) +{ + return bond->params.arp_validate & (1 << bond_slave_state(slave)); +} + +static inline int slave_do_arp_validate_only(struct bonding *bond) +{ + return bond->params.arp_validate & BOND_ARP_FILTER; +} + +static inline int bond_is_ip_target_ok(__be32 addr) +{ + return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr); +} + +/* Get the oldest arp which we've received on this slave for bond's + * arp_targets. 
+ */ +static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond, + struct slave *slave) +{ + int i = 1; + unsigned long ret = slave->target_last_arp_rx[0]; + + for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) + if (time_before(slave->target_last_arp_rx[i], ret)) + ret = slave->target_last_arp_rx[i]; + + return ret; +} + +static inline unsigned long slave_last_rx(struct bonding *bond, + struct slave *slave) +{ + if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL) + return slave_oldest_target_arp_rx(bond, slave); + + return slave->last_rx; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static inline void bond_netpoll_send_skb(const struct slave *slave, + struct sk_buff *skb) +{ + struct netpoll *np = slave->np; + + if (np) + netpoll_send_skb(np, skb); +} +#else +static inline void bond_netpoll_send_skb(const struct slave *slave, + struct sk_buff *skb) +{ +} +#endif + +static inline void bond_set_slave_inactive_flags(struct slave *slave, + bool notify) +{ + if (!bond_is_lb(slave->bond)) + bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); + if (!slave->bond->params.all_slaves_active) + slave->inactive = 1; +} + +static inline void bond_set_slave_active_flags(struct slave *slave, + bool notify) +{ + bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify); + slave->inactive = 0; +} + +static inline bool bond_is_slave_inactive(struct slave *slave) +{ + return slave->inactive; +} + +static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local) +{ + struct in_device *in_dev; + __be32 addr = 0; + + rcu_read_lock(); + in_dev = __in_dev_get_rcu(dev); + + if (in_dev) + addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local, + RT_SCOPE_HOST); + rcu_read_unlock(); + return addr; +} + +struct bond_net { + struct net *net; /* Associated network namespace */ + struct list_head dev_list; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_dir; +#endif + struct class_attribute class_attr_bonding_masters; +}; + +int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); +void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); +int bond_create(struct net *net, const char *name); +int bond_create_sysfs(struct bond_net *net); +void bond_destroy_sysfs(struct bond_net *net); +void bond_prepare_sysfs_group(struct bonding *bond); +int bond_sysfs_slave_add(struct slave *slave); +void bond_sysfs_slave_del(struct slave *slave); +int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); +int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); +u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb); +void bond_select_active_slave(struct bonding *bond); +void bond_change_active_slave(struct bonding *bond, struct slave *new_active); +void bond_create_debugfs(void); +void bond_destroy_debugfs(void); +void bond_debug_register(struct bonding *bond); +void bond_debug_unregister(struct bonding *bond); +void bond_debug_reregister(struct bonding *bond); +const char *bond_mode_name(int mode); +void bond_setup(struct net_device *bond_dev); +unsigned int bond_get_num_tx_queues(void); +int bond_netlink_init(void); +void bond_netlink_fini(void); +struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond); +const char *bond_slave_link_status(s8 link); +struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev, + struct net_device *end_dev, + int level); +int bond_update_slave_arr(struct bonding *bond, struct slave 
*skipslave); +void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay); + +#ifdef CONFIG_PROC_FS +void bond_create_proc_entry(struct bonding *bond); +void bond_remove_proc_entry(struct bonding *bond); +void bond_create_proc_dir(struct bond_net *bn); +void bond_destroy_proc_dir(struct bond_net *bn); +#else +static inline void bond_create_proc_entry(struct bonding *bond) +{ +} + +static inline void bond_remove_proc_entry(struct bonding *bond) +{ +} + +static inline void bond_create_proc_dir(struct bond_net *bn) +{ +} + +static inline void bond_destroy_proc_dir(struct bond_net *bn) +{ +} +#endif + +static inline struct slave *bond_slave_has_mac(struct bonding *bond, + const u8 *mac) +{ + struct list_head *iter; + struct slave *tmp; + + bond_for_each_slave(bond, tmp, iter) + if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) + return tmp; + + return NULL; +} + +/* Caller must hold rcu_read_lock() for read */ +static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond, + const u8 *mac) +{ + struct list_head *iter; + struct slave *tmp; + + bond_for_each_slave_rcu(bond, tmp, iter) + if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) + return tmp; + + return NULL; +} + +/* Caller must hold rcu_read_lock() for read */ +static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac) +{ + struct list_head *iter; + struct slave *tmp; + struct netdev_hw_addr *ha; + + bond_for_each_slave_rcu(bond, tmp, iter) + if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) + return true; + + if (netdev_uc_empty(bond->dev)) + return false; + + netdev_for_each_uc_addr(ha, bond->dev) + if (ether_addr_equal_64bits(mac, ha->addr)) + return true; + + return false; +} + +/* Check if the ip is present in arp ip list, or first free slot if ip == 0 + * Returns -1 if not found, index if found + */ +static inline int bond_get_targets_ip(__be32 *targets, __be32 ip) +{ + int i; + + for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) + if (targets[i] == ip) + return i; + else if (targets[i] == 0) + break; + + return -1; +} + +/* exported from bond_main.c */ +extern int bond_net_id; +extern const struct bond_parm_tbl bond_lacp_tbl[]; +extern const struct bond_parm_tbl xmit_hashtype_tbl[]; +extern const struct bond_parm_tbl arp_validate_tbl[]; +extern const struct bond_parm_tbl arp_all_targets_tbl[]; +extern const struct bond_parm_tbl fail_over_mac_tbl[]; +extern const struct bond_parm_tbl pri_reselect_tbl[]; +extern struct bond_parm_tbl ad_select_tbl[]; + +/* exported from bond_netlink.c */ +extern struct rtnl_link_ops bond_link_ops; + +static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb) +{ + atomic_long_inc(&dev->tx_dropped); + dev_kfree_skb_any(skb); +} + +#endif /* _NET_BONDING_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index a2ddcf2398fd..4ebb816241fa 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -319,9 +319,12 @@ struct ieee80211_supported_band { /** * struct vif_params - describes virtual interface parameters * @use_4addr: use 4-address frames - * @macaddr: address to use for this virtual interface. This will only - * be used for non-netdevice interfaces. If this parameter is set - * to zero address the driver may determine the address as needed. + * @macaddr: address to use for this virtual interface. + * If this parameter is set to zero address the driver may + * determine the address as needed. + * This feature is only fully supported by drivers that enable the + * %NL80211_FEATURE_MAC_ON_CREATE flag. 
Others may support creating + ** only p2p devices with specified MAC. */ struct vif_params { int use_4addr; @@ -799,6 +802,22 @@ struct station_parameters { }; /** + * struct station_del_parameters - station deletion parameters + * + * Used to delete a station entry (or all stations). + * + * @mac: MAC address of the station to remove or NULL to remove all stations + * @subtype: Management frame subtype to use for indicating removal + * (10 = Disassociation, 12 = Deauthentication) + * @reason_code: Reason code for the Disassociation/Deauthentication frame + */ +struct station_del_parameters { + const u8 *mac; + u8 subtype; + u16 reason_code; +}; + +/** * enum cfg80211_station_type - the type of station being modified * @CFG80211_STA_AP_CLIENT: client of an AP interface * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has @@ -1340,6 +1359,16 @@ struct mesh_setup { }; /** + * struct ocb_setup - 802.11p OCB mode setup configuration + * @chandef: defines the channel to use + * + * These parameters are fixed when connecting to the network + */ +struct ocb_setup { + struct cfg80211_chan_def chandef; +}; + +/** * struct ieee80211_txq_params - TX queue parameters * @ac: AC identifier * @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled @@ -1408,6 +1437,10 @@ struct cfg80211_ssid { * @aborted: (internal) scan request was notified as aborted * @notified: (internal) scan request was notified as done or aborted * @no_cck: used to send probe requests at non CCK rate in 2GHz band + * @mac_addr: MAC address used with randomisation + * @mac_addr_mask: MAC address mask used with randomisation, bits that + * are 0 in the mask should be randomised, bits that are 1 should + * be taken from the @mac_addr */ struct cfg80211_scan_request { struct cfg80211_ssid *ssids; @@ -1422,6 +1455,9 @@ struct cfg80211_scan_request { struct wireless_dev *wdev; + u8 mac_addr[ETH_ALEN] __aligned(2); + u8 mac_addr_mask[ETH_ALEN] __aligned(2); + /* internal */ struct wiphy *wiphy; unsigned long scan_start; @@ -1432,6 +1468,17 @@ struct cfg80211_scan_request { struct ieee80211_channel *channels[0]; }; +static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask) +{ + int i; + + get_random_bytes(buf, ETH_ALEN); + for (i = 0; i < ETH_ALEN; i++) { + buf[i] &= ~mask[i]; + buf[i] |= addr[i] & mask[i]; + } +} + /** * struct cfg80211_match_set - sets of attributes to match * @@ -1465,6 +1512,10 @@ struct cfg80211_match_set { * @channels: channels to scan * @min_rssi_thold: for drivers only supporting a single threshold, this * contains the minimum over all matchsets + * @mac_addr: MAC address used with randomisation + * @mac_addr_mask: MAC address mask used with randomisation, bits that + * are 0 in the mask should be randomised, bits that are 1 should + * be taken from the @mac_addr */ struct cfg80211_sched_scan_request { struct cfg80211_ssid *ssids; @@ -1479,6 +1530,9 @@ struct cfg80211_sched_scan_request { int n_match_sets; s32 min_rssi_thold; + u8 mac_addr[ETH_ALEN] __aligned(2); + u8 mac_addr_mask[ETH_ALEN] __aligned(2); + /* internal */ struct wiphy *wiphy; struct net_device *dev; @@ -1911,6 +1965,7 @@ struct cfg80211_wowlan_tcp { * @rfkill_release: wake up when rfkill is released * @tcp: TCP connection establishment/wakeup parameters, see nl80211.h. * NULL if not configured. + * @nd_config: configuration for the scan to be used for net detect wake. 
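The mac_addr/mac_addr_mask convention described above (a mask bit set to 1 means take that bit from mac_addr, a 0 bit means randomise it) is exactly what the new get_random_mask_addr() inline implements. A small hypothetical sketch of generating a scan address with a fixed locally administered prefix and random lower octets:

#include <linux/etherdevice.h>
#include <net/cfg80211.h>

/* Illustrative only: fixed first three octets, random remainder. */
static void example_random_scan_addr(u8 *buf)
{
	static const u8 addr[ETH_ALEN] = { 0x02, 0x12, 0x34, 0x00, 0x00, 0x00 };
	static const u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };

	/* Bits set in mask[] are copied from addr[], the rest stay random. */
	get_random_mask_addr(buf, addr, mask);
}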
*/ struct cfg80211_wowlan { bool any, disconnect, magic_pkt, gtk_rekey_failure, @@ -1919,6 +1974,7 @@ struct cfg80211_wowlan { struct cfg80211_pkt_pattern *patterns; struct cfg80211_wowlan_tcp *tcp; int n_patterns; + struct cfg80211_sched_scan_request *nd_config; }; /** @@ -1951,6 +2007,35 @@ struct cfg80211_coalesce { }; /** + * struct cfg80211_wowlan_nd_match - information about the match + * + * @ssid: SSID of the match that triggered the wake up + * @n_channels: Number of channels where the match occurred. This + * value may be zero if the driver can't report the channels. + * @channels: center frequencies of the channels where a match + * occurred (in MHz) + */ +struct cfg80211_wowlan_nd_match { + struct cfg80211_ssid ssid; + int n_channels; + u32 channels[]; +}; + +/** + * struct cfg80211_wowlan_nd_info - net detect wake up information + * + * @n_matches: Number of match information instances provided in + * @matches. This value may be zero if the driver can't provide + * match information. + * @matches: Array of pointers to matches containing information about + * the matches that triggered the wake up. + */ +struct cfg80211_wowlan_nd_info { + int n_matches; + struct cfg80211_wowlan_nd_match *matches[]; +}; + +/** * struct cfg80211_wowlan_wakeup - wakeup report * @disconnect: woke up by getting disconnected * @magic_pkt: woke up by receiving magic packet @@ -1969,6 +2054,7 @@ struct cfg80211_coalesce { * @tcp_match: TCP wakeup packet received * @tcp_connlost: TCP connection lost or failed to establish * @tcp_nomoretokens: TCP data ran out of tokens + * @net_detect: if not %NULL, woke up because of net detect */ struct cfg80211_wowlan_wakeup { bool disconnect, magic_pkt, gtk_rekey_failure, @@ -1978,6 +2064,7 @@ struct cfg80211_wowlan_wakeup { s32 pattern_idx; u32 packet_present_len, packet_len; const void *packet; + struct cfg80211_wowlan_nd_info *net_detect; }; /** @@ -2132,7 +2219,7 @@ struct cfg80211_qos_map { * @stop_ap: Stop being an AP, including stopping beaconing. * * @add_station: Add a new station. - * @del_station: Remove a station; @mac may be NULL to remove all stations. + * @del_station: Remove a station * @change_station: Modify a given station. Note that flags changes are not much * validated in cfg80211, in particular the auth/assoc/authorized flags * might come to the driver in invalid combinations -- make sure to check @@ -2146,6 +2233,8 @@ struct cfg80211_qos_map { * @change_mpath: change a given mesh path * @get_mpath: get a mesh path for the given parameters * @dump_mpath: dump mesh path callback -- resume dump at index @idx + * @get_mpp: get a mesh proxy path for the given parameters + * @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx * @join_mesh: join the mesh network with the specified parameters * (invoked with the wireless_dev mutex held) * @leave_mesh: leave the current mesh network @@ -2331,6 +2420,17 @@ struct cfg80211_qos_map { * with the peer followed by immediate teardown when the addition is later * rejected) * @del_tx_ts: remove an existing TX TS + * + * @join_ocb: join the OCB network with the specified parameters + * (invoked with the wireless_dev mutex held) + * @leave_ocb: leave the current OCB network + * (invoked with the wireless_dev mutex held) + * + * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver + * is responsible for continually initiating channel-switching operations + * and returning to the base channel for communication with the AP. 
+ * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both + * peers must be on the base channel when the call completes. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -2376,7 +2476,7 @@ struct cfg80211_ops { const u8 *mac, struct station_parameters *params); int (*del_station)(struct wiphy *wiphy, struct net_device *dev, - const u8 *mac); + struct station_del_parameters *params); int (*change_station)(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params); @@ -2396,6 +2496,11 @@ struct cfg80211_ops { int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo); + int (*get_mpp)(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *mpp, struct mpath_info *pinfo); + int (*dump_mpp)(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *mpp, + struct mpath_info *pinfo); int (*get_mesh_config)(struct wiphy *wiphy, struct net_device *dev, struct mesh_config *conf); @@ -2407,6 +2512,10 @@ struct cfg80211_ops { const struct mesh_setup *setup); int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev); + int (*join_ocb)(struct wiphy *wiphy, struct net_device *dev, + struct ocb_setup *setup); + int (*leave_ocb)(struct wiphy *wiphy, struct net_device *dev); + int (*change_bss)(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params); @@ -2577,6 +2686,14 @@ struct cfg80211_ops { u16 admitted_time); int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer); + + int (*tdls_channel_switch)(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr, u8 oper_class, + struct cfg80211_chan_def *chandef); + void (*tdls_cancel_channel_switch)(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr); }; /* @@ -2623,13 +2740,9 @@ struct cfg80211_ops { * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in * beaconing mode (AP, IBSS, Mesh, ...). - * @WIPHY_FLAG_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM - * TSPEC sessions (TID aka TSID 0-7) with the NL80211_CMD_ADD_TX_TS - * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it - * needs to be able to handle Block-Ack agreements and other things. */ enum wiphy_flags { - WIPHY_FLAG_SUPPORTS_WMM_ADMISSION = BIT(0), + /* use hole at 0 */ /* use hole at 1 */ /* use hole at 2 */ WIPHY_FLAG_NETNS_OK = BIT(3), @@ -2755,6 +2868,7 @@ struct ieee80211_txrx_stypes { * @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request * @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure * @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release + * @WIPHY_WOWLAN_NET_DETECT: supports wakeup on network detection */ enum wiphy_wowlan_support_flags { WIPHY_WOWLAN_ANY = BIT(0), @@ -2765,6 +2879,7 @@ enum wiphy_wowlan_support_flags { WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5), WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6), WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7), + WIPHY_WOWLAN_NET_DETECT = BIT(8), }; struct wiphy_wowlan_tcp_support { @@ -2783,6 +2898,11 @@ struct wiphy_wowlan_tcp_support { * @pattern_max_len: maximum length of each pattern * @pattern_min_len: minimum length of each pattern * @max_pkt_offset: maximum Rx packet offset + * @max_nd_match_sets: maximum number of matchsets for net-detect, + * similar, but not necessarily identical, to max_match_sets for + * scheduled scans. 
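To show how a driver would advertise the net-detect wake capability documented above, here is a hedged sketch of a wowlan support declaration; the limit of 11 match sets and the "example_" names are made up for illustration, not taken from the patch.

#include <net/cfg80211.h>

/* Hypothetical driver-side capability table for net-detect wake. */
static const struct wiphy_wowlan_support example_wowlan_support = {
	.flags = WIPHY_WOWLAN_NET_DETECT,
	.max_nd_match_sets = 11,
};

/* At wiphy setup time (CONFIG_PM builds) the driver would then do:
 *	wiphy->wowlan = &example_wowlan_support;
 */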
+ * See &struct cfg80211_sched_scan_request.@match_sets for more + * details. * @tcp: TCP wakeup support information */ struct wiphy_wowlan_support { @@ -2791,6 +2911,7 @@ struct wiphy_wowlan_support { int pattern_max_len; int pattern_min_len; int max_pkt_offset; + int max_nd_match_sets; const struct wiphy_wowlan_tcp_support *tcp; }; @@ -3166,6 +3287,23 @@ static inline const char *wiphy_name(const struct wiphy *wiphy) } /** + * wiphy_new_nm - create a new wiphy for use with cfg80211 + * + * @ops: The configuration operations for this device + * @sizeof_priv: The size of the private area to allocate + * @requested_name: Request a particular name. + * NULL is valid value, and means use the default phy%d naming. + * + * Create a new wiphy and associate the given operations with it. + * @sizeof_priv bytes are allocated for private use. + * + * Return: A pointer to the new wiphy. This pointer must be + * assigned to each netdev's ieee80211_ptr for proper operation. + */ +struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, + const char *requested_name); + +/** * wiphy_new - create a new wiphy for use with cfg80211 * * @ops: The configuration operations for this device @@ -3177,7 +3315,11 @@ static inline const char *wiphy_name(const struct wiphy *wiphy) * Return: A pointer to the new wiphy. This pointer must be * assigned to each netdev's ieee80211_ptr for proper operation. */ -struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv); +static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops, + int sizeof_priv) +{ + return wiphy_new_nm(ops, sizeof_priv, NULL); +} /** * wiphy_register - register a wiphy with cfg80211 @@ -4501,33 +4643,6 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, gfp_t gfp); /** - * cfg80211_radar_event - radar detection event - * @wiphy: the wiphy - * @chandef: chandef for the current channel - * @gfp: context flags - * - * This function is called when a radar is detected on the current chanenl. - */ -void cfg80211_radar_event(struct wiphy *wiphy, - struct cfg80211_chan_def *chandef, gfp_t gfp); - -/** - * cfg80211_cac_event - Channel availability check (CAC) event - * @netdev: network device - * @chandef: chandef for the current channel - * @event: type of event - * @gfp: context flags - * - * This function is called when a Channel availability check (CAC) is finished - * or aborted. This must be called to notify the completion of a CAC process, - * also by full-MAC drivers. - */ -void cfg80211_cac_event(struct net_device *netdev, - const struct cfg80211_chan_def *chandef, - enum nl80211_radar_event event, gfp_t gfp); - - -/** * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer * @dev: network device * @peer: peer's MAC address @@ -4555,6 +4670,42 @@ void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer, u32 num_packets, u32 rate, u32 intvl, gfp_t gfp); /** + * cfg80211_cqm_beacon_loss_notify - beacon loss event + * @dev: network device + * @gfp: context flags + * + * Notify userspace about beacon loss from the connected AP. + */ +void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp); + +/** + * cfg80211_radar_event - radar detection event + * @wiphy: the wiphy + * @chandef: chandef for the current channel + * @gfp: context flags + * + * This function is called when a radar is detected on the current chanenl. 
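Looking back at the allocator change above: wiphy_new() is now a thin wrapper around wiphy_new_nm() with a NULL name, keeping the default phy%d naming, while a driver that wants a specific name can pass it explicitly. A purely illustrative sketch (the ops table and private struct are placeholders):

#include <net/cfg80211.h>

struct example_priv { int placeholder; };	/* illustrative only */
static const struct cfg80211_ops example_ops;	/* empty table, illustrative only */

static struct wiphy *example_alloc_wiphy(void)
{
	/* Same as wiphy_new(&example_ops, sizeof(struct example_priv)),
	 * except that a specific name is requested instead of phy%d.
	 */
	return wiphy_new_nm(&example_ops, sizeof(struct example_priv),
			    "phy-example");
}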
+ */ +void cfg80211_radar_event(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, gfp_t gfp); + +/** + * cfg80211_cac_event - Channel availability check (CAC) event + * @netdev: network device + * @chandef: chandef for the current channel + * @event: type of event + * @gfp: context flags + * + * This function is called when a Channel availability check (CAC) is finished + * or aborted. This must be called to notify the completion of a CAC process, + * also by full-MAC drivers. + */ +void cfg80211_cac_event(struct net_device *netdev, + const struct cfg80211_chan_def *chandef, + enum nl80211_radar_event event, gfp_t gfp); + + +/** * cfg80211_gtk_rekey_notify - notify userspace about driver rekeying * @dev: network device * @bssid: BSSID of AP (to avoid races) @@ -4657,6 +4808,20 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, void cfg80211_ch_switch_notify(struct net_device *dev, struct cfg80211_chan_def *chandef); +/* + * cfg80211_ch_switch_started_notify - notify channel switch start + * @dev: the device on which the channel switch started + * @chandef: the future channel definition + * @count: the number of TBTTs until the channel switch happens + * + * Inform the userspace about the channel switch that has just + * started, so that it can take appropriate actions (eg. starting + * channel switch on other vifs), if necessary. + */ +void cfg80211_ch_switch_started_notify(struct net_device *dev, + struct cfg80211_chan_def *chandef, + u8 count); + /** * ieee80211_operating_class_to_band - convert operating class to band * diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h new file mode 100644 index 000000000000..7f713acfa106 --- /dev/null +++ b/include/net/cfg802154.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Written by: + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + */ + +#ifndef __NET_CFG802154_H +#define __NET_CFG802154_H + +#include <linux/ieee802154.h> +#include <linux/netdevice.h> +#include <linux/mutex.h> +#include <linux/bug.h> + +#include <net/nl802154.h> + +struct wpan_phy; + +struct cfg802154_ops { + struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, + const char *name, + int type); + void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, + struct net_device *dev); + int (*add_virtual_intf)(struct wpan_phy *wpan_phy, + const char *name, + enum nl802154_iftype type, + __le64 extended_addr); + int (*del_virtual_intf)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev); + int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel); + int (*set_pan_id)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, __le16 pan_id); + int (*set_short_addr)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, __le16 short_addr); + int (*set_backoff_exponent)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, u8 min_be, + u8 max_be); + int (*set_max_csma_backoffs)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + u8 max_csma_backoffs); + int (*set_max_frame_retries)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + s8 max_frame_retries); + int (*set_lbt_mode)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, bool mode); +}; + +struct wpan_phy { + struct mutex pib_lock; + + /* If multiple wpan_phys are registered and you're handed e.g. + * a regular netdev with assigned ieee802154_ptr, you won't + * know whether it points to a wpan_phy your driver has registered + * or not. Assign this to something global to your driver to + * help determine whether you own this wpan_phy or not. + */ + const void *privid; + + /* + * This is a PIB according to 802.15.4-2011. 
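A brief sketch of the ownership check the privid comment above describes; the names are illustrative and the assignment of phy->privid is assumed to happen when the driver registers the wpan_phy.

#include <net/cfg802154.h>

/* Driver-global cookie: its own address is unique to this module. */
static const void *const example_wpan_privid = &example_wpan_privid;

/* Returns true only for wpan_phys this driver registered itself,
 * assuming it set phy->privid = example_wpan_privid at registration.
 */
static bool example_owns_wpan_phy(struct wpan_phy *phy)
{
	return phy->privid == example_wpan_privid;
}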
+ * We do not provide timing-related variables, as they + * aren't used outside of driver + */ + u8 current_channel; + u8 current_page; + u32 channels_supported[IEEE802154_MAX_PAGE + 1]; + s8 transmit_power; + u8 cca_mode; + + __le64 perm_extended_addr; + + s32 cca_ed_level; + + /* PHY depended MAC PIB values */ + + /* 802.15.4 acronym: Tdsym in usec */ + u8 symbol_duration; + /* lifs and sifs periods timing */ + u16 lifs_period; + u16 sifs_period; + + struct device dev; + + char priv[0] __aligned(NETDEV_ALIGN); +}; + +struct wpan_dev { + struct wpan_phy *wpan_phy; + int iftype; + + /* the remainder of this struct should be private to cfg802154 */ + struct list_head list; + struct net_device *netdev; + + u32 identifier; + + /* MAC PIB */ + __le16 pan_id; + __le16 short_addr; + __le64 extended_addr; + + /* MAC BSN field */ + u8 bsn; + /* MAC DSN field */ + u8 dsn; + + u8 min_be; + u8 max_be; + u8 csma_retries; + s8 frame_retries; + + bool lbt; + + bool promiscuous_mode; +}; + +#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) + +struct wpan_phy * +wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size); +static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev) +{ + phy->dev.parent = dev; +} + +int wpan_phy_register(struct wpan_phy *phy); +void wpan_phy_unregister(struct wpan_phy *phy); +void wpan_phy_free(struct wpan_phy *phy); +/* Same semantics as for class_for_each_device */ +int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data); + +static inline void *wpan_phy_priv(struct wpan_phy *phy) +{ + BUG_ON(!phy); + return &phy->priv; +} + +struct wpan_phy *wpan_phy_find(const char *str); + +static inline void wpan_phy_put(struct wpan_phy *phy) +{ + put_device(&phy->dev); +} + +static inline const char *wpan_phy_name(struct wpan_phy *phy) +{ + return dev_name(&phy->dev); +} + +#endif /* __NET_CFG802154_H */ diff --git a/include/net/checksum.h b/include/net/checksum.h index 6465bae80a4f..e339a9513e29 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -151,4 +151,20 @@ static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, (__force __be32)to, pseudohdr); } +static inline __wsum remcsum_adjust(void *ptr, __wsum csum, + int start, int offset) +{ + __sum16 *psum = (__sum16 *)(ptr + offset); + __wsum delta; + + /* Subtract out checksum up to start */ + csum = csum_sub(csum, csum_partial(ptr, start, 0)); + + /* Set derived checksum in packet */ + delta = csum_sub(csum_fold(csum), *psum); + *psum = csum_fold(csum); + + return delta; +} + #endif diff --git a/include/net/compat.h b/include/net/compat.h index 3b603b199c01..42a9c8431177 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -40,9 +40,8 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *); #define compat_mmsghdr mmsghdr #endif /* defined(CONFIG_COMPAT) */ -int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *); -int verify_compat_iovec(struct msghdr *, struct iovec *, - struct sockaddr_storage *, int); +ssize_t get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, + struct sockaddr __user **, struct iovec **); asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, unsigned int); asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, diff --git a/include/net/dsa.h b/include/net/dsa.h index b76559293535..ed3c34bbb67a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -38,6 +38,9 @@ struct dsa_chip_data { struct device *host_dev; int 
sw_addr; + /* set to size of eeprom if supported by the switch */ + int eeprom_len; + /* Device tree node pointer for this specific switch chip * used during switch setup in case additional properties * and resources needs to be used @@ -139,6 +142,14 @@ struct dsa_switch { */ struct device *master_dev; +#ifdef CONFIG_NET_DSA_HWMON + /* + * Hardware monitoring information + */ + char hwmon_name[IFNAMSIZ + 8]; + struct device *hwmon_dev; +#endif + /* * Slave mii_bus and devices for the individual ports. */ @@ -242,6 +253,28 @@ struct dsa_switch_driver { struct ethtool_eee *e); int (*get_eee)(struct dsa_switch *ds, int port, struct ethtool_eee *e); + +#ifdef CONFIG_NET_DSA_HWMON + /* Hardware monitoring */ + int (*get_temp)(struct dsa_switch *ds, int *temp); + int (*get_temp_limit)(struct dsa_switch *ds, int *temp); + int (*set_temp_limit)(struct dsa_switch *ds, int temp); + int (*get_temp_alarm)(struct dsa_switch *ds, bool *alarm); +#endif + + /* EEPROM access */ + int (*get_eeprom_len)(struct dsa_switch *ds); + int (*get_eeprom)(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data); + int (*set_eeprom)(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data); + + /* + * Register access. + */ + int (*get_regs_len)(struct dsa_switch *ds, int port); + void (*get_regs)(struct dsa_switch *ds, int port, + struct ethtool_regs *regs, void *p); }; void register_switch_driver(struct dsa_switch_driver *type); diff --git a/include/net/fou.h b/include/net/fou.h new file mode 100644 index 000000000000..19b8a0c62a98 --- /dev/null +++ b/include/net/fou.h @@ -0,0 +1,19 @@ +#ifndef __NET_FOU_H +#define __NET_FOU_H + +#include <linux/skbuff.h> + +#include <net/flow.h> +#include <net/gue.h> +#include <net/ip_tunnels.h> +#include <net/udp.h> + +size_t fou_encap_hlen(struct ip_tunnel_encap *e); +static size_t gue_encap_hlen(struct ip_tunnel_encap *e); + +int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4); +int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4); + +#endif diff --git a/include/net/gue.h b/include/net/gue.h index b6c332788084..3f28ec7f1c7f 100644 --- a/include/net/gue.h +++ b/include/net/gue.h @@ -1,23 +1,116 @@ #ifndef __NET_GUE_H #define __NET_GUE_H +/* Definitions for the GUE header, standard and private flags, lengths + * of optional fields are below. + * + * Diagram of GUE header: + * + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |Ver|C| Hlen | Proto/ctype | Standard flags |P| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * ~ Fields (optional) ~ + * | | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Private flags (optional, P bit is set) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * ~ Private fields (optional) ~ + * | | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * C bit indicates contol message when set, data message when unset. + * For a control message, proto/ctype is interpreted as a type of + * control message. For data messages, proto/ctype is the IP protocol + * of the next header. + * + * P bit indicates private flags field is present. The private flags + * may refer to options placed after this field. 
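To make the layout just described concrete, a minimal, hypothetical sketch of filling in the base header for a data message with no optional fields; the inner packet is assumed to be IPv4, so proto_ctype carries IPPROTO_IPIP.

#include <linux/in.h>
#include <net/gue.h>

/* Illustrative only: base GUE header, data message, no optional fields. */
static void example_init_guehdr(struct guehdr *guehdr)
{
	guehdr->version = 0;
	guehdr->control = 0;			/* data message, not control */
	guehdr->hlen = 0;			/* optional fields, in 32-bit words */
	guehdr->proto_ctype = IPPROTO_IPIP;	/* inner header is IPv4 */
	guehdr->flags = 0;
}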
+ */ + struct guehdr { union { struct { #if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 hlen:4, - version:4; + __u8 hlen:5, + control:1, + version:2; #elif defined (__BIG_ENDIAN_BITFIELD) - __u8 version:4, - hlen:4; + __u8 version:2, + control:1, + hlen:5; #else #error "Please fix <asm/byteorder.h>" #endif - __u8 next_hdr; + __u8 proto_ctype; __u16 flags; }; __u32 word; }; }; +/* Standard flags in GUE header */ + +#define GUE_FLAG_PRIV htons(1<<0) /* Private flags are in options */ +#define GUE_LEN_PRIV 4 + +#define GUE_FLAGS_ALL (GUE_FLAG_PRIV) + +/* Private flags in the private option extension */ + +#define GUE_PFLAG_REMCSUM htonl(1 << 31) +#define GUE_PLEN_REMCSUM 4 + +#define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM) + +/* Functions to compute options length corresponding to flags. + * If we ever have a lot of flags this can be potentially be + * converted to a more optimized algorithm (table lookup + * for instance). + */ +static inline size_t guehdr_flags_len(__be16 flags) +{ + return ((flags & GUE_FLAG_PRIV) ? GUE_LEN_PRIV : 0); +} + +static inline size_t guehdr_priv_flags_len(__be32 flags) +{ + return 0; +} + +/* Validate standard and private flags. Returns non-zero (meaning invalid) + * if there is an unknown standard or private flags, or the options length for + * the flags exceeds the options length specific in hlen of the GUE header. + */ +static inline int validate_gue_flags(struct guehdr *guehdr, + size_t optlen) +{ + size_t len; + __be32 flags = guehdr->flags; + + if (flags & ~GUE_FLAGS_ALL) + return 1; + + len = guehdr_flags_len(flags); + if (len > optlen) + return 1; + + if (flags & GUE_FLAG_PRIV) { + /* Private flags are last four bytes accounted in + * guehdr_flags_len + */ + flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV); + + if (flags & ~GUE_PFLAGS_ALL) + return 1; + + len += guehdr_priv_flags_len(flags); + if (len > optlen) + return 1; + } + + return 0; +} + #endif diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 3b53c8e405e4..83bb8a73d23c 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> * Maxim Gorbachyov <maxim.gorbachev@siemens.com> @@ -27,10 +23,10 @@ #ifndef IEEE802154_NETDEVICE_H #define IEEE802154_NETDEVICE_H -#include <net/ieee802154.h> #include <net/af_ieee802154.h> #include <linux/netdevice.h> #include <linux/skbuff.h> +#include <linux/ieee802154.h> struct ieee802154_sechdr { #if defined(__LITTLE_ENDIAN_BITFIELD) @@ -427,8 +423,6 @@ struct ieee802154_mlme_ops { /* The fields below are required. */ - struct wpan_phy *(*get_phy)(const struct net_device *dev); - /* * FIXME: these should become the part of PIB/MIB interface. 
* However we still don't have IB interface of any kind @@ -438,16 +432,6 @@ struct ieee802154_mlme_ops { u8 (*get_dsn)(const struct net_device *dev); }; -/* The IEEE 802.15.4 standard defines 2 type of the devices: - * - FFD - full functionality device - * - RFD - reduce functionality device - * - * So 2 sets of mlme operations are needed - */ -struct ieee802154_reduced_mlme_ops { - struct wpan_phy *(*get_phy)(const struct net_device *dev); -}; - static inline struct ieee802154_mlme_ops * ieee802154_mlme_ops(const struct net_device *dev) { diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index d1d272843b3b..9201afe083fa 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -99,4 +99,14 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, const struct in6_addr *daddr, const __be16 dport, const int dif); #endif /* IS_ENABLED(CONFIG_IPV6) */ + +#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_portpair == (__ports)) && \ + ((__sk)->sk_family == AF_INET6) && \ + ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ + ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ + (!(__sk)->sk_bound_dev_if || \ + ((__sk)->sk_bound_dev_if == (__dif))) && \ + net_eq(sock_net(__sk), (__net))) + #endif /* _INET6_HASHTABLES_H */ diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index a5593dab6af7..9326c41c2d7f 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -65,7 +65,8 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t); void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); -int ip6_tnl_xmit_ctl(struct ip6_tnl *t); +int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, + const struct in6_addr *raddr); __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw); __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index dc9d2a27c315..09a819ee2151 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -201,8 +201,8 @@ void fib_free_table(struct fib_table *tb); #ifndef CONFIG_IP_MULTIPLE_TABLES -#define TABLE_LOCAL_INDEX 0 -#define TABLE_MAIN_INDEX 1 +#define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1)) +#define TABLE_MAIN_INDEX (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1)) static inline struct fib_table *fib_get_table(struct net *net, u32 id) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 5bc6edeb7143..25a59eb388a6 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -117,6 +117,22 @@ struct ip_tunnel_net { struct hlist_head tunnels[IP_TNL_HASH_SIZE]; }; +struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *e); + int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4); +}; + +#define MAX_IPTUN_ENCAP_OPS 8 + +extern const struct ip_tunnel_encap_ops __rcu * + iptun_encaps[MAX_IPTUN_ENCAP_OPS]; + +int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op, + unsigned int num); +int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, + unsigned int num); + #ifdef CONFIG_INET int ip_tunnel_init(struct net_device *dev); diff --git a/include/net/ipx.h b/include/net/ipx.h index 0143180fecc9..e5cff6811b30 100644 --- a/include/net/ipx.h +++ b/include/net/ipx.h @@ -42,6 +42,9 @@ struct 
ipxhdr { struct ipx_address ipx_source __packed; }; +/* From af_ipx.c */ +extern int sysctl_ipx_pprop_broadcasting; + static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) { return (struct ipxhdr *)skb_transport_header(skb); @@ -147,7 +150,7 @@ int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, unsigned char *node); void ipxrtr_del_routes(struct ipx_interface *intrfc); int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, - struct iovec *iov, size_t len, int noblock); + struct msghdr *msg, size_t len, int noblock); int ipxrtr_route_skb(struct sk_buff *skb); struct ipx_route *ipxrtr_lookup(__be32 net); int ipxrtr_ioctl(unsigned int cmd, void __user *arg); diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h index a059465101ff..92c8fb575213 100644 --- a/include/net/irda/irda.h +++ b/include/net/irda/irda.h @@ -55,16 +55,6 @@ typedef __u32 magic_t; #endif #ifdef CONFIG_IRDA_DEBUG - -extern unsigned int irda_debug; - -/* use 0 for production, 1 for verification, >2 for debug */ -#define IRDA_DEBUG_LEVEL 0 - -#define IRDA_DEBUG(n, args...) \ -do { if (irda_debug >= (n)) \ - printk(KERN_DEBUG args); \ -} while (0) #define IRDA_ASSERT(expr, func) \ do { if(!(expr)) { \ printk( "Assertion failed! %s:%s:%d %s\n", \ @@ -72,15 +62,10 @@ do { if(!(expr)) { \ func } } while (0) #define IRDA_ASSERT_LABEL(label) label #else -#define IRDA_DEBUG(n, args...) do { } while (0) #define IRDA_ASSERT(expr, func) do { (void)(expr); } while (0) #define IRDA_ASSERT_LABEL(label) #endif /* CONFIG_IRDA_DEBUG */ -#define IRDA_WARNING(args...) do { if (net_ratelimit()) printk(KERN_WARNING args); } while (0) -#define IRDA_MESSAGE(args...) do { if (net_ratelimit()) printk(KERN_INFO args); } while (0) -#define IRDA_ERROR(args...) do { if (net_ratelimit()) printk(KERN_ERR args); } while (0) - /* * Magic numbers used by Linux-IrDA. 
Random numbers which must be unique to * give the best protection diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h index fb4b76d5d7f1..6f23e820618c 100644 --- a/include/net/irda/irlap.h +++ b/include/net/irda/irlap.h @@ -303,7 +303,7 @@ static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state) if (!self || self->magic != LAP_MAGIC) return; - IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[state]); + pr_debug("next LAP state = %s\n", irlap_state[state]); */ self->state = state; } diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h index 42713c931d1f..2d9cd0007cba 100644 --- a/include/net/irda/parameters.h +++ b/include/net/irda/parameters.h @@ -71,17 +71,17 @@ typedef int (*PV_HANDLER)(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func); typedef struct { - PI_HANDLER func; /* Handler for this parameter identifier */ + const PI_HANDLER func; /* Handler for this parameter identifier */ PV_TYPE type; /* Data type for this parameter */ } pi_minor_info_t; typedef struct { - pi_minor_info_t *pi_minor_call_table; + const pi_minor_info_t *pi_minor_call_table; int len; } pi_major_info_t; typedef struct { - pi_major_info_t *tables; + const pi_major_info_t *tables; int len; __u8 pi_mask; int pi_major_offset; diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h index 0e79cfba4b3b..48f3f891b2f9 100644 --- a/include/net/llc_c_st.h +++ b/include/net/llc_c_st.h @@ -35,8 +35,8 @@ struct llc_conn_state_trans { llc_conn_ev_t ev; u8 next_state; - llc_conn_ev_qfyr_t *ev_qualifiers; - llc_conn_action_t *ev_actions; + const llc_conn_ev_qfyr_t *ev_qualifiers; + const llc_conn_action_t *ev_actions; }; struct llc_conn_state { diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h index 567c681f1f3e..c4359e203013 100644 --- a/include/net/llc_s_st.h +++ b/include/net/llc_s_st.h @@ -19,7 +19,7 @@ struct llc_sap_state_trans { llc_sap_ev_t ev; u8 next_state; - llc_sap_action_t *ev_actions; + const llc_sap_action_t *ev_actions; }; struct llc_sap_state { diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 0ad1f47d2dc7..58d719ddaa60 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -263,6 +263,7 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed, * note that this is only called when it changes after the channel * context had been assigned. + * @BSS_CHANGED_OCB: OCB join status changed */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -287,6 +288,7 @@ enum ieee80211_bss_change { BSS_CHANGED_P2P_PS = 1<<19, BSS_CHANGED_BEACON_INFO = 1<<20, BSS_CHANGED_BANDWIDTH = 1<<21, + BSS_CHANGED_OCB = 1<<22, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -739,7 +741,8 @@ struct ieee80211_tx_info { u8 ampdu_ack_len; u8 ampdu_len; u8 antenna; - void *status_driver_data[21 / sizeof(void *)]; + u16 tx_time; + void *status_driver_data[19 / sizeof(void *)]; } status; struct { struct ieee80211_tx_rate driver_rates[ @@ -879,6 +882,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * subframes share the same sequence number. Reported subframes can be * either regular MSDU or singly A-MSDUs. Subframes must not be * interleaved with other frames. + * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific + * radiotap data in the skb->data (before the frame) as described by + * the &struct ieee80211_vendor_radiotap. 
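A hedged RX-path sketch of the vendor radiotap mechanism flagged above: the driver prepends the descriptor plus its data in front of the received frame and sets the flag before handing the skb to mac80211. The OUI, field layout and headroom assumption below are made up for illustration.

#include <net/mac80211.h>

/* Illustrative only: prepend one byte of vendor radiotap data, assuming
 * the skb has enough headroom for the descriptor plus padded data.
 */
static void example_add_vendor_radiotap(struct sk_buff *skb, u8 value)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_vendor_radiotap *rtap;

	rtap = (void *)skb_push(skb, sizeof(*rtap) + 4);
	memset(rtap, 0, sizeof(*rtap) + 4);
	rtap->present = BIT(0);		/* vendor-defined field 0 present */
	rtap->align = 1;		/* data needs no special alignment */
	rtap->oui[0] = 0x00;		/* made-up OUI */
	rtap->oui[1] = 0x11;
	rtap->oui[2] = 0x22;
	rtap->len = 1;			/* one byte of vendor data ... */
	rtap->pad = 3;			/* ... plus padding after it */
	rtap->data[0] = value;

	status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA;
}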
*/ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -908,6 +914,7 @@ enum mac80211_rx_flags { RX_FLAG_10MHZ = BIT(28), RX_FLAG_5MHZ = BIT(29), RX_FLAG_AMSDU_MORE = BIT(30), + RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31), }; #define RX_FLAG_STBC_SHIFT 26 @@ -979,6 +986,39 @@ struct ieee80211_rx_status { }; /** + * struct ieee80211_vendor_radiotap - vendor radiotap data information + * @present: presence bitmap for this vendor namespace + * (this could be extended in the future if any vendor needs more + * bits, the radiotap spec does allow for that) + * @align: radiotap vendor namespace alignment. This defines the needed + * alignment for the @data field below, not for the vendor namespace + * description itself (which has a fixed 2-byte alignment) + * Must be a power of two, and be set to at least 1! + * @oui: radiotap vendor namespace OUI + * @subns: radiotap vendor sub namespace + * @len: radiotap vendor sub namespace skip length, if alignment is done + * then that's added to this, i.e. this is only the length of the + * @data field. + * @pad: number of bytes of padding after the @data, this exists so that + * the skb data alignment can be preserved even if the data has odd + * length + * @data: the actual vendor namespace data + * + * This struct, including the vendor data, goes into the skb->data before + * the 802.11 header. It's split up in mac80211 using the align/oui/subns + * data. + */ +struct ieee80211_vendor_radiotap { + u32 present; + u8 align; + u8 oui[3]; + u8 subns; + u8 pad; + u16 len; + u8 data[]; +} __packed; + +/** * enum ieee80211_conf_flags - configuration flags * * Flags to define PHY configuration options @@ -1117,6 +1157,8 @@ struct ieee80211_conf { * Function (TSF) timer when the frame containing the channel switch * announcement was received. This is simply the rx.mactime parameter * the driver passed into mac80211. + * @device_timestamp: arbitrary timestamp for the device, this is the + * rx.device_timestamp parameter the driver passed to mac80211. * @block_tx: Indicates whether transmission must be blocked before the * scheduled channel switch, as indicated by the AP. * @chandef: the new channel to switch to @@ -1124,6 +1166,7 @@ struct ieee80211_conf { */ struct ieee80211_channel_switch { u64 timestamp; + u32 device_timestamp; bool block_tx; struct cfg80211_chan_def chandef; u8 count; @@ -1423,6 +1466,8 @@ struct ieee80211_sta_rates { * @smps_mode: current SMPS mode (off, static or dynamic) * @rates: rate control selection table * @tdls: indicates whether the STA is a TDLS peer + * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only + * valid if the STA is a TDLS peer in the first place. */ struct ieee80211_sta { u32 supp_rates[IEEE80211_NUM_BANDS]; @@ -1438,6 +1483,7 @@ struct ieee80211_sta { enum ieee80211_smps_mode smps_mode; struct ieee80211_sta_rates __rcu *rates; bool tdls; + bool tdls_initiator; /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); @@ -1576,6 +1622,10 @@ struct ieee80211_tx_control { * a virtual monitor interface when monitor interfaces are the only * active interfaces. * + * @IEEE80211_HW_NO_AUTO_VIF: The driver would like for no wlanX to + * be created. It is expected user-space will create vifs as + * desired (and thus have them named as desired). + * * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface * queue mapping in order to use different queues (not just one per AC) * for different virtual interfaces. 
See the doc section on HW queue @@ -1622,7 +1672,8 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, IEEE80211_HW_MFP_CAPABLE = 1<<13, IEEE80211_HW_WANT_MONITOR_VIF = 1<<14, - /* free slots */ + IEEE80211_HW_NO_AUTO_VIF = 1<<15, + /* free slot */ IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, IEEE80211_HW_CONNECTION_MONITOR = 1<<19, @@ -1776,6 +1827,31 @@ struct ieee80211_scan_request { }; /** + * struct ieee80211_tdls_ch_sw_params - TDLS channel switch parameters + * + * @sta: peer this TDLS channel-switch request/response came from + * @chandef: channel referenced in a TDLS channel-switch request + * @action_code: see &enum ieee80211_tdls_actioncode + * @status: channel-switch response status + * @timestamp: time at which the frame was received + * @switch_time: switch-timing parameter received in the frame + * @switch_timeout: switch-timing parameter received in the frame + * @tmpl_skb: TDLS switch-channel response template + * @ch_sw_tm_ie: offset of the channel-switch timing IE inside @tmpl_skb + */ +struct ieee80211_tdls_ch_sw_params { + struct ieee80211_sta *sta; + struct cfg80211_chan_def *chandef; + u8 action_code; + u32 status; + u32 timestamp; + u16 switch_time; + u16 switch_timeout; + struct sk_buff *tmpl_skb; + u32 ch_sw_tm_ie; +}; + +/** * wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy * * @wiphy: the &struct wiphy which we want to query @@ -2375,6 +2451,22 @@ enum ieee80211_roc_type { }; /** + * enum ieee80211_reconfig_complete_type - reconfig type + * + * This enum is used by the reconfig_complete() callback to indicate what + * reconfiguration type was completed. + * + * @IEEE80211_RECONFIG_TYPE_RESTART: hw restart type + * (also due to resume() callback returning 1) + * @IEEE80211_RECONFIG_TYPE_SUSPEND: suspend type (regardless + * of wowlan configuration) + */ +enum ieee80211_reconfig_type { + IEEE80211_RECONFIG_TYPE_RESTART, + IEEE80211_RECONFIG_TYPE_SUSPEND, +}; + +/** * struct ieee80211_ops - callbacks from mac80211 to the driver * * This structure contains various callbacks that the driver may @@ -2530,7 +2622,9 @@ enum ieee80211_roc_type { * * @sw_scan_start: Notifier function that is called just before a software scan * is started. Can be NULL, if the driver doesn't need this notification. - * The callback can sleep. + * The mac_addr parameter allows supporting NL80211_SCAN_FLAG_RANDOM_ADDR, + * the driver may set the NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR flag if it + * can use this parameter. The callback can sleep. * * @sw_scan_complete: Notifier function that is called just after a * software scan finished. Can be NULL, if the driver doesn't need @@ -2601,6 +2695,9 @@ enum ieee80211_roc_type { * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since * otherwise the rate control algorithm is notified directly. * Must be atomic. + * @sta_rate_tbl_update: Notifies the driver that the rate table changed. This + * is only used if the configured rate control algorithm actually uses + * the new rate table API, and is therefore optional. Must be atomic. * * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), * bursting) for a hardware TX queue. @@ -2809,11 +2906,11 @@ enum ieee80211_roc_type { * disabled/enabled via @bss_info_changed. * @stop_ap: Stop operation on the AP interface. * - * @restart_complete: Called after a call to ieee80211_restart_hw(), when the - * reconfiguration has completed. This can help the driver implement the - * reconfiguration step. 
Also called when reconfiguring because the - * driver's resume function returned 1, as this is just like an "inline" - * hardware restart. This callback may sleep. + * @reconfig_complete: Called after a call to ieee80211_restart_hw() and + * during resume, when the reconfiguration has completed. + * This can help the driver implement the reconfiguration step (and + * indicate mac80211 is ready to receive frames). + * This callback may sleep. * * @ipv6_addr_change: IPv6 address assignment on the given interface changed. * Currently, this is only called for managed or P2P client interfaces. @@ -2829,6 +2926,13 @@ enum ieee80211_roc_type { * transmitted and then call ieee80211_csa_finish(). * If the CSA count starts as zero or 1, this function will not be called, * since there won't be any time to beacon before the switch anyway. + * @pre_channel_switch: This is an optional callback that is called + * before a channel switch procedure is started (ie. when a STA + * gets a CSA or an userspace initiated channel-switch), allowing + * the driver to prepare for the channel switch. + * @post_channel_switch: This is an optional callback that is called + * after a channel switch procedure is completed, allowing the + * driver to go back to a normal configuration. * * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all * information in bss_conf is set up and the beacon can be retrieved. A @@ -2838,6 +2942,26 @@ enum ieee80211_roc_type { * @get_expected_throughput: extract the expected throughput towards the * specified station. The returned value is expressed in Kbps. It returns 0 * if the RC algorithm does not have proper data to provide. + * + * @get_txpower: get current maximum tx power (in dBm) based on configuration + * and hardware limits. + * + * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver + * is responsible for continually initiating channel-switching operations + * and returning to the base channel for communication with the AP. The + * driver receives a channel-switch request template and the location of + * the switch-timing IE within the template as part of the invocation. + * The template is valid only within the call, and the driver can + * optionally copy the skb for further re-use. + * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both + * peers must be on the base channel when the call completes. + * @tdls_recv_channel_switch: a TDLS channel-switch related frame (request or + * response) has been received from a remote peer. The driver gets + * parameters parsed from the incoming frame and may use them to continue + * an ongoing channel-switch operation. In addition, a channel-switch + * response template is provided, together with the location of the + * switch-timing IE within the template. The skb can only be used within + * the function call. 
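To make the callback rework above concrete, here is a minimal sketch of the new hooks; the foo_* names are hypothetical and the bodies only mark where a real driver would act:

#include <net/mac80211.h>

static void foo_reconfig_complete(struct ieee80211_hw *hw,
				  enum ieee80211_reconfig_type reconfig_type)
{
	switch (reconfig_type) {
	case IEEE80211_RECONFIG_TYPE_RESTART:
		/* state was re-uploaded after ieee80211_restart_hw() or
		 * after resume() returned 1; RX can be re-enabled here */
		break;
	case IEEE80211_RECONFIG_TYPE_SUSPEND:
		/* suspend/wowlan configuration is now fully programmed */
		break;
	}
}

static int foo_pre_channel_switch(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_channel_switch *ch_switch)
{
	/* e.g. arm firmware timers using ch_switch->device_timestamp */
	return 0;
}

static int foo_post_channel_switch(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	/* return to the normal configuration on the new channel */
	return 0;
}

These would be wired into the driver's struct ieee80211_ops as .reconfig_complete, .pre_channel_switch and .post_channel_switch.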
*/ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -2897,8 +3021,11 @@ struct ieee80211_ops { struct ieee80211_scan_ies *ies); int (*sched_scan_stop)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); - void (*sw_scan_start)(struct ieee80211_hw *hw); - void (*sw_scan_complete)(struct ieee80211_hw *hw); + void (*sw_scan_start)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr); + void (*sw_scan_complete)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); int (*get_stats)(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats); void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx, @@ -2932,6 +3059,9 @@ struct ieee80211_ops { struct ieee80211_vif *vif, struct ieee80211_sta *sta, u32 changed); + void (*sta_rate_tbl_update)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); int (*conf_tx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params); @@ -2959,6 +3089,7 @@ struct ieee80211_ops { void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop); void (*channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, struct ieee80211_channel_switch *ch_switch); int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); @@ -3025,7 +3156,8 @@ struct ieee80211_ops { int n_vifs, enum ieee80211_chanctx_switch_mode mode); - void (*restart_complete)(struct ieee80211_hw *hw); + void (*reconfig_complete)(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type); #if IS_ENABLED(CONFIG_IPV6) void (*ipv6_addr_change)(struct ieee80211_hw *hw, @@ -3035,14 +3167,54 @@ struct ieee80211_ops { void (*channel_switch_beacon)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_chan_def *chandef); + int (*pre_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *ch_switch); + + int (*post_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); u32 (*get_expected_throughput)(struct ieee80211_sta *sta); + int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int *dbm); + + int (*tdls_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u8 oper_class, + struct cfg80211_chan_def *chandef, + struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie); + void (*tdls_cancel_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_tdls_ch_sw_params *params); }; /** - * ieee80211_alloc_hw - Allocate a new hardware device + * ieee80211_alloc_hw_nm - Allocate a new hardware device + * + * This must be called once for each hardware device. The returned pointer + * must be used to refer to this device when calling other functions. + * mac80211 allocates a private data area for the driver pointed to by + * @priv in &struct ieee80211_hw, the size of this area is given as + * @priv_data_len. + * + * @priv_data_len: length of private data + * @ops: callbacks for this device + * @requested_name: Requested name for this device. 
+ * NULL is valid value, and means use the default naming (phy%d) + * + * Return: A pointer to the new hardware device, or %NULL on error. + */ +struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, + const struct ieee80211_ops *ops, + const char *requested_name); + +/** + * ieee80211_alloc_hw - Allocate a new hardware device * * This must be called once for each hardware device. The returned pointer * must be used to refer to this device when calling other functions. @@ -3055,8 +3227,12 @@ struct ieee80211_ops { * * Return: A pointer to the new hardware device, or %NULL on error. */ +static inline struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, - const struct ieee80211_ops *ops); + const struct ieee80211_ops *ops) +{ + return ieee80211_alloc_hw_nm(priv_data_len, ops, NULL); +} /** * ieee80211_register_hw - Register hardware device @@ -3443,6 +3619,26 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb); /** + * ieee80211_tx_status_noskb - transmit status callback without skb + * + * This function can be used as a replacement for ieee80211_tx_status + * in drivers that cannot reliably map tx status information back to + * specific skbs. + * + * Calls to this function for a single hardware must be synchronized + * against each other. Calls to this function, ieee80211_tx_status_ni() + * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware. + * + * @hw: the hardware the frame was transmitted by + * @sta: the receiver station to which this packet is sent + * (NULL for multicast packets) + * @info: tx status information + */ +void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct ieee80211_tx_info *info); + +/** * ieee80211_tx_status_ni - transmit status callback (in process context) * * Like ieee80211_tx_status() but can be called in process context. @@ -3655,7 +3851,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, /** * ieee80211_probereq_get - retrieve a Probe Request template * @hw: pointer obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @src_addr: source MAC address * @ssid: SSID buffer * @ssid_len: length of SSID * @tailroom: tailroom to reserve at end of SKB for IEs @@ -3666,7 +3862,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, * Return: The Probe Request template. %NULL on error. */ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, + const u8 *src_addr, const u8 *ssid, size_t ssid_len, size_t tailroom); @@ -4172,6 +4368,22 @@ void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw, void *data); /** + * ieee80211_iterate_stations_atomic - iterate stations + * + * This function iterates over all stations associated with a given + * hardware that are currently uploaded to the driver and calls the callback + * function for them. + * This function requires the iterator callback function to be atomic, + * + * @hw: the hardware struct of which the interfaces should be iterated over + * @iterator: the iterator function to call, cannot sleep + * @data: first argument of the iterator function + */ +void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw, + void (*iterator)(void *data, + struct ieee80211_sta *sta), + void *data); +/** * ieee80211_queue_work - add work onto the mac80211 workqueue * * Drivers and mac80211 use this to add work onto the mac80211 workqueue. 
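A short sketch of the new station iterator declared above; the foo_* names are hypothetical. Because the iteration is atomic, the callback must not sleep:

#include <net/mac80211.h>

/* Count the stations currently uploaded to the driver. */
static void foo_count_sta(void *data, struct ieee80211_sta *sta)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int foo_num_stations(struct ieee80211_hw *hw)
{
	unsigned int count = 0;

	ieee80211_iterate_stations_atomic(hw, foo_count_sta, &count);
	return count;
}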
@@ -4480,6 +4692,14 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, gfp_t gfp); /** + * ieee80211_cqm_beacon_loss_notify - inform CQM of beacon loss + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @gfp: context flags + */ +void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp); + +/** * ieee80211_radar_detected - inform that a radar was detected * * @hw: pointer as obtained from ieee80211_alloc_hw() @@ -4637,6 +4857,10 @@ struct rate_control_ops { void (*free_sta)(void *priv, struct ieee80211_sta *sta, void *priv_sta); + void (*tx_status_noskb)(void *priv, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_info *info); void (*tx_status)(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, struct sk_buff *skb); @@ -4888,4 +5112,69 @@ void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf); void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp); + +/** + * ieee80211_reserve_tid - request to reserve a specific TID + * + * There is sometimes a need (such as in TDLS) for blocking the driver from + * using a specific TID so that the FW can use it for certain operations such + * as sending PTI requests. To make sure that the driver doesn't use that TID, + * this function must be called as it flushes out packets on this TID and marks + * it as blocked, so that any transmit for the station on this TID will be + * redirected to the alternative TID in the same AC. + * + * Note that this function blocks and may call back into the driver, so it + * should be called without driver locks held. Also note this function should + * only be called from the driver's @sta_state callback. + * + * @sta: the station to reserve the TID for + * @tid: the TID to reserve + * + * Returns: 0 on success, else on failure + */ +int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid); + +/** + * ieee80211_unreserve_tid - request to unreserve a specific TID + * + * Once there is no longer any need for reserving a certain TID, this function + * should be called, and no longer will packets have their TID modified for + * preventing use of this TID in the driver. + * + * Note that this function blocks and acquires a lock, so it should be called + * without driver locks held. Also note this function should only be called + * from the driver's @sta_state callback. + * + * @sta: the station + * @tid: the TID to unreserve + */ +void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); + +/** + * ieee80211_ie_split - split an IE buffer according to ordering + * + * @ies: the IE buffer + * @ielen: the length of the IE buffer + * @ids: an array with element IDs that are allowed before + * the split + * @n_ids: the size of the element ID array + * @offset: offset where to start splitting in the buffer + * + * This function splits an IE buffer by updating the @offset + * variable to point to the location where the buffer should be + * split. + * + * It assumes that the given IE buffer is well-formed, this + * has to be guaranteed by the caller! + * + * It also assumes that the IEs in the buffer are ordered + * correctly, if not the result of using this function will not + * be ordered correctly either, i.e. it does no reordering. 
+ * + * The function returns the offset where the next part of the + * buffer starts, which may be @ielen if the entire (remainder) + * of the buffer should be used. + */ +size_t ieee80211_ie_split(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, size_t offset); #endif /* MAC80211_H */ diff --git a/include/net/mac802154.h b/include/net/mac802154.h index 2e67cdd19cdc..c823d910b46c 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -12,14 +12,12 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef NET_MAC802154_H #define NET_MAC802154_H #include <net/af_ieee802154.h> +#include <linux/ieee802154.h> #include <linux/skbuff.h> /* General MAC frame format: @@ -35,13 +33,13 @@ */ /* indicates that the Short Address changed */ -#define IEEE802515_AFILT_SADDR_CHANGED 0x00000001 +#define IEEE802154_AFILT_SADDR_CHANGED 0x00000001 /* indicates that the IEEE Address changed */ -#define IEEE802515_AFILT_IEEEADDR_CHANGED 0x00000002 +#define IEEE802154_AFILT_IEEEADDR_CHANGED 0x00000002 /* indicates that the PAN ID changed */ -#define IEEE802515_AFILT_PANID_CHANGED 0x00000004 +#define IEEE802154_AFILT_PANID_CHANGED 0x00000004 /* indicates that PAN Coordinator status changed */ -#define IEEE802515_AFILT_PANC_CHANGED 0x00000008 +#define IEEE802154_AFILT_PANC_CHANGED 0x00000008 struct ieee802154_hw_addr_filt { __le16 pan_id; /* Each independent PAN selects a unique @@ -55,7 +53,14 @@ struct ieee802154_hw_addr_filt { u8 pan_coord; }; -struct ieee802154_dev { +struct ieee802154_vif { + int type; + + /* must be last */ + u8 drv_priv[0] __aligned(sizeof(void *)); +}; + +struct ieee802154_hw { /* filled by the driver */ int extra_tx_headroom; u32 flags; @@ -65,6 +70,7 @@ struct ieee802154_dev { struct ieee802154_hw_addr_filt hw_filt; void *priv; struct wpan_phy *phy; + size_t vif_data_size; }; /* Checksum is in hardware and is omitted from a packet @@ -76,28 +82,43 @@ struct ieee802154_dev { * however, so you are advised to review these flags carefully. */ -/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */ -#define IEEE802154_HW_OMIT_CKSUM 0x00000001 +/* Indicates that xmitter will add FCS on it's own. */ +#define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001 /* Indicates that receiver will autorespond with ACK frames. */ -#define IEEE802154_HW_AACK 0x00000002 +#define IEEE802154_HW_AACK 0x00000002 /* Indicates that transceiver will support transmit power setting. */ -#define IEEE802154_HW_TXPOWER 0x00000004 +#define IEEE802154_HW_TXPOWER 0x00000004 /* Indicates that transceiver will support listen before transmit. */ -#define IEEE802154_HW_LBT 0x00000008 +#define IEEE802154_HW_LBT 0x00000008 /* Indicates that transceiver will support cca mode setting. */ -#define IEEE802154_HW_CCA_MODE 0x00000010 +#define IEEE802154_HW_CCA_MODE 0x00000010 /* Indicates that transceiver will support cca ed level setting. */ -#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020 +#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020 /* Indicates that transceiver will support csma (max_be, min_be, csma retries) * settings. */ -#define IEEE802154_HW_CSMA_PARAMS 0x00000040 +#define IEEE802154_HW_CSMA_PARAMS 0x00000040 /* Indicates that transceiver will support ARET frame retries setting. 
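Returning to the ieee80211_ie_split() helper documented above, a minimal usage sketch (hypothetical foo_emit_ies(); an ordered, well-formed IE buffer and enough skb tailroom are assumed) could look like this:

#include <linux/ieee80211.h>
#include <net/mac80211.h>

/* Copy the IEs that must precede a driver-inserted element, then the rest. */
static void foo_emit_ies(struct sk_buff *skb, const u8 *ies, size_t ies_len)
{
	static const u8 head_ids[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES };
	size_t split;

	split = ieee80211_ie_split(ies, ies_len, head_ids,
				   ARRAY_SIZE(head_ids), 0);

	memcpy(skb_put(skb, split), ies, split);
	/* ... a driver-specific element would be inserted here ... */
	memcpy(skb_put(skb, ies_len - split), ies + split, ies_len - split);
}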
*/ -#define IEEE802154_HW_FRAME_RETRIES 0x00000080 +#define IEEE802154_HW_FRAME_RETRIES 0x00000080 +/* Indicates that transceiver will support hardware address filter setting. */ +#define IEEE802154_HW_AFILT 0x00000100 +/* Indicates that transceiver will support promiscuous mode setting. */ +#define IEEE802154_HW_PROMISCUOUS 0x00000200 +/* Indicates that receiver omits FCS. */ +#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400 +/* Indicates that receiver will not filter frames with bad checksum. */ +#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800 + +/* Indicates that receiver omits FCS and xmitter will add FCS on its own. */ +#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \ + IEEE802154_HW_RX_OMIT_CKSUM) /* This groups the most common CSMA support fields into one. */ #define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \ IEEE802154_HW_CCA_ED_LEVEL | \ - IEEE802154_HW_CSMA_PARAMS | \ + IEEE802154_HW_CSMA_PARAMS) + +/* This groups the most common ARET support fields into one. */ +#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \ + IEEE802154_HW_FRAME_RETRIES) /* struct ieee802154_ops - callbacks from mac802154 to the driver @@ -112,12 +133,24 @@ struct ieee802154_dev { * stop: Handler that 802.15.4 module calls for device cleanup. * This function is called after the last interface is removed. * - * xmit: Handler that 802.15.4 module calls for each transmitted frame. + * xmit_sync: + * Handler that 802.15.4 module calls for each transmitted frame. + * skb contains the buffer starting from the IEEE 802.15.4 header. + * The low-level driver should send the frame based on available + * configuration. This is called by a workqueue and useful for + * synchronous 802.15.4 drivers. + * This function should return zero or negative errno. + * + * WARNING: + * This will be deprecated soon. We don't accept synced xmit callback + * drivers anymore. + * + * xmit_async: + * Handler that 802.15.4 module calls for each transmitted frame. * skb contains the buffer starting from the IEEE 802.15.4 header. * The low-level driver should send the frame based on available * configuration. - * This function should return zero or negative errno. Called with - * pib_lock held. + * This function should return zero or negative errno. * * ed: Handler that 802.15.4 module calls for Energy Detection. * This function should place the value for detected energy @@ -159,40 +192,75 @@ struct ieee802154_dev { * set_frame_retries * Sets the retransmission attempt limit. Called with pib_lock held. * Returns either zero, or negative errno. + * + * set_promiscuous_mode + * Enables or disables promiscuous mode.
*/ struct ieee802154_ops { struct module *owner; - int (*start)(struct ieee802154_dev *dev); - void (*stop)(struct ieee802154_dev *dev); - int (*xmit)(struct ieee802154_dev *dev, - struct sk_buff *skb); - int (*ed)(struct ieee802154_dev *dev, u8 *level); - int (*set_channel)(struct ieee802154_dev *dev, - int page, - int channel); - int (*set_hw_addr_filt)(struct ieee802154_dev *dev, - struct ieee802154_hw_addr_filt *filt, + int (*start)(struct ieee802154_hw *hw); + void (*stop)(struct ieee802154_hw *hw); + int (*xmit_sync)(struct ieee802154_hw *hw, + struct sk_buff *skb); + int (*xmit_async)(struct ieee802154_hw *hw, + struct sk_buff *skb); + int (*ed)(struct ieee802154_hw *hw, u8 *level); + int (*set_channel)(struct ieee802154_hw *hw, u8 page, + u8 channel); + int (*set_hw_addr_filt)(struct ieee802154_hw *hw, + struct ieee802154_hw_addr_filt *filt, unsigned long changed); - int (*ieee_addr)(struct ieee802154_dev *dev, __le64 addr); - int (*set_txpower)(struct ieee802154_dev *dev, int db); - int (*set_lbt)(struct ieee802154_dev *dev, bool on); - int (*set_cca_mode)(struct ieee802154_dev *dev, u8 mode); - int (*set_cca_ed_level)(struct ieee802154_dev *dev, + int (*set_txpower)(struct ieee802154_hw *hw, int db); + int (*set_lbt)(struct ieee802154_hw *hw, bool on); + int (*set_cca_mode)(struct ieee802154_hw *hw, u8 mode); + int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 level); - int (*set_csma_params)(struct ieee802154_dev *dev, + int (*set_csma_params)(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries); - int (*set_frame_retries)(struct ieee802154_dev *dev, + int (*set_frame_retries)(struct ieee802154_hw *hw, s8 retries); + int (*set_promiscuous_mode)(struct ieee802154_hw *hw, + const bool on); }; -/* Basic interface to register ieee802154 device */ -struct ieee802154_dev * -ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops); -void ieee802154_free_device(struct ieee802154_dev *dev); -int ieee802154_register_device(struct ieee802154_dev *dev); -void ieee802154_unregister_device(struct ieee802154_dev *dev); +/** + * ieee802154_be64_to_le64 - copies and convert be64 to le64 + * @le64_dst: le64 destination pointer + * @be64_src: be64 source pointer + */ +static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) +{ + __le64 tmp = (__force __le64)swab64p(be64_src); + + memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN); +} -void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, +/** + * ieee802154_le64_to_be64 - copies and convert le64 to be64 + * @be64_dst: be64 destination pointer + * @le64_src: le64 source pointer + */ +static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) +{ + __be64 tmp = (__force __be64)swab64p(le64_src); + + memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN); +} + +/* Basic interface to register ieee802154 hwice */ +struct ieee802154_hw * +ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops); +void ieee802154_free_hw(struct ieee802154_hw *hw); +int ieee802154_register_hw(struct ieee802154_hw *hw); +void ieee802154_unregister_hw(struct ieee802154_hw *hw); + +void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb); +void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi); +void ieee802154_wake_queue(struct ieee802154_hw *hw); +void ieee802154_stop_queue(struct ieee802154_hw *hw); +void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, + bool ifs_handling); + #endif /* NET_MAC802154_H 
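To illustrate the renamed mac802154 interface as a whole, here is a skeletal driver sketch against the new ieee802154_hw/ieee802154_ops API; every foo_* identifier is made up and the hooks are stubs rather than real hardware accesses:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/mac802154.h>

struct foo_priv {
	struct ieee802154_hw *hw;
};

static int foo_start(struct ieee802154_hw *hw) { return 0; }
static void foo_stop(struct ieee802154_hw *hw) { }

static int foo_xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	/* hand the frame to the device; once the TX-done interrupt fires,
	 * completion is reported without blocking the caller: */
	ieee802154_xmit_complete(hw, skb, false);
	return 0;
}

static int foo_ed(struct ieee802154_hw *hw, u8 *level)
{
	*level = 0;
	return 0;
}

static int foo_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
	return 0;
}

static const struct ieee802154_ops foo_ops = {
	.owner		= THIS_MODULE,
	.start		= foo_start,
	.stop		= foo_stop,
	.xmit_async	= foo_xmit_async,
	.ed		= foo_ed,
	.set_channel	= foo_set_channel,
};

static int foo_register(void)
{
	struct ieee802154_hw *hw;

	hw = ieee802154_alloc_hw(sizeof(struct foo_priv), &foo_ops);
	if (!hw)
		return -ENOMEM;

	hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
	return ieee802154_register_hw(hw);
}

Per the WARNING in the kernel-doc above, new drivers are expected to implement xmit_async; xmit_sync remains only as a transitional path for existing synchronous drivers.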
*/ diff --git a/include/net/mpls.h b/include/net/mpls.h new file mode 100644 index 000000000000..5b3b5addfb08 --- /dev/null +++ b/include/net/mpls.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2014 Nicira, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _NET_MPLS_H +#define _NET_MPLS_H 1 + +#include <linux/if_ether.h> +#include <linux/netdevice.h> + +#define MPLS_HLEN 4 + +static inline bool eth_p_mpls(__be16 eth_type) +{ + return eth_type == htons(ETH_P_MPLS_UC) || + eth_type == htons(ETH_P_MPLS_MC); +} + +/* + * For non-MPLS skbs this will correspond to the network header. + * For MPLS skbs it will be before the network_header as the MPLS + * label stack lies between the end of the mac header and the network + * header. That is, for MPLS skbs the end of the mac header + * is the top of the MPLS label stack. + */ +static inline unsigned char *skb_mpls_header(struct sk_buff *skb) +{ + return skb_mac_header(skb) + skb->mac_len; +} +#endif diff --git a/include/net/neighbour.h b/include/net/neighbour.h index f60558d0254c..eb070b3674a1 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -69,7 +69,7 @@ struct neigh_parms { struct net *net; #endif struct net_device *dev; - struct neigh_parms *next; + struct list_head list; int (*neigh_setup)(struct neighbour *); void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; @@ -203,6 +203,7 @@ struct neigh_table { void (*proxy_redo)(struct sk_buff *skb); char *id; struct neigh_parms parms; + struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; @@ -219,6 +220,13 @@ struct neigh_table { struct pneigh_entry **phash_buckets; }; +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_DN_TABLE = 2, + NEIGH_NR_TABLES, +}; + static inline int neigh_parms_family(struct neigh_parms *p) { return p->tbl->family; @@ -239,8 +247,8 @@ static inline void *neighbour_priv(const struct neighbour *n) #define NEIGH_UPDATE_F_ISROUTER 0x40000000 #define NEIGH_UPDATE_F_ADMIN 0x80000000 -void neigh_table_init(struct neigh_table *tbl); -int neigh_table_clear(struct neigh_table *tbl); +void neigh_table_init(int index, struct neigh_table *tbl); +int neigh_table_clear(int index, struct neigh_table *tbl); struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev); struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index e0d64667a4b3..2e8756b8c775 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -26,6 +26,7 @@ #endif #include <net/netns/nftables.h> #include <net/netns/xfrm.h> +#include <linux/ns_common.h> struct user_namespace; struct proc_dir_entry; @@ -60,7 +61,7 @@ struct net { struct user_namespace *user_ns; /* Owning user namespace */ - unsigned int proc_inum; + struct ns_common ns; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index c8a7db605e03..f0daed2b54d1 100644 --- a/include/net/netfilter/nf_conntrack.h +++ 
b/include/net/netfilter/nf_conntrack.h @@ -92,12 +92,18 @@ struct nf_conn { /* Have we seen traffic both ways yet? (bitset) */ unsigned long status; - /* If we were expected by an expectation, this will be it */ - struct nf_conn *master; - /* Timer function; drops refcnt when it goes off. */ struct timer_list timeout; +#ifdef CONFIG_NET_NS + struct net *ct_net; +#endif + /* all members below initialized via memset */ + u8 __nfct_init_offset[0]; + + /* If we were expected by an expectation, this will be it */ + struct nf_conn *master; + #if defined(CONFIG_NF_CONNTRACK_MARK) u_int32_t mark; #endif @@ -108,9 +114,6 @@ struct nf_conn { /* Extensions */ struct nf_ct_ext *ext; -#ifdef CONFIG_NET_NS - struct net *ct_net; -#endif /* Storage reserved for other modules, must be the last member */ union nf_conntrack_proto proto; diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index cc0c18827602..f2f0fa3bb150 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -72,7 +72,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb) return ret; } -int +void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *proto); diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index adc1fa3dd7ab..cdc920b4c4c2 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -38,8 +38,8 @@ struct nf_conntrack_l3proto { const struct nf_conntrack_tuple *orig); /* Print out the per-protocol part of the tuple. */ - int (*print_tuple)(struct seq_file *s, - const struct nf_conntrack_tuple *); + void (*print_tuple)(struct seq_file *s, + const struct nf_conntrack_tuple *); /* * Called before tracking. diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 4c8d573830b7..1f7061313d54 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -56,11 +56,11 @@ struct nf_conntrack_l4proto { u_int8_t pf, unsigned int hooknum); /* Print out the per-protocol part of the tuple. Return like seq_* */ - int (*print_tuple)(struct seq_file *s, - const struct nf_conntrack_tuple *); + void (*print_tuple)(struct seq_file *s, + const struct nf_conntrack_tuple *); /* Print out the private part of the conntrack. */ - int (*print_conntrack)(struct seq_file *s, struct nf_conn *); + void (*print_conntrack)(struct seq_file *s, struct nf_conn *); /* Return the array of timeouts for this protocol. 
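For illustration, an ->print_tuple() implementation under the new void-returning signature might look like the following sketch (hypothetical protocol name, UDP-style port fields assumed; the seq_printf() result is simply no longer propagated):

#include <linux/seq_file.h>
#include <net/netfilter/nf_conntrack.h>

static void foo_print_tuple(struct seq_file *s,
			    const struct nf_conntrack_tuple *tuple)
{
	seq_printf(s, "sport=%hu dport=%hu ",
		   ntohs(tuple->src.u.udp.port),
		   ntohs(tuple->dst.u.udp.port));
}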
*/ unsigned int *(*get_timeouts)(struct net *net); diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h new file mode 100644 index 000000000000..73b729543309 --- /dev/null +++ b/include/net/netfilter/nf_nat_redirect.h @@ -0,0 +1,12 @@ +#ifndef _NF_NAT_REDIRECT_H_ +#define _NF_NAT_REDIRECT_H_ + +unsigned int +nf_nat_redirect_ipv4(struct sk_buff *skb, + const struct nf_nat_ipv4_multi_range_compat *mr, + unsigned int hooknum); +unsigned int +nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, + unsigned int hooknum); + +#endif /* _NF_NAT_REDIRECT_H_ */ diff --git a/include/net/netfilter/nf_tables_bridge.h b/include/net/netfilter/nf_tables_bridge.h new file mode 100644 index 000000000000..511fb79f6dad --- /dev/null +++ b/include/net/netfilter/nf_tables_bridge.h @@ -0,0 +1,7 @@ +#ifndef _NET_NF_TABLES_BRIDGE_H +#define _NET_NF_TABLES_BRIDGE_H + +int nft_bridge_iphdr_validate(struct sk_buff *skb); +int nft_bridge_ip6hdr_validate(struct sk_buff *skb); + +#endif /* _NET_NF_TABLES_BRIDGE_H */ diff --git a/include/net/netfilter/nft_redir.h b/include/net/netfilter/nft_redir.h new file mode 100644 index 000000000000..a2d67546afab --- /dev/null +++ b/include/net/netfilter/nft_redir.h @@ -0,0 +1,21 @@ +#ifndef _NFT_REDIR_H_ +#define _NFT_REDIR_H_ + +struct nft_redir { + enum nft_registers sreg_proto_min:8; + enum nft_registers sreg_proto_max:8; + u16 flags; +}; + +extern const struct nla_policy nft_redir_policy[]; + +int nft_redir_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr); + +int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, + const struct nft_data **data); + +#endif /* _NFT_REDIR_H_ */ diff --git a/include/net/netlink.h b/include/net/netlink.h index 7b903e1bdbbb..64158353ecb2 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -1185,4 +1185,14 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype, #define nla_for_each_nested(pos, nla, rem) \ nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem) +/** + * nla_is_last - Test if attribute is last in stream + * @nla: attribute to test + * @rem: bytes remaining in stream + */ +static inline bool nla_is_last(const struct nlattr *nla, int rem) +{ + return nla->nla_len == rem; +} + #endif diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 9da798256f0e..730d82ad6ee5 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -50,8 +50,8 @@ struct netns_xfrm { struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; - struct hlist_head policy_inexact[XFRM_POLICY_MAX * 2]; - struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2]; + struct hlist_head policy_inexact[XFRM_POLICY_MAX]; + struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX]; unsigned int policy_count[XFRM_POLICY_MAX * 2]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h index d9a5cf7ac1c4..0ae101eef0f4 100644 --- a/include/net/nfc/digital.h +++ b/include/net/nfc/digital.h @@ -225,6 +225,19 @@ struct nfc_digital_dev { u8 curr_protocol; u8 curr_rf_tech; u8 curr_nfc_dep_pni; + u8 did; + + u8 local_payload_max; + u8 remote_payload_max; + + struct sk_buff *chaining_skb; + struct digital_data_exch *data_exch; + + int atn_count; + int nack_count; + + struct sk_buff *saved_skb; + 
unsigned int saved_skb_len; u16 target_fsc; diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index 7ee8f4cc610b..14bd0e1c47fa 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -57,10 +57,14 @@ struct nfc_hci_ops { int (*discover_se)(struct nfc_hci_dev *dev); int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx); int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx); + int (*se_io)(struct nfc_hci_dev *dev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context); }; /* Pipes */ #define NFC_HCI_INVALID_PIPE 0x80 +#define NFC_HCI_DO_NOT_CREATE_PIPE 0x81 #define NFC_HCI_LINK_MGMT_PIPE 0x00 #define NFC_HCI_ADMIN_PIPE 0x01 diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h index 9eca9ae2280c..e7257a4653b4 100644 --- a/include/net/nfc/nci.h +++ b/include/net/nfc/nci.h @@ -28,6 +28,8 @@ #ifndef __NCI_H #define __NCI_H +#include <net/nfc/nfc.h> + /* NCI constants */ #define NCI_MAX_NUM_MAPPING_CONFIGS 10 #define NCI_MAX_NUM_RF_CONFIGS 10 @@ -73,6 +75,8 @@ #define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83 #define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85 +#define NCI_RF_TECH_MODE_LISTEN_MASK 0x80 + /* NCI RF Technologies */ #define NCI_NFC_RF_TECHNOLOGY_A 0x00 #define NCI_NFC_RF_TECHNOLOGY_B 0x01 @@ -106,6 +110,17 @@ /* NCI Configuration Parameter Tags */ #define NCI_PN_ATR_REQ_GEN_BYTES 0x29 +#define NCI_LN_ATR_RES_GEN_BYTES 0x61 +#define NCI_LA_SEL_INFO 0x32 +#define NCI_LF_PROTOCOL_TYPE 0x50 +#define NCI_LF_CON_BITR_F 0x54 + +/* NCI Configuration Parameters masks */ +#define NCI_LA_SEL_INFO_ISO_DEP_MASK 0x20 +#define NCI_LA_SEL_INFO_NFC_DEP_MASK 0x40 +#define NCI_LF_PROTOCOL_TYPE_NFC_DEP_MASK 0x02 +#define NCI_LF_CON_BITR_F_212 0x02 +#define NCI_LF_CON_BITR_F_424 0x04 /* NCI Reset types */ #define NCI_RESET_TYPE_KEEP_CONFIG 0x00 @@ -314,26 +329,31 @@ struct nci_core_intf_error_ntf { struct rf_tech_specific_params_nfca_poll { __u16 sens_res; __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */ - __u8 nfcid1[10]; + __u8 nfcid1[NFC_NFCID1_MAXSIZE]; __u8 sel_res_len; /* 0 or 1 Bytes */ __u8 sel_res; } __packed; struct rf_tech_specific_params_nfcb_poll { __u8 sensb_res_len; - __u8 sensb_res[12]; /* 11 or 12 Bytes */ + __u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; /* 11 or 12 Bytes */ } __packed; struct rf_tech_specific_params_nfcf_poll { __u8 bit_rate; __u8 sensf_res_len; - __u8 sensf_res[18]; /* 16 or 18 Bytes */ + __u8 sensf_res[NFC_SENSF_RES_MAXSIZE]; /* 16 or 18 Bytes */ } __packed; struct rf_tech_specific_params_nfcv_poll { __u8 res_flags; __u8 dsfid; - __u8 uid[8]; /* 8 Bytes */ + __u8 uid[NFC_ISO15693_UID_MAXSIZE]; /* 8 Bytes */ +} __packed; + +struct rf_tech_specific_params_nfcf_listen { + __u8 local_nfcid2_len; + __u8 local_nfcid2[NFC_NFCID2_MAXSIZE]; /* 0 or 8 Bytes */ } __packed; struct nci_rf_discover_ntf { @@ -365,7 +385,12 @@ struct activation_params_nfcb_poll_iso_dep { struct activation_params_poll_nfc_dep { __u8 atr_res_len; - __u8 atr_res[63]; + __u8 atr_res[NFC_ATR_RES_MAXSIZE - 2]; /* ATR_RES from byte 3 */ +}; + +struct activation_params_listen_nfc_dep { + __u8 atr_req_len; + __u8 atr_req[NFC_ATR_REQ_MAXSIZE - 2]; /* ATR_REQ from byte 3 */ }; struct nci_rf_intf_activated_ntf { @@ -382,6 +407,7 @@ struct nci_rf_intf_activated_ntf { struct rf_tech_specific_params_nfcb_poll nfcb_poll; struct rf_tech_specific_params_nfcf_poll nfcf_poll; struct rf_tech_specific_params_nfcv_poll nfcv_poll; + struct rf_tech_specific_params_nfcf_listen nfcf_listen; } rf_tech_specific_params; __u8 data_exch_rf_tech_and_mode; @@ -393,6 +419,7 @@ struct nci_rf_intf_activated_ntf { 
struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep; struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep; struct activation_params_poll_nfc_dep poll_nfc_dep; + struct activation_params_listen_nfc_dep listen_nfc_dep; } activation_params; } __packed; diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 75d10e625c49..9e51bb4d841e 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -4,6 +4,7 @@ * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2013 Intel Corporation. All rights reserved. + * Copyright (C) 2014 Marvell International Ltd. * * Written by Ilan Elias <ilane@ti.com> * @@ -49,6 +50,8 @@ enum nci_state { NCI_W4_ALL_DISCOVERIES, NCI_W4_HOST_SELECT, NCI_POLL_ACTIVE, + NCI_LISTEN_ACTIVE, + NCI_LISTEN_SLEEP, }; /* NCI timeouts */ @@ -69,6 +72,12 @@ struct nci_ops { int (*send)(struct nci_dev *ndev, struct sk_buff *skb); int (*setup)(struct nci_dev *ndev); __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol); + int (*discover_se)(struct nci_dev *ndev); + int (*disable_se)(struct nci_dev *ndev, u32 se_idx); + int (*enable_se)(struct nci_dev *ndev, u32 se_idx); + int (*se_io)(struct nci_dev *ndev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context); }; #define NCI_MAX_SUPPORTED_RF_INTERFACES 4 diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 6c583e244de2..12adb817c27a 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -1,5 +1,6 @@ /* * Copyright (C) 2011 Instituto Nokia de Tecnologia + * Copyright (C) 2014 Marvell International Ltd. * * Authors: * Lauro Ramos Venancio <lauro.venancio@openbossa.org> @@ -87,6 +88,7 @@ struct nfc_ops { #define NFC_TARGET_IDX_ANY -1 #define NFC_MAX_GT_LEN 48 #define NFC_ATR_RES_GT_OFFSET 15 +#define NFC_ATR_REQ_GT_OFFSET 14 /** * struct nfc_target - NFC target descriptiom diff --git a/include/net/nl802154.h b/include/net/nl802154.h index b23548e04098..6dbd406ca41b 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -1,126 +1,122 @@ +#ifndef __NL802154_H +#define __NL802154_H /* - * nl802154.h + * 802.15.4 netlink interface public header * - * Copyright (C) 2007, 2008, 2009 Siemens AG + * Copyright 2014 Alexander Aring <aar@pengutronix.de> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ -#ifndef IEEE802154_NL_H -#define IEEE802154_NL_H +#define NL802154_GENL_NAME "nl802154" -struct net_device; -struct ieee802154_addr; +enum nl802154_commands { +/* don't change the order or add anything between, this is ABI! */ +/* currently we don't shipping this file via uapi, ignore the above one */ + NL802154_CMD_UNSPEC, -/** - * ieee802154_nl_assoc_indic - Notify userland of an association request. - * @dev: The network device on which this association request was - * received. - * @addr: The address of the device requesting association. - * @cap: The capability information field from the device. - * - * This informs a userland coordinator of a device requesting to - * associate with the PAN controlled by the coordinator. - * - * Note: This is in section 7.3.1 of the IEEE 802.15.4-2006 document. - */ -int ieee802154_nl_assoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, u8 cap); - -/** - * ieee802154_nl_assoc_confirm - Notify userland of association. - * @dev: The device which has completed association. - * @short_addr: The short address assigned to the device. - * @status: The status of the association. - * - * Inform userland of the result of an association request. If the - * association request included asking the coordinator to allocate - * a short address then it is returned in @short_addr. - * - * Note: This is in section 7.3.2 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_assoc_confirm(struct net_device *dev, - __le16 short_addr, u8 status); - -/** - * ieee802154_nl_disassoc_indic - Notify userland of disassociation. - * @dev: The device on which disassociation was indicated. - * @addr: The device which is disassociating. - * @reason: The reason for the disassociation. - * - * Inform userland that a device has disassociated from the network. - * - * Note: This is in section 7.3.3 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_disassoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, u8 reason); - -/** - * ieee802154_nl_disassoc_confirm - Notify userland of disassociation - * completion. - * @dev: The device on which disassociation was ordered. - * @status: The result of the disassociation. - * - * Inform userland of the result of requesting that a device - * disassociate, or the result of requesting that we disassociate from - * a PAN managed by another coordinator. - * - * Note: This is in section 7.1.4.3 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_disassoc_confirm(struct net_device *dev, - u8 status); - -/** - * ieee802154_nl_scan_confirm - Notify userland of completion of scan. - * @dev: The device which was instructed to scan. - * @status: The status of the scan operation. - * @scan_type: What type of scan was performed. - * @unscanned: Any channels that the device was unable to scan. - * @edl: The energy levels (if a passive scan). - * - * - * Note: This is in section 7.1.11 of the IEEE 802.15.4 document. - * Note: This API does not permit the return of an active scan result. 
- */ -int ieee802154_nl_scan_confirm(struct net_device *dev, - u8 status, u8 scan_type, u32 unscanned, u8 page, - u8 *edl/*, struct list_head *pan_desc_list */); - -/** - * ieee802154_nl_beacon_indic - Notify userland of a received beacon. - * @dev: The device on which a beacon was received. - * @panid: The PAN of the coordinator. - * @coord_addr: The short address of the coordinator on that PAN. - * - * Note: This is in section 7.1.5 of the IEEE 802.15.4 document. - * Note: This API does not provide extended information such as what - * channel the PAN is on or what the LQI of the beacon frame was on - * receipt. - * Note: This API cannot indicate a beacon frame for a coordinator - * operating in long addressing mode. - */ -int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid, - __le16 coord_addr); + NL802154_CMD_GET_WPAN_PHY, /* can dump */ + NL802154_CMD_SET_WPAN_PHY, + NL802154_CMD_NEW_WPAN_PHY, + NL802154_CMD_DEL_WPAN_PHY, -/** - * ieee802154_nl_start_confirm - Notify userland of completion of start. - * @dev: The device which was instructed to scan. - * @status: The status of the scan operation. - * - * Note: This is in section 7.1.14 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_start_confirm(struct net_device *dev, u8 status); + NL802154_CMD_GET_INTERFACE, /* can dump */ + NL802154_CMD_SET_INTERFACE, + NL802154_CMD_NEW_INTERFACE, + NL802154_CMD_DEL_INTERFACE, + + NL802154_CMD_SET_CHANNEL, + + NL802154_CMD_SET_PAN_ID, + NL802154_CMD_SET_SHORT_ADDR, + + NL802154_CMD_SET_TX_POWER, + NL802154_CMD_SET_CCA_MODE, + NL802154_CMD_SET_CCA_ED_LEVEL, + + NL802154_CMD_SET_MAX_FRAME_RETRIES, + + NL802154_CMD_SET_BACKOFF_EXPONENT, + NL802154_CMD_SET_MAX_CSMA_BACKOFFS, + + NL802154_CMD_SET_LBT_MODE, + + /* add new commands above here */ + + /* used to define NL802154_CMD_MAX below */ + __NL802154_CMD_AFTER_LAST, + NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1 +}; + +enum nl802154_attrs { +/* don't change the order or add anything between, this is ABI! 
*/ +/* currently we don't shipping this file via uapi, ignore the above one */ + NL802154_ATTR_UNSPEC, + + NL802154_ATTR_WPAN_PHY, + NL802154_ATTR_WPAN_PHY_NAME, + + NL802154_ATTR_IFINDEX, + NL802154_ATTR_IFNAME, + NL802154_ATTR_IFTYPE, + + NL802154_ATTR_WPAN_DEV, + + NL802154_ATTR_PAGE, + NL802154_ATTR_CHANNEL, + + NL802154_ATTR_PAN_ID, + NL802154_ATTR_SHORT_ADDR, + + NL802154_ATTR_TX_POWER, + + NL802154_ATTR_CCA_MODE, + NL802154_ATTR_CCA_MODE3_AND, + NL802154_ATTR_CCA_ED_LEVEL, + + NL802154_ATTR_MAX_FRAME_RETRIES, + + NL802154_ATTR_MAX_BE, + NL802154_ATTR_MIN_BE, + NL802154_ATTR_MAX_CSMA_BACKOFFS, + + NL802154_ATTR_LBT_MODE, + + NL802154_ATTR_GENERATION, + + NL802154_ATTR_CHANNELS_SUPPORTED, + NL802154_ATTR_SUPPORTED_CHANNEL, + + NL802154_ATTR_EXTENDED_ADDR, + + /* add attributes here, update the policy in nl802154.c */ + + __NL802154_ATTR_AFTER_LAST, + NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1 +}; + +enum nl802154_iftype { + /* for backwards compatibility TODO */ + NL802154_IFTYPE_UNSPEC = -1, + + NL802154_IFTYPE_NODE, + NL802154_IFTYPE_MONITOR, + NL802154_IFTYPE_COORD, + + /* keep last */ + NUM_NL802154_IFTYPES, + NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1 +}; -#endif +#endif /* __NL802154_H */ diff --git a/include/net/ping.h b/include/net/ping.h index 026479b61a2d..f074060bc5de 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -82,7 +82,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len); int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); -void ping_rcv(struct sk_buff *skb); +bool ping_rcv(struct sk_buff *skb); #ifdef CONFIG_PROC_FS struct ping_seq_afinfo { diff --git a/include/net/regulatory.h b/include/net/regulatory.h index dad7ab20a8cb..b776d72d84be 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -136,6 +136,17 @@ struct regulatory_request { * otherwise initiating radiation is not allowed. This will enable the * relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration * option + * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure + * all interfaces on this wiphy reside on allowed channels. If this flag + * is not set, upon a regdomain change, the interfaces are given a grace + * period (currently 60 seconds) to disconnect or move to an allowed + * channel. Interfaces on forbidden channels are forcibly disconnected. + * Currently these types of interfaces are supported for enforcement: + * NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP, + * NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR, + * NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO, + * NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device + * includes any modes unsupported for enforcement checking. 
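A driver that performs its own regulatory enforcement could opt out of the grace-period handling by setting the new flag on its wiphy; the setup hook below is a hypothetical sketch:

#include <net/cfg80211.h>

/* The driver enforces regdomain changes itself, so it opts out of
 * cfg80211's stale-kickoff handling. */
static void foo_setup_wiphy(struct wiphy *wiphy)
{
	wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
}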
*/ enum ieee80211_regulatory_flags { REGULATORY_CUSTOM_REG = BIT(0), @@ -144,6 +155,7 @@ enum ieee80211_regulatory_flags { REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3), REGULATORY_COUNTRY_IE_IGNORE = BIT(4), REGULATORY_ENABLE_RELAX_NO_IR = BIT(5), + REGULATORY_IGNORE_STALE_KICKOFF = BIT(6), }; struct ieee80211_freq_range { diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index d17ed6fb2f70..3d282cbb66bf 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -219,7 +219,6 @@ struct tcf_proto_ops { void (*destroy)(struct tcf_proto*); unsigned long (*get)(struct tcf_proto*, u32 handle); - void (*put)(struct tcf_proto*, unsigned long); int (*change)(struct net *net, struct sk_buff *, struct tcf_proto*, unsigned long, u32 handle, struct nlattr **, diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 72a31db47ded..487ef34bbd63 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -219,7 +219,7 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *, const struct sctp_chunk *, __u32 tsn); struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, - const struct msghdr *, size_t msg_len); + struct msghdr *, size_t msg_len); struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, const struct sctp_chunk *, const __u8 *, diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 4ff3f67be62c..2bb2fcf5b11f 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -531,7 +531,7 @@ struct sctp_datamsg { struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, struct sctp_sndrcvinfo *, - struct msghdr *, int len); + struct iov_iter *); void sctp_datamsg_free(struct sctp_datamsg *); void sctp_datamsg_put(struct sctp_datamsg *); void sctp_chunk_fail(struct sctp_chunk *, int error); @@ -647,8 +647,8 @@ struct sctp_chunk { void sctp_chunk_hold(struct sctp_chunk *); void sctp_chunk_put(struct sctp_chunk *); -int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, - struct iovec *data); +int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len, + struct iov_iter *from); void sctp_chunk_free(struct sctp_chunk *); void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); struct sctp_chunk *sctp_chunkify(struct sk_buff *, @@ -1116,7 +1116,6 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len, sctp_scope_t sctp_scope(const union sctp_addr *); int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope); int sctp_is_any(struct sock *sk, const union sctp_addr *addr); -int sctp_addr_is_valid(const union sctp_addr *addr); int sctp_is_ep_boundall(struct sock *sk); diff --git a/include/net/sock.h b/include/net/sock.h index e6f235ebf6c9..2210fec65669 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -54,8 +54,8 @@ #include <linux/security.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include <linux/page_counter.h> #include <linux/memcontrol.h> -#include <linux/res_counter.h> #include <linux/static_key.h> #include <linux/aio.h> #include <linux/sched.h> @@ -273,6 +273,7 @@ struct cg_proto; * @sk_rcvtimeo: %SO_RCVTIMEO setting * @sk_sndtimeo: %SO_SNDTIMEO setting * @sk_rxhash: flow hash received from netif layer + * @sk_incoming_cpu: record cpu processing incoming packets * @sk_txhash: computed flow hash for use on transmit * @sk_filter: socket filtering instructions * @sk_protinfo: private area, net family specific, when not using slab @@ -350,6 +351,12 @@ 
struct sock { #ifdef CONFIG_RPS __u32 sk_rxhash; #endif + u16 sk_incoming_cpu; + /* 16bit hole + * Warned : sk_incoming_cpu can be set from softirq, + * Do not use this hole without fully understanding possible issues. + */ + __u32 sk_txhash; #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sk_napi_id; @@ -833,6 +840,11 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) return sk->sk_backlog_rcv(sk, skb); } +static inline void sk_incoming_cpu_update(struct sock *sk) +{ + sk->sk_incoming_cpu = raw_smp_processor_id(); +} + static inline void sock_rps_record_flow_hash(__u32 hash) { #ifdef CONFIG_RPS @@ -1062,7 +1074,7 @@ enum cg_proto_flags { }; struct cg_proto { - struct res_counter memory_allocated; /* Current allocated memory. */ + struct page_counter memory_allocated; /* Current allocated memory. */ struct percpu_counter sockets_allocated; /* Current number of sockets. */ int memory_pressure; long sysctl_mem[3]; @@ -1214,34 +1226,26 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot, unsigned long amt, int *parent_status) { - struct res_counter *fail; - int ret; + page_counter_charge(&prot->memory_allocated, amt); - ret = res_counter_charge_nofail(&prot->memory_allocated, - amt << PAGE_SHIFT, &fail); - if (ret < 0) + if (page_counter_read(&prot->memory_allocated) > + prot->memory_allocated.limit) *parent_status = OVER_LIMIT; } static inline void memcg_memory_allocated_sub(struct cg_proto *prot, unsigned long amt) { - res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT); -} - -static inline u64 memcg_memory_allocated_read(struct cg_proto *prot) -{ - u64 ret; - ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE); - return ret >> PAGE_SHIFT; + page_counter_uncharge(&prot->memory_allocated, amt); } static inline long sk_memory_allocated(const struct sock *sk) { struct proto *prot = sk->sk_prot; + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return memcg_memory_allocated_read(sk->sk_cgrp); + return page_counter_read(&sk->sk_cgrp->memory_allocated); return atomic_long_read(prot->memory_allocated); } @@ -1255,7 +1259,7 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); /* update the root cgroup regardless */ atomic_long_add_return(amt, prot->memory_allocated); - return memcg_memory_allocated_read(sk->sk_cgrp); + return page_counter_read(&sk->sk_cgrp->memory_allocated); } return atomic_long_add_return(amt, prot->memory_allocated); @@ -1589,6 +1593,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, int *errcode, int max_page_order); void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); void sock_kfree_s(struct sock *sk, void *mem, int size); +void sock_kzfree_s(struct sock *sk, void *mem, int size); void sk_send_sigurg(struct sock *sk); /* @@ -1873,29 +1878,6 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from, return 0; } -static inline int skb_copy_to_page(struct sock *sk, char __user *from, - struct sk_buff *skb, struct page *page, - int off, int copy) -{ - if (skb->ip_summed == CHECKSUM_NONE) { - int err = 0; - __wsum csum = csum_and_copy_from_user(from, - page_address(page) + off, - copy, 0, &err); - if (err) - return err; - skb->csum = csum_block_add(skb->csum, csum, skb->len); - } else if (copy_from_user(page_address(page) + off, from, copy)) - return -EFAULT; - - skb->len += copy; - skb->data_len += copy; - skb->truesize += copy; - sk->sk_wmem_queued += copy; - 
sk_mem_charge(sk, copy); - return 0; -} - /** * sk_wmem_alloc_get - returns write allocations * @sk: socket @@ -2277,16 +2259,6 @@ bool sk_ns_capable(const struct sock *sk, bool sk_capable(const struct sock *sk, int cap); bool sk_net_capable(const struct sock *sk, int cap); -/* - * Enable debug/info messages - */ -extern int net_msg_warn; -#define NETDEBUG(fmt, args...) \ - do { if (net_msg_warn) printk(fmt,##args); } while (0) - -#define LIMIT_NETDEBUG(fmt, args...) \ - do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0) - extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; diff --git a/include/net/switchdev.h b/include/net/switchdev.h new file mode 100644 index 000000000000..8a6d1641fd9b --- /dev/null +++ b/include/net/switchdev.h @@ -0,0 +1,37 @@ +/* + * include/net/switchdev.h - Switch device API + * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef _LINUX_SWITCHDEV_H_ +#define _LINUX_SWITCHDEV_H_ + +#include <linux/netdevice.h> + +#ifdef CONFIG_NET_SWITCHDEV + +int netdev_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid); +int netdev_switch_port_stp_update(struct net_device *dev, u8 state); + +#else + +static inline int netdev_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + return -EOPNOTSUPP; +} + +static inline int netdev_switch_port_stp_update(struct net_device *dev, + u8 state) +{ + return -EOPNOTSUPP; +} + +#endif + +#endif /* _LINUX_SWITCHDEV_H_ */ diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h new file mode 100644 index 000000000000..93b70ade1ff3 --- /dev/null +++ b/include/net/tc_act/tc_vlan.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __NET_TC_VLAN_H +#define __NET_TC_VLAN_H + +#include <net/act_api.h> + +#define VLAN_F_POP 0x1 +#define VLAN_F_PUSH 0x2 + +struct tcf_vlan { + struct tcf_common common; + int tcfv_action; + u16 tcfv_push_vid; + __be16 tcfv_push_proto; +}; +#define to_vlan(a) \ + container_of(a->priv, struct tcf_vlan, common) + +#endif /* __NET_TC_VLAN_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 4062b4f0d121..f50f29faf76f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -55,9 +55,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_HEADER (128 + MAX_HEADER) #define MAX_TCP_OPTION_SPACE 40 -/* +/* * Never offer a window over 32767 without using window scaling. Some - * poor stacks do signed 16bit maths! + * poor stacks do signed 16bit maths! */ #define MAX_TCP_WINDOW 32767U @@ -70,9 +70,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); /* After receiving this amount of duplicate ACKs fast retransmit starts. */ #define TCP_FASTRETRANS_THRESH 3 -/* Maximal reordering. */ -#define TCP_MAX_REORDERING 127 - /* Maximal number of ACKs sent quickly to accelerate slow-start. 
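As a sketch of the new switchdev entry points introduced above (hypothetical helper; assumes the bridge STP state constants from <linux/if_bridge.h>):

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>

/* Print the switch ID behind a port netdev and put the port into
 * forwarding state via the new switchdev API. */
static int foo_setup_switch_port(struct net_device *dev)
{
	struct netdev_phys_item_id psid;
	int err;

	err = netdev_switch_parent_id_get(dev, &psid);
	if (err)
		return err;	/* -EOPNOTSUPP if dev is not a switch port */

	netdev_info(dev, "switch id: %*phN\n", psid.id_len, psid.id);

	return netdev_switch_port_stp_update(dev, BR_STATE_FORWARDING);
}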
*/ #define TCP_MAX_QUICKACKS 16U @@ -167,7 +164,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); /* * TCP option */ - + #define TCPOPT_NOP 1 /* Padding */ #define TCPOPT_EOL 0 /* End of options */ #define TCPOPT_MSS 2 /* Segment size negotiating */ @@ -252,6 +249,7 @@ extern int sysctl_tcp_abort_on_overflow; extern int sysctl_tcp_max_orphans; extern int sysctl_tcp_fack; extern int sysctl_tcp_reordering; +extern int sysctl_tcp_max_reordering; extern int sysctl_tcp_dsack; extern long sysctl_tcp_mem[3]; extern int sysctl_tcp_wmem[3]; @@ -492,17 +490,16 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, u16 *mssp); __u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mss); -#endif - __u32 cookie_init_timestamp(struct request_sock *req); -bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net, - bool *ecn_ok); +bool cookie_timestamp_decode(struct tcp_options_received *opt); +bool cookie_ecn_ok(const struct tcp_options_received *opt, + const struct net *net, const struct dst_entry *dst); /* From net/ipv6/syncookies.c */ int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th, u32 cookie); struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); -#ifdef CONFIG_SYN_COOKIES + u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp); __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, @@ -1104,16 +1101,16 @@ static inline int tcp_win_from_space(int space) space - (space>>sysctl_tcp_adv_win_scale); } -/* Note: caller must be prepared to deal with negative returns */ +/* Note: caller must be prepared to deal with negative returns */ static inline int tcp_space(const struct sock *sk) { return tcp_win_from_space(sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)); -} +} static inline int tcp_full_space(const struct sock *sk) { - return tcp_win_from_space(sk->sk_rcvbuf); + return tcp_win_from_space(sk->sk_rcvbuf); } static inline void tcp_openreq_init(struct request_sock *req, diff --git a/include/net/udplite.h b/include/net/udplite.h index 2caadabcd07b..ae7c8d1fbcad 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -19,7 +19,9 @@ extern struct udp_table udplite_table; static __inline__ int udplite_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { - return memcpy_fromiovecend(to, (struct iovec *) from, offset, len); + struct msghdr *msg = from; + /* XXX: stripping const */ + return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len); } /* Designate sk as UDP-Lite socket */ @@ -40,7 +42,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) * checksum. UDP-Lite (like IPv6) mandates checksums, hence packets * with a zero checksum field are illegal. */ if (uh->check == 0) { - LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: zeroed checksum field\n"); + net_dbg_ratelimited("UDPLite: zeroed checksum field\n"); return 1; } @@ -52,8 +54,8 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) /* * Coverage length violates RFC 3828: log and discard silently. 
*/ - LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: bad csum coverage %d/%d\n", - cscov, skb->len); + net_dbg_ratelimited("UDPLite: bad csum coverage %d/%d\n", + cscov, skb->len); return 1; } else if (cscov < skb->len) { diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h deleted file mode 100644 index 10ab0fc6d4f7..000000000000 --- a/include/net/wpan-phy.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (C) 2007, 2008, 2009 Siemens AG - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Written by: - * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> - */ - -#ifndef WPAN_PHY_H -#define WPAN_PHY_H - -#include <linux/netdevice.h> -#include <linux/mutex.h> -#include <linux/bug.h> - -/* According to the IEEE 802.15.4 stadard the upper most significant bits of - * the 32-bit channel bitmaps shall be used as an integer value to specify 32 - * possible channel pages. The lower 27 bits of the channel bit map shall be - * used as a bit mask to specify channel numbers within a channel page. - */ -#define WPAN_NUM_CHANNELS 27 -#define WPAN_NUM_PAGES 32 - -struct wpan_phy { - struct mutex pib_lock; - - /* - * This is a PIB according to 802.15.4-2011. - * We do not provide timing-related variables, as they - * aren't used outside of driver - */ - u8 current_channel; - u8 current_page; - u32 channels_supported[32]; - s8 transmit_power; - u8 cca_mode; - u8 min_be; - u8 max_be; - u8 csma_retries; - s8 frame_retries; - - bool lbt; - s32 cca_ed_level; - - struct device dev; - int idx; - - struct net_device *(*add_iface)(struct wpan_phy *phy, - const char *name, int type); - void (*del_iface)(struct wpan_phy *phy, struct net_device *dev); - - int (*set_txpower)(struct wpan_phy *phy, int db); - int (*set_lbt)(struct wpan_phy *phy, bool on); - int (*set_cca_mode)(struct wpan_phy *phy, u8 cca_mode); - int (*set_cca_ed_level)(struct wpan_phy *phy, int level); - int (*set_csma_params)(struct wpan_phy *phy, u8 min_be, u8 max_be, - u8 retries); - int (*set_frame_retries)(struct wpan_phy *phy, s8 retries); - - char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); -}; - -#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) - -struct wpan_phy *wpan_phy_alloc(size_t priv_size); -static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev) -{ - phy->dev.parent = dev; -} -int wpan_phy_register(struct wpan_phy *phy); -void wpan_phy_unregister(struct wpan_phy *phy); -void wpan_phy_free(struct wpan_phy *phy); -/* Same semantics as for class_for_each_device */ -int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data); - -static inline void *wpan_phy_priv(struct wpan_phy *phy) -{ - BUG_ON(!phy); - return &phy->priv; -} - -struct wpan_phy *wpan_phy_find(const char *str); - -static inline void wpan_phy_put(struct wpan_phy *phy) -{ - put_device(&phy->dev); -} - -static inline const char *wpan_phy_name(struct wpan_phy *phy) -{ - return dev_name(&phy->dev); -} -#endif 
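The switchdev API added above (include/net/switchdev.h) intentionally stays small: netdev_switch_parent_id_get() reports an opaque ID for the physical switch a port belongs to, netdev_switch_port_stp_update() pushes STP state down to a port, and both fall back to -EOPNOTSUPP stubs when CONFIG_NET_SWITCHDEV is disabled. A minimal sketch of how a caller might use the parent-ID query follows; the helper name is made up for illustration, and the id[]/id_len layout of struct netdev_phys_item_id is assumed from linux/netdevice.h rather than shown in this patch.

#include <linux/netdevice.h>
#include <linux/string.h>
#include <net/switchdev.h>

/* Illustrative helper (not part of the patch): treat two netdevs as ports
 * of the same physical switch iff their switchdev parent IDs match. */
static bool example_same_switch(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id id_a, id_b;

	/* The stubs return -EOPNOTSUPP when CONFIG_NET_SWITCHDEV is off;
	 * treat any error as "not the same switch". */
	if (netdev_switch_parent_id_get(a, &id_a) ||
	    netdev_switch_parent_id_get(b, &id_b))
		return false;

	return id_a.id_len == id_b.id_len &&
	       !memcmp(id_a.id, id_b.id, id_a.id_len);
}

Offload-aware code (a bridge, for instance) could use a check along these lines to group ports by switch before deciding whether pushing state via netdev_switch_port_stp_update() is worthwhile.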
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index a2bf41e0bde9..2d83cfd7e6ce 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -38,11 +38,12 @@ #include <linux/workqueue.h> struct ib_ucontext; +struct ib_umem_odp; struct ib_umem { struct ib_ucontext *context; size_t length; - int offset; + unsigned long address; int page_size; int writable; int hugetlb; @@ -50,17 +51,43 @@ struct ib_umem { struct pid *pid; struct mm_struct *mm; unsigned long diff; + struct ib_umem_odp *odp_data; struct sg_table sg_head; int nmap; int npages; }; +/* Returns the offset of the umem start relative to the first page. */ +static inline int ib_umem_offset(struct ib_umem *umem) +{ + return umem->address & ((unsigned long)umem->page_size - 1); +} + +/* Returns the first page of an ODP umem. */ +static inline unsigned long ib_umem_start(struct ib_umem *umem) +{ + return umem->address - ib_umem_offset(umem); +} + +/* Returns the address of the page after the last one of an ODP umem. */ +static inline unsigned long ib_umem_end(struct ib_umem *umem) +{ + return PAGE_ALIGN(umem->address + umem->length); +} + +static inline size_t ib_umem_num_pages(struct ib_umem *umem) +{ + return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT; +} + #ifdef CONFIG_INFINIBAND_USER_MEM struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, size_t size, int access, int dmasync); void ib_umem_release(struct ib_umem *umem); int ib_umem_page_count(struct ib_umem *umem); +int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, + size_t length); #else /* CONFIG_INFINIBAND_USER_MEM */ @@ -73,7 +100,10 @@ static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context, } static inline void ib_umem_release(struct ib_umem *umem) { } static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; } - +static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, + size_t length) { + return -EINVAL; +} #endif /* CONFIG_INFINIBAND_USER_MEM */ #endif /* IB_UMEM_H */ diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h new file mode 100644 index 000000000000..3da0b167041b --- /dev/null +++ b/include/rdma/ib_umem_odp.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2014 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef IB_UMEM_ODP_H +#define IB_UMEM_ODP_H + +#include <rdma/ib_umem.h> +#include <rdma/ib_verbs.h> +#include <linux/interval_tree.h> + +struct umem_odp_node { + u64 __subtree_last; + struct rb_node rb; +}; + +struct ib_umem_odp { + /* + * An array of the pages included in the on-demand paging umem. + * Indices of pages that are currently not mapped into the device will + * contain NULL. + */ + struct page **page_list; + /* + * An array of the same size as page_list, with DMA addresses mapped + * for pages the pages in page_list. The lower two bits designate + * access permissions. See ODP_READ_ALLOWED_BIT and + * ODP_WRITE_ALLOWED_BIT. + */ + dma_addr_t *dma_list; + /* + * The umem_mutex protects the page_list and dma_list fields of an ODP + * umem, allowing only a single thread to map/unmap pages. The mutex + * also protects access to the mmu notifier counters. + */ + struct mutex umem_mutex; + void *private; /* for the HW driver to use. */ + + /* When false, use the notifier counter in the ucontext struct. */ + bool mn_counters_active; + int notifiers_seq; + int notifiers_count; + + /* A linked list of umems that don't have private mmu notifier + * counters yet. */ + struct list_head no_private_counters; + struct ib_umem *umem; + + /* Tree tracking */ + struct umem_odp_node interval_tree; + + struct completion notifier_completion; + int dying; +}; + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + +int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem); + +void ib_umem_odp_release(struct ib_umem *umem); + +/* + * The lower 2 bits of the DMA address signal the R/W permissions for + * the entry. To upgrade the permissions, provide the appropriate + * bitmask to the map_dma_pages function. + * + * Be aware that upgrading a mapped address might result in change of + * the DMA address for the page. + */ +#define ODP_READ_ALLOWED_BIT (1<<0ULL) +#define ODP_WRITE_ALLOWED_BIT (1<<1ULL) + +#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) + +int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt, + u64 access_mask, unsigned long current_seq); + +void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset, + u64 bound); + +void rbt_ib_umem_insert(struct umem_odp_node *node, struct rb_root *root); +void rbt_ib_umem_remove(struct umem_odp_node *node, struct rb_root *root); +typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end, + void *cookie); +/* + * Call the callback on each ib_umem in the range. Returns the logical or of + * the return values of the functions called. + */ +int rbt_ib_umem_for_each_in_range(struct rb_root *root, u64 start, u64 end, + umem_call_back cb, void *cookie); + +struct umem_odp_node *rbt_ib_umem_iter_first(struct rb_root *root, + u64 start, u64 last); +struct umem_odp_node *rbt_ib_umem_iter_next(struct umem_odp_node *node, + u64 start, u64 last); + +static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item, + unsigned long mmu_seq) +{ + /* + * This code is strongly based on the KVM code from + * mmu_notifier_retry. Should be called with + * the relevant locks taken (item->odp_data->umem_mutex + * and the ucontext umem_mutex semaphore locked for read). 
+ */ + + /* Do not allow page faults while the new ib_umem hasn't seen a state + * with zero notifiers yet, and doesn't have its own valid set of + * private counters. */ + if (!item->odp_data->mn_counters_active) + return 1; + + if (unlikely(item->odp_data->notifiers_count)) + return 1; + if (item->odp_data->notifiers_seq != mmu_seq) + return 1; + return 0; +} + +#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ + +static inline int ib_umem_odp_get(struct ib_ucontext *context, + struct ib_umem *umem) +{ + return -EINVAL; +} + +static inline void ib_umem_odp_release(struct ib_umem *umem) {} + +#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ + +#endif /* IB_UMEM_ODP_H */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 470a011d6fa4..0d74f1de99aa 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -51,6 +51,7 @@ #include <uapi/linux/if_ether.h> #include <linux/atomic.h> +#include <linux/mmu_notifier.h> #include <asm/uaccess.h> extern struct workqueue_struct *ib_wq; @@ -123,7 +124,8 @@ enum ib_device_cap_flags { IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24), IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29), - IB_DEVICE_SIGNATURE_HANDOVER = (1<<30) + IB_DEVICE_SIGNATURE_HANDOVER = (1<<30), + IB_DEVICE_ON_DEMAND_PAGING = (1<<31), }; enum ib_signature_prot_cap { @@ -143,6 +145,27 @@ enum ib_atomic_cap { IB_ATOMIC_GLOB }; +enum ib_odp_general_cap_bits { + IB_ODP_SUPPORT = 1 << 0, +}; + +enum ib_odp_transport_cap_bits { + IB_ODP_SUPPORT_SEND = 1 << 0, + IB_ODP_SUPPORT_RECV = 1 << 1, + IB_ODP_SUPPORT_WRITE = 1 << 2, + IB_ODP_SUPPORT_READ = 1 << 3, + IB_ODP_SUPPORT_ATOMIC = 1 << 4, +}; + +struct ib_odp_caps { + uint64_t general_caps; + struct { + uint32_t rc_odp_caps; + uint32_t uc_odp_caps; + uint32_t ud_odp_caps; + } per_transport_caps; +}; + struct ib_device_attr { u64 fw_ver; __be64 sys_image_guid; @@ -186,6 +209,7 @@ struct ib_device_attr { u8 local_ca_ack_delay; int sig_prot_cap; int sig_guard_cap; + struct ib_odp_caps odp_caps; }; enum ib_mtu { @@ -1073,7 +1097,8 @@ enum ib_access_flags { IB_ACCESS_REMOTE_READ = (1<<2), IB_ACCESS_REMOTE_ATOMIC = (1<<3), IB_ACCESS_MW_BIND = (1<<4), - IB_ZERO_BASED = (1<<5) + IB_ZERO_BASED = (1<<5), + IB_ACCESS_ON_DEMAND = (1<<6), }; struct ib_phys_buf { @@ -1115,6 +1140,8 @@ struct ib_fmr_attr { u8 page_shift; }; +struct ib_umem; + struct ib_ucontext { struct ib_device *device; struct list_head pd_list; @@ -1127,6 +1154,24 @@ struct ib_ucontext { struct list_head xrcd_list; struct list_head rule_list; int closing; + + struct pid *tgid; +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + struct rb_root umem_tree; + /* + * Protects .umem_rbroot and tree, as well as odp_mrs_count and + * mmu notifiers registration. + */ + struct rw_semaphore umem_rwsem; + void (*invalidate_range)(struct ib_umem *umem, + unsigned long start, unsigned long end); + + struct mmu_notifier mn; + atomic_t notifier_count; + /* A list of umems that don't have private mmu notifier counters yet. */ + struct list_head no_private_counters; + int odp_mrs_count; +#endif }; struct ib_uobject { @@ -1662,7 +1707,10 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) { - return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; + size_t copy_sz; + + copy_sz = min_t(size_t, len, udata->outlen); + return copy_to_user(udata->outbuf, src, copy_sz) ? 
-EFAULT : 0; } /** diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 832dcc9f86ec..9d87a37aecad 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -161,17 +161,12 @@ struct expander_device { }; /* ---------- SATA device ---------- */ -enum ata_command_set { - ATA_COMMAND_SET = 0, - ATAPI_COMMAND_SET = 1, -}; - #define ATA_RESP_FIS_SIZE 24 struct sata_device { - enum ata_command_set command_set; - struct smp_resp rps_resp; /* report_phy_sata_resp */ - u8 port_no; /* port number, if this is a PM (Port) */ + unsigned int class; + struct smp_resp rps_resp; /* report_phy_sata_resp */ + u8 port_no; /* port number, if this is a PM (Port) */ struct ata_port *ap; struct ata_host ata_host; diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 6364e23454dd..3a4edd1f7dbb 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -441,13 +441,13 @@ static inline int scsi_execute_req(struct scsi_device *sdev, extern void sdev_disable_disk_events(struct scsi_device *sdev); extern void sdev_enable_disk_events(struct scsi_device *sdev); -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM extern int scsi_autopm_get_device(struct scsi_device *); extern void scsi_autopm_put_device(struct scsi_device *); #else static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; } static inline void scsi_autopm_put_device(struct scsi_device *d) {} -#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev) { diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index c8a462ef9a4e..e939d2b3757a 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -554,7 +554,7 @@ struct Scsi_Host { * __devices is protected by the host_lock, but you should * usually use scsi_device_lookup / shost_for_each_device * to access it and don't care about locking yourself. - * In the rare case of beeing in irq context you can use + * In the rare case of being in irq context you can use * their __ prefixed variants with the lock held. NEVER * access this list directly from a driver. */ diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index ae6c3b8ed2f5..396e8f73670a 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -42,12 +42,11 @@ struct snd_compr_ops; * @buffer_size: size of the above buffer * @fragment_size: size of buffer fragment in bytes * @fragments: number of such fragments - * @hw_pointer: offset of last location in buffer where DSP copied data - * @app_pointer: offset of last location in buffer where app wrote data * @total_bytes_available: cumulative number of bytes made available in * the ring buffer * @total_bytes_transferred: cumulative bytes transferred by offload DSP * @sleep: poll sleep + * @private_data: driver private data pointer */ struct snd_compr_runtime { snd_pcm_state_t state; @@ -94,6 +93,8 @@ struct snd_compr_stream { * This can be called in during stream creation only to set codec params * and the stream properties * @get_params: retrieve the codec parameters, mandatory + * @set_metadata: Set the metadata values for a stream + * @get_metadata: retreives the requested metadata values from stream * @trigger: Trigger operations like start, pause, resume, drain, stop. * This callback is mandatory * @pointer: Retrieve current h/w pointer information. 
Mandatory diff --git a/include/sound/jack.h b/include/sound/jack.h index 58916573db58..218235030ebc 100644 --- a/include/sound/jack.h +++ b/include/sound/jack.h @@ -28,8 +28,23 @@ struct input_dev; /** - * Jack types which can be reported. These values are used as a - * bitmask. + * enum snd_jack_types - Jack types which can be reported + * @SND_JACK_HEADPHONE: Headphone + * @SND_JACK_MICROPHONE: Microphone + * @SND_JACK_HEADSET: Headset + * @SND_JACK_LINEOUT: Line out + * @SND_JACK_MECHANICAL: Mechanical switch + * @SND_JACK_VIDEOOUT: Video out + * @SND_JACK_AVOUT: AV (Audio Video) out + * @SND_JACK_LINEIN: Line in + * @SND_JACK_BTN_0: Button 0 + * @SND_JACK_BTN_1: Button 1 + * @SND_JACK_BTN_2: Button 2 + * @SND_JACK_BTN_3: Button 3 + * @SND_JACK_BTN_4: Button 4 + * @SND_JACK_BTN_5: Button 5 + * + * These values are used as a bitmask. * * Note that this must be kept in sync with the lookup table in * sound/core/jack.c. @@ -90,6 +105,13 @@ static inline void snd_jack_set_parent(struct snd_jack *jack, { } +static inline int snd_jack_set_key(struct snd_jack *jack, + enum snd_jack_types type, + int keytype) +{ + return 0; +} + static inline void snd_jack_report(struct snd_jack *jack, int status) { } diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h new file mode 100644 index 000000000000..afdb416898e0 --- /dev/null +++ b/include/sound/omap-hdmi-audio.h @@ -0,0 +1,43 @@ +/* + * hdmi-audio.c -- OMAP4+ DSS HDMI audio support library + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * + * Author: Jyri Sarha <jsarha@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + */ + +#include <video/omapdss.h> + +#ifndef __OMAP_HDMI_AUDIO_H__ +#define __OMAP_HDMI_AUDIO_H__ + +struct omap_hdmi_audio_ops { + int (*audio_startup)(struct device *dev, + void (*abort_cb)(struct device *dev)); + int (*audio_shutdown)(struct device *dev); + int (*audio_start)(struct device *dev); + void (*audio_stop)(struct device *dev); + int (*audio_config)(struct device *dev, + struct omap_dss_audio *dss_audio); +}; + +/* HDMI audio initalization data */ +struct omap_hdmi_audio_pdata { + struct device *dev; + enum omapdss_version dss_version; + phys_addr_t audio_dma_addr; + + const struct omap_hdmi_audio_ops *ops; +}; + +#endif /* __OMAP_HDMI_AUDIO_H__ */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 8bb00a27e219..1e7f74acc2ec 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -418,7 +418,10 @@ struct snd_pcm_substream { struct snd_info_entry *proc_status_entry; struct snd_info_entry *proc_prealloc_entry; struct snd_info_entry *proc_prealloc_max_entry; +#ifdef CONFIG_SND_PCM_XRUN_DEBUG + struct snd_info_entry *proc_xrun_injection_entry; #endif +#endif /* CONFIG_SND_VERBOSE_PROCFS */ /* misc flags */ unsigned int hw_opened: 1; }; @@ -505,6 +508,7 @@ int snd_pcm_status(struct snd_pcm_substream *substream, int snd_pcm_start(struct snd_pcm_substream *substream); int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t status); int snd_pcm_drain_done(struct snd_pcm_substream *substream); +int snd_pcm_stop_xrun(struct snd_pcm_substream *substream); #ifdef CONFIG_PM int snd_pcm_suspend(struct snd_pcm_substream *substream); int snd_pcm_suspend_all(struct snd_pcm *pcm); @@ -535,6 +539,12 @@ snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size) * PCM library */ +/** + * snd_pcm_stream_linked - Check whether the substream is linked with others + * @substream: substream to check + * + * Returns true if the given substream is being linked with others. + */ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream) { return substream->group != &substream->self_group; @@ -545,6 +555,16 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream); void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream); void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream); unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream); + +/** + * snd_pcm_stream_lock_irqsave - Lock the PCM stream + * @substream: PCM substream + * @flags: irq flags + * + * This locks the PCM stream like snd_pcm_stream_lock() but with the local + * IRQ (only when nonatomic is false). In nonatomic case, this is identical + * as snd_pcm_stream_lock(). + */ #define snd_pcm_stream_lock_irqsave(substream, flags) \ do { \ typecheck(unsigned long, flags); \ @@ -553,9 +573,25 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream); void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream, unsigned long flags); +/** + * snd_pcm_group_for_each_entry - iterate over the linked substreams + * @s: the iterator + * @substream: the substream + * + * Iterate over the all linked substreams to the given @substream. + * When @substream isn't linked with any others, this gives returns @substream + * itself once. 
+ */ #define snd_pcm_group_for_each_entry(s, substream) \ list_for_each_entry(s, &substream->group->substreams, link_list) +/** + * snd_pcm_running - Check whether the substream is in a running state + * @substream: substream to check + * + * Returns true if the given substream is in the state RUNNING, or in the + * state DRAINING for playback. + */ static inline int snd_pcm_running(struct snd_pcm_substream *substream) { return (substream->runtime->status->state == SNDRV_PCM_STATE_RUNNING || @@ -563,45 +599,81 @@ static inline int snd_pcm_running(struct snd_pcm_substream *substream) substream->stream == SNDRV_PCM_STREAM_PLAYBACK)); } +/** + * bytes_to_samples - Unit conversion of the size from bytes to samples + * @runtime: PCM runtime instance + * @size: size in bytes + */ static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size) { return size * 8 / runtime->sample_bits; } +/** + * bytes_to_frames - Unit conversion of the size from bytes to frames + * @runtime: PCM runtime instance + * @size: size in bytes + */ static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size) { return size * 8 / runtime->frame_bits; } +/** + * samples_to_bytes - Unit conversion of the size from samples to bytes + * @runtime: PCM runtime instance + * @size: size in samples + */ static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size) { return size * runtime->sample_bits / 8; } +/** + * frames_to_bytes - Unit conversion of the size from frames to bytes + * @runtime: PCM runtime instance + * @size: size in frames + */ static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size) { return size * runtime->frame_bits / 8; } +/** + * frame_aligned - Check whether the byte size is aligned to frames + * @runtime: PCM runtime instance + * @bytes: size in bytes + */ static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes) { return bytes % runtime->byte_align == 0; } +/** + * snd_pcm_lib_buffer_bytes - Get the buffer size of the current PCM in bytes + * @substream: PCM substream + */ static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return frames_to_bytes(runtime, runtime->buffer_size); } +/** + * snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes + * @substream: PCM substream + */ static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return frames_to_bytes(runtime, runtime->period_size); } -/* - * result is: 0 ... (boundary - 1) +/** + * snd_pcm_playback_avail - Get the available (writable) space for playback + * @runtime: PCM runtime instance + * + * Result is between 0 ... (boundary - 1) */ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime) { @@ -613,8 +685,11 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r return avail; } -/* - * result is: 0 ... (boundary - 1) +/** + * snd_pcm_playback_avail - Get the available (readable) space for capture + * @runtime: PCM runtime instance + * + * Result is between 0 ... 
(boundary - 1) */ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime) { @@ -624,11 +699,19 @@ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *ru return avail; } +/** + * snd_pcm_playback_hw_avail - Get the queued space for playback + * @runtime: PCM runtime instance + */ static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime) { return runtime->buffer_size - snd_pcm_playback_avail(runtime); } +/** + * snd_pcm_capture_hw_avail - Get the free space for capture + * @runtime: PCM runtime instance + */ static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime) { return runtime->buffer_size - snd_pcm_capture_avail(runtime); @@ -708,6 +791,20 @@ static inline int snd_pcm_capture_empty(struct snd_pcm_substream *substream) return snd_pcm_capture_avail(runtime) == 0; } +/** + * snd_pcm_trigger_done - Mark the master substream + * @substream: the pcm substream instance + * @master: the linked master substream + * + * When multiple substreams of the same card are linked and the hardware + * supports the single-shot operation, the driver calls this in the loop + * in snd_pcm_group_for_each_entry() for marking the substream as "done". + * Then most of trigger operations are performed only to the given master + * substream. + * + * The trigger_master mark is cleared at timestamp updates at the end + * of trigger operations. + */ static inline void snd_pcm_trigger_done(struct snd_pcm_substream *substream, struct snd_pcm_substream *master) { @@ -750,18 +847,59 @@ static inline const struct snd_interval *hw_param_interval_c(const struct snd_pc return &params->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL]; } -#define params_channels(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_CHANNELS)->min) -#define params_rate(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_RATE)->min) -#define params_period_size(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min) -#define params_periods(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_PERIODS)->min) -#define params_buffer_size(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min) -#define params_buffer_bytes(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min) +/** + * params_channels - Get the number of channels from the hw params + * @p: hw params + */ +static inline unsigned int params_channels(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_CHANNELS)->min; +} + +/** + * params_rate - Get the sample rate from the hw params + * @p: hw params + */ +static inline unsigned int params_rate(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_RATE)->min; +} + +/** + * params_period_size - Get the period size (in frames) from the hw params + * @p: hw params + */ +static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min; +} + +/** + * params_periods - Get the number of periods from the hw params + * @p: hw params + */ +static inline unsigned int params_periods(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIODS)->min; +} + +/** + * params_buffer_size - Get the buffer size (in frames) from the hw params + * @p: hw params + */ +static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, 
SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min; +} + +/** + * params_channels - Get the buffer size (in bytes) from the hw params + * @p: hw params + */ +static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min; +} int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v); void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c); @@ -883,6 +1021,14 @@ unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit); unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a, unsigned int rates_b); +/** + * snd_pcm_set_runtime_buffer - Set the PCM runtime buffer + * @substream: PCM substream to set + * @bufp: the buffer information, NULL to clear + * + * Copy the buffer information to runtime->dma_buffer when @bufp is non-NULL. + * Otherwise it clears the current buffer information. + */ static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substream, struct snd_dma_buffer *bufp) { @@ -908,6 +1054,11 @@ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream); void snd_pcm_timer_init(struct snd_pcm_substream *substream); void snd_pcm_timer_done(struct snd_pcm_substream *substream); +/** + * snd_pcm_gettime - Fill the timespec depending on the timestamp mode + * @runtime: PCM runtime instance + * @tv: timespec to fill + */ static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime, struct timespec *tv) { @@ -944,7 +1095,6 @@ int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream, int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream); struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream, unsigned long offset); -#if 0 /* for kernel-doc */ /** * snd_pcm_lib_alloc_vmalloc_buffer - allocate virtual DMA buffer * @substream: the substream to allocate the buffer to @@ -957,8 +1107,13 @@ struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream, * Return: 1 if the buffer was changed, 0 if not changed, or a negative error * code. */ -static int snd_pcm_lib_alloc_vmalloc_buffer - (struct snd_pcm_substream *substream, size_t size); +static inline int snd_pcm_lib_alloc_vmalloc_buffer + (struct snd_pcm_substream *substream, size_t size) +{ + return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size, + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); +} + /** * snd_pcm_lib_alloc_vmalloc_32_buffer - allocate 32-bit-addressable buffer * @substream: the substream to allocate the buffer to @@ -970,15 +1125,12 @@ static int snd_pcm_lib_alloc_vmalloc_buffer * Return: 1 if the buffer was changed, 0 if not changed, or a negative error * code. 
*/ -static int snd_pcm_lib_alloc_vmalloc_32_buffer - (struct snd_pcm_substream *substream, size_t size); -#endif -#define snd_pcm_lib_alloc_vmalloc_buffer(subs, size) \ - _snd_pcm_lib_alloc_vmalloc_buffer \ - (subs, size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO) -#define snd_pcm_lib_alloc_vmalloc_32_buffer(subs, size) \ - _snd_pcm_lib_alloc_vmalloc_buffer \ - (subs, size, GFP_KERNEL | GFP_DMA32 | __GFP_ZERO) +static inline int snd_pcm_lib_alloc_vmalloc_32_buffer + (struct snd_pcm_substream *substream, size_t size) +{ + return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size, + GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); +} #define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p) @@ -998,18 +1150,35 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, #define snd_pcm_sgbuf_ops_page NULL #endif /* SND_DMA_SGBUF */ +/** + * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset + * @substream: PCM substream + * @ofs: byte offset + */ static inline dma_addr_t snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs) { return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs); } +/** + * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset + * @substream: PCM substream + * @ofs: byte offset + */ static inline void * snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs) { return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs); } +/** + * snd_pcm_sgbuf_chunk_size - Compute the max size that fits within the contig. + * page from the given size + * @substream: PCM substream + * @ofs: byte offset + * @size: byte size to examine + */ static inline unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream, unsigned int ofs, unsigned int size) @@ -1017,13 +1186,24 @@ snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream, return snd_sgbuf_get_chunk_size(snd_pcm_get_dma_buf(substream), ofs, size); } -/* handle mmap counter - PCM mmap callback should handle this counter properly */ +/** + * snd_pcm_mmap_data_open - increase the mmap counter + * @area: VMA + * + * PCM mmap callback should handle this counter properly + */ static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area) { struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data; atomic_inc(&substream->mmap_count); } +/** + * snd_pcm_mmap_data_close - decrease the mmap counter + * @area: VMA + * + * PCM mmap callback should handle this counter properly + */ static inline void snd_pcm_mmap_data_close(struct vm_area_struct *area) { struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data; @@ -1043,6 +1223,11 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s #define snd_pcm_lib_mmap_vmalloc NULL +/** + * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer + * @dma: DMA number + * @max: pointer to store the max size + */ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max) { *max = dma < 4 ? 
64 * 1024 : 128 * 1024; @@ -1095,7 +1280,11 @@ struct snd_pcm_chmap { void *private_data; /* optional: private data pointer */ }; -/* get the PCM substream assigned to the given chmap info */ +/** + * snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info + * @info: chmap information + * @idx: the substream number index + */ static inline struct snd_pcm_substream * snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx) { @@ -1122,7 +1311,10 @@ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream, unsigned long private_value, struct snd_pcm_chmap **info_ret); -/* Strong-typed conversion of pcm_format to bitwise */ +/** + * pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise + * @pcm_format: PCM format + */ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format) { return 1ULL << (__force int) pcm_format; diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h index d76412b84b48..83284cae464c 100644 --- a/include/sound/rcar_snd.h +++ b/include/sound/rcar_snd.h @@ -36,14 +36,14 @@ #define RSND_SSI_CLK_PIN_SHARE (1 << 31) #define RSND_SSI_NO_BUSIF (1 << 30) /* SSI+DMA without BUSIF */ -#define RSND_SSI(_dma_id, _pio_irq, _flags) \ -{ .dma_id = _dma_id, .pio_irq = _pio_irq, .flags = _flags } +#define RSND_SSI(_dma_id, _irq, _flags) \ +{ .dma_id = _dma_id, .irq = _irq, .flags = _flags } #define RSND_SSI_UNUSED \ -{ .dma_id = -1, .pio_irq = -1, .flags = 0 } +{ .dma_id = -1, .irq = -1, .flags = 0 } struct rsnd_ssi_platform_info { int dma_id; - int pio_irq; + int irq; u32 flags; }; diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h index a5352712194b..120d9610054e 100644 --- a/include/sound/rt5645.h +++ b/include/sound/rt5645.h @@ -23,6 +23,10 @@ struct rt5645_platform_data { unsigned int hp_det_gpio; bool gpio_hp_det_active_high; + + /* true if codec's jd function is used */ + bool en_jd_func; + unsigned int jd_mode; }; #endif diff --git a/include/sound/rt5677.h b/include/sound/rt5677.h index 082670e3a353..d9eb7d861cd0 100644 --- a/include/sound/rt5677.h +++ b/include/sound/rt5677.h @@ -27,6 +27,16 @@ struct rt5677_platform_data { bool lout3_diff; /* DMIC2 clock source selection */ enum rt5677_dmic2_clk dmic2_clk_pin; + + /* configures GPIO, 0 - floating, 1 - pulldown, 2 - pullup */ + u8 gpio_config[6]; + + /* jd1 can select 0 ~ 3 as OFF, GPIO1, GPIO2 and GPIO3 respectively */ + unsigned int jd1_gpio; + /* jd2 and jd3 can select 0 ~ 3 as + OFF, GPIO4, GPIO5 and GPIO6 respectively */ + unsigned int jd2_gpio; + unsigned int jd3_gpio; }; #endif diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h index 2398521f0998..eea5400fe373 100644 --- a/include/sound/seq_kernel.h +++ b/include/sound/seq_kernel.h @@ -108,9 +108,13 @@ int snd_seq_event_port_detach(int client, int port); #ifdef CONFIG_MODULES void snd_seq_autoload_lock(void); void snd_seq_autoload_unlock(void); +void snd_seq_autoload_init(void); +#define snd_seq_autoload_exit() snd_seq_autoload_lock() #else #define snd_seq_autoload_lock() #define snd_seq_autoload_unlock() +#define snd_seq_autoload_init() +#define snd_seq_autoload_exit() #endif #endif /* __SOUND_SEQ_KERNEL_H */ diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index e8b3080d196a..2df96b1384c7 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -206,7 +206,6 @@ struct snd_soc_dai_driver { /* DAI description */ const char *name; unsigned int id; - int ac97_control; unsigned int base; /* DAI driver callbacks */ @@ -216,6 +215,8 @@ struct 
snd_soc_dai_driver { int (*resume)(struct snd_soc_dai *dai); /* compress dai */ bool compress_dai; + /* DAI is also used for the control bus */ + bool bus_control; /* ops */ const struct snd_soc_dai_ops *ops; @@ -241,7 +242,6 @@ struct snd_soc_dai { const char *name; int id; struct device *dev; - void *ac97_pdata; /* platform_data for the ac97 codec */ /* driver ops */ struct snd_soc_dai_driver *driver; @@ -268,7 +268,6 @@ struct snd_soc_dai { unsigned int sample_bits; /* parent platform/codec */ - struct snd_soc_platform *platform; struct snd_soc_codec *codec; struct snd_soc_component *component; @@ -276,8 +275,6 @@ struct snd_soc_dai { unsigned int tx_mask; unsigned int rx_mask; - struct snd_soc_card *card; - struct list_head list; }; diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 3a4d7da67b8d..89823cfe6f04 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -435,7 +435,7 @@ void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card); unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol); /* Mostly internal - should not normally be used */ -void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm); +void dapm_mark_endpoints_dirty(struct snd_soc_card *card); /* dapm path query */ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, @@ -508,9 +508,9 @@ struct snd_soc_dapm_path { /* status */ u32 connect:1; /* source and sink widgets are connected */ - u32 walked:1; /* path has been walked */ u32 walking:1; /* path is in the process of being walked */ u32 weak:1; /* path ignored for power management */ + u32 is_supply:1; /* At least one of the connected widgets is a supply */ int (*connected)(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink); @@ -544,11 +544,13 @@ struct snd_soc_dapm_widget { unsigned char active:1; /* active stream on DAC, ADC's */ unsigned char connected:1; /* connected codec pin */ unsigned char new:1; /* cnew complete */ - unsigned char ext:1; /* has external widgets */ unsigned char force:1; /* force state */ unsigned char ignore_suspend:1; /* kept enabled over suspend */ unsigned char new_power:1; /* power from this run */ unsigned char power_checked:1; /* power checked this run */ + unsigned char is_supply:1; /* Widget is a supply type widget */ + unsigned char is_sink:1; /* Widget is a sink type widget */ + unsigned char is_source:1; /* Widget is a source type widget */ int subseq; /* sort within widget type */ int (*power_check)(struct snd_soc_dapm_widget *w); @@ -567,6 +569,7 @@ struct snd_soc_dapm_widget { struct list_head sinks; /* used during DAPM updates */ + struct list_head work_list; struct list_head power_list; struct list_head dirty; int inputs; diff --git a/include/sound/soc.h b/include/sound/soc.h index 7ba7130037a0..b4fca9aed2a2 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -36,6 +36,11 @@ {.reg = xreg, .rreg = xreg, .shift = shift_left, \ .rshift = shift_right, .max = xmax, .platform_max = xmax, \ .invert = xinvert, .autodisable = xautodisable}) +#define SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, xsign_bit, xinvert, xautodisable) \ + ((unsigned long)&(struct soc_mixer_control) \ + {.reg = xreg, .rreg = xreg, .shift = shift_left, \ + .rshift = shift_right, .min = xmin, .max = xmax, .platform_max = xmax, \ + .sign_bit = xsign_bit, .invert = xinvert, .autodisable = xautodisable}) #define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \ SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, 
xautodisable) #define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \ @@ -171,11 +176,9 @@ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ SNDRV_CTL_ELEM_ACCESS_READWRITE, \ .tlv.p = (tlv_array), \ - .info = snd_soc_info_volsw_s8, .get = snd_soc_get_volsw_s8, \ - .put = snd_soc_put_volsw_s8, \ - .private_value = (unsigned long)&(struct soc_mixer_control) \ - {.reg = xreg, .min = xmin, .max = xmax, \ - .platform_max = xmax} } + .info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\ + .put = snd_soc_put_volsw, \ + .private_value = SOC_DOUBLE_S_VALUE(xreg, 0, 8, xmin, xmax, 7, 0, 0) } #define SOC_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xitems, xtexts) \ { .reg = xreg, .shift_l = xshift_l, .shift_r = xshift_r, \ .items = xitems, .texts = xtexts, \ @@ -366,8 +369,6 @@ struct snd_soc_jack_gpio; typedef int (*hw_write_t)(void *,const char* ,int); -extern struct snd_ac97_bus_ops *soc_ac97_ops; - enum snd_soc_pcm_subclass { SND_SOC_PCM_CLASS_PCM = 0, SND_SOC_PCM_CLASS_BE = 1, @@ -409,13 +410,9 @@ int devm_snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *cmpnt_drv, struct snd_soc_dai_driver *dai_drv, int num_dai); void snd_soc_unregister_component(struct device *dev); -int snd_soc_cache_sync(struct snd_soc_codec *codec); int snd_soc_cache_init(struct snd_soc_codec *codec); int snd_soc_cache_exit(struct snd_soc_codec *codec); -int snd_soc_cache_write(struct snd_soc_codec *codec, - unsigned int reg, unsigned int value); -int snd_soc_cache_read(struct snd_soc_codec *codec, - unsigned int reg, unsigned int *value); + int snd_soc_platform_read(struct snd_soc_platform *platform, unsigned int reg); int snd_soc_platform_write(struct snd_soc_platform *platform, @@ -500,14 +497,28 @@ int snd_soc_update_bits_locked(struct snd_soc_codec *codec, int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg, unsigned int mask, unsigned int value); -int snd_soc_new_ac97_codec(struct snd_soc_codec *codec, - struct snd_ac97_bus_ops *ops, int num); -void snd_soc_free_ac97_codec(struct snd_soc_codec *codec); +#ifdef CONFIG_SND_SOC_AC97_BUS +struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec); +void snd_soc_free_ac97_codec(struct snd_ac97 *ac97); int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops); int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops, struct platform_device *pdev); +extern struct snd_ac97_bus_ops *soc_ac97_ops; +#else +static inline int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops, + struct platform_device *pdev) +{ + return 0; +} + +static inline int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops) +{ + return 0; +} +#endif + /* *Controls */ @@ -545,12 +556,6 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); -int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo); -int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol); -int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol); int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo); int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, @@ -780,24 +785,18 @@ struct snd_soc_codec { struct device *dev; const struct snd_soc_codec_driver *driver; - struct mutex mutex; struct list_head list; struct list_head card_list; /* runtime */ - struct snd_ac97 *ac97; /* for ad-hoc ac97 devices 
*/ unsigned int cache_bypass:1; /* Suppress access to the cache */ unsigned int suspended:1; /* Codec is in suspend PM state */ - unsigned int ac97_registered:1; /* Codec has been AC97 registered */ - unsigned int ac97_created:1; /* Codec has been created by SoC */ unsigned int cache_init:1; /* codec cache has been initialized */ - u32 cache_sync; /* Cache needs to be synced to hardware */ /* codec IO */ void *control_data; /* codec control (i2c/3wire) data */ hw_write_t hw_write; void *reg_cache; - struct mutex cache_rw_mutex; /* component */ struct snd_soc_component component; @@ -860,8 +859,6 @@ struct snd_soc_platform_driver { int (*probe)(struct snd_soc_platform *); int (*remove)(struct snd_soc_platform *); - int (*suspend)(struct snd_soc_dai *dai); - int (*resume)(struct snd_soc_dai *dai); struct snd_soc_component_driver component_driver; /* pcm creation and destruction */ @@ -886,7 +883,7 @@ struct snd_soc_platform_driver { struct snd_soc_dai_link_component { const char *name; - const struct device_node *of_node; + struct device_node *of_node; const char *dai_name; }; @@ -894,8 +891,6 @@ struct snd_soc_platform { struct device *dev; const struct snd_soc_platform_driver *driver; - unsigned int suspended:1; /* platform is suspended */ - struct list_head list; struct snd_soc_component component; @@ -990,7 +985,7 @@ struct snd_soc_codec_conf { * DT/OF node, but not both. */ const char *dev_name; - const struct device_node *of_node; + struct device_node *of_node; /* * optional map of kcontrol, widget and path name prefixes that are @@ -1007,7 +1002,7 @@ struct snd_soc_aux_dev { * DT/OF node, but not both. */ const char *codec_name; - const struct device_node *codec_of_node; + struct device_node *codec_of_node; /* codec/machine specific init - e.g. add machine controls */ int (*init)(struct snd_soc_component *component); @@ -1264,6 +1259,17 @@ unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg); int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int val); +/** + * snd_soc_cache_sync() - Sync the register cache with the hardware + * @codec: CODEC to sync + * + * Note: This function will call regcache_sync() + */ +static inline int snd_soc_cache_sync(struct snd_soc_codec *codec) +{ + return regcache_sync(codec->component.regmap); +} + /* component IO */ int snd_soc_component_read(struct snd_soc_component *component, unsigned int reg, unsigned int *val); @@ -1277,6 +1283,45 @@ void snd_soc_component_async_complete(struct snd_soc_component *component); int snd_soc_component_test_bits(struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int value); +#ifdef CONFIG_REGMAP + +void snd_soc_component_init_regmap(struct snd_soc_component *component, + struct regmap *regmap); +void snd_soc_component_exit_regmap(struct snd_soc_component *component); + +/** + * snd_soc_codec_init_regmap() - Initialize regmap instance for the CODEC + * @codec: The CODEC for which to initialize the regmap instance + * @regmap: The regmap instance that should be used by the CODEC + * + * This function allows deferred assignment of the regmap instance that is + * associated with the CODEC. Only use this if the regmap instance is not yet + * ready when the CODEC is registered. The function must also be called before + * the first IO attempt of the CODEC. 
+ */ +static inline void snd_soc_codec_init_regmap(struct snd_soc_codec *codec, + struct regmap *regmap) +{ + snd_soc_component_init_regmap(&codec->component, regmap); +} + +/** + * snd_soc_codec_exit_regmap() - De-initialize regmap instance for the CODEC + * @codec: The CODEC for which to de-initialize the regmap instance + * + * Calls regmap_exit() on the regmap instance associated to the CODEC and + * removes the regmap instance from the CODEC. + * + * This function should only be used if snd_soc_codec_init_regmap() was used to + * initialize the regmap instance. + */ +static inline void snd_soc_codec_exit_regmap(struct snd_soc_codec *codec) +{ + snd_soc_component_exit_regmap(&codec->component); +} + +#endif + /* device driver data */ static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card, @@ -1451,6 +1496,9 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np, struct device_node **framemaster); int snd_soc_of_get_dai_name(struct device_node *of_node, const char **dai_name); +int snd_soc_of_get_dai_link_codecs(struct device *dev, + struct device_node *of_node, + struct snd_soc_dai_link *dai_link); #include <sound/soc-dai.h> diff --git a/include/sound/uda134x.h b/include/sound/uda134x.h index e475659bd3be..509efb050176 100644 --- a/include/sound/uda134x.h +++ b/include/sound/uda134x.h @@ -18,18 +18,6 @@ struct uda134x_platform_data { struct l3_pins l3; void (*power) (int); int model; - /* - ALSA SOC usually puts the device in standby mode when it's not used - for sometime. If you unset is_powered_on_standby the driver will - turn off the ADC/DAC when this callback is invoked and turn it back - on when needed. Unfortunately this will result in a very light bump - (it can be audible only with good earphones). If this bothers you - set is_powered_on_standby, you will have slightly higher power - consumption. Please note that sending the L3 command for ADC is - enough to make the bump, so it doesn't make difference if you - completely take off power from the codec. 
- */ - int is_powered_on_standby; #define UDA134X_UDA1340 1 #define UDA134X_UDA1341 2 #define UDA134X_UDA1344 3 diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h index b04ee7e5a466..88cf39d96d0f 100644 --- a/include/trace/events/asoc.h +++ b/include/trace/events/asoc.h @@ -288,31 +288,6 @@ TRACE_EVENT(snd_soc_jack_notify, TP_printk("jack=%s %x", __get_str(name), (int)__entry->val) ); -TRACE_EVENT(snd_soc_cache_sync, - - TP_PROTO(struct snd_soc_codec *codec, const char *type, - const char *status), - - TP_ARGS(codec, type, status), - - TP_STRUCT__entry( - __string( name, codec->component.name) - __string( status, status ) - __string( type, type ) - __field( int, id ) - ), - - TP_fast_assign( - __assign_str(name, codec->component.name); - __assign_str(status, status); - __assign_str(type, type); - __entry->id = codec->component.id; - ), - - TP_printk("codec=%s.%d type=%s status=%s", __get_str(name), - (int)__entry->id, __get_str(type), __get_str(status)) -); - #endif /* _TRACE_ASOC_H */ /* This part must be outside protection */ diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index ff4bd1b35246..6cfb841fea7c 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -43,15 +43,13 @@ struct extent_status; { EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \ { EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \ { EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \ - { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" }, \ - { EXT4_GET_BLOCKS_NO_PUT_HOLE, "NO_PUT_HOLE" }) + { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" }) #define show_mflags(flags) __print_flags(flags, "", \ { EXT4_MAP_NEW, "N" }, \ { EXT4_MAP_MAPPED, "M" }, \ { EXT4_MAP_UNWRITTEN, "U" }, \ - { EXT4_MAP_BOUNDARY, "B" }, \ - { EXT4_MAP_FROM_CLUSTER, "C" }) + { EXT4_MAP_BOUNDARY, "B" }) #define show_free_flags(flags) __print_flags(flags, "|", \ { EXT4_FREE_BLOCKS_METADATA, "METADATA" }, \ @@ -2452,15 +2450,14 @@ TRACE_EVENT(ext4_collapse_range, TRACE_EVENT(ext4_es_shrink, TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time, - int skip_precached, int nr_skipped, int retried), + int nr_skipped, int retried), - TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried), + TP_ARGS(sb, nr_shrunk, scan_time, nr_skipped, retried), TP_STRUCT__entry( __field( dev_t, dev ) __field( int, nr_shrunk ) __field( unsigned long long, scan_time ) - __field( int, skip_precached ) __field( int, nr_skipped ) __field( int, retried ) ), @@ -2469,16 +2466,14 @@ TRACE_EVENT(ext4_es_shrink, __entry->dev = sb->s_dev; __entry->nr_shrunk = nr_shrunk; __entry->scan_time = div_u64(scan_time, 1000); - __entry->skip_precached = skip_precached; __entry->nr_skipped = nr_skipped; __entry->retried = retried; ), - TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d " + TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu " "nr_skipped %d retried %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk, - __entry->scan_time, __entry->skip_precached, - __entry->nr_skipped, __entry->retried) + __entry->scan_time, __entry->nr_skipped, __entry->retried) ); #endif /* _TRACE_EXT4_H */ diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h index 94db6a2c3540..63116362543c 100644 --- a/include/trace/events/host1x.h +++ b/include/trace/events/host1x.h @@ -29,6 +29,8 @@ #include <linux/ktime.h> #include <linux/tracepoint.h> +struct host1x_bo; + DECLARE_EVENT_CLASS(host1x, TP_PROTO(const char *name), TP_ARGS(name), @@ -79,14 +81,14 @@ TRACE_EVENT(host1x_cdma_push, ); 
TRACE_EVENT(host1x_cdma_push_gather, - TP_PROTO(const char *name, u32 mem_id, + TP_PROTO(const char *name, struct host1x_bo *bo, u32 words, u32 offset, void *cmdbuf), - TP_ARGS(name, mem_id, words, offset, cmdbuf), + TP_ARGS(name, bo, words, offset, cmdbuf), TP_STRUCT__entry( __field(const char *, name) - __field(u32, mem_id) + __field(struct host1x_bo *, bo) __field(u32, words) __field(u32, offset) __field(bool, cmdbuf) @@ -100,13 +102,13 @@ TRACE_EVENT(host1x_cdma_push_gather, } __entry->cmdbuf = cmdbuf; __entry->name = name; - __entry->mem_id = mem_id; + __entry->bo = bo; __entry->words = words; __entry->offset = offset; ), - TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]", - __entry->name, __entry->mem_id, + TP_printk("name=%s, bo=%p, words=%u, offset=%d, contents=[%s]", + __entry->name, __entry->bo, __entry->words, __entry->offset, __print_hex(__get_dynamic_array(cmdbuf), __entry->cmdbuf ? __entry->words * 4 : 0)) @@ -221,12 +223,13 @@ TRACE_EVENT(host1x_syncpt_load_min, ); TRACE_EVENT(host1x_syncpt_wait_check, - TP_PROTO(void *mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min), + TP_PROTO(struct host1x_bo *bo, u32 offset, u32 syncpt_id, u32 thresh, + u32 min), - TP_ARGS(mem_id, offset, syncpt_id, thresh, min), + TP_ARGS(bo, offset, syncpt_id, thresh, min), TP_STRUCT__entry( - __field(void *, mem_id) + __field(struct host1x_bo *, bo) __field(u32, offset) __field(u32, syncpt_id) __field(u32, thresh) @@ -234,15 +237,15 @@ TRACE_EVENT(host1x_syncpt_wait_check, ), TP_fast_assign( - __entry->mem_id = mem_id; + __entry->bo = bo; __entry->offset = offset; __entry->syncpt_id = syncpt_id; __entry->thresh = thresh; __entry->min = min; ), - TP_printk("mem_id=%p, offset=%05x, id=%d, thresh=%d, current=%d", - __entry->mem_id, __entry->offset, + TP_printk("bo=%p, offset=%05x, id=%d, thresh=%d, current=%d", + __entry->bo, __entry->offset, __entry->syncpt_id, __entry->thresh, __entry->min) ); diff --git a/include/trace/events/module.h b/include/trace/events/module.h index 7c5cbfe3fc49..81c4c183d348 100644 --- a/include/trace/events/module.h +++ b/include/trace/events/module.h @@ -80,7 +80,7 @@ DECLARE_EVENT_CLASS(module_refcnt, TP_fast_assign( __entry->ip = ip; - __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs); + __entry->refcnt = atomic_read(&mod->refcnt); __assign_str(name, mod->name); ), diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 1fef3e6e9436..b9c1dc6c825a 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -6,6 +6,9 @@ #include <linux/sunrpc/sched.h> #include <linux/sunrpc/clnt.h> +#include <linux/sunrpc/svc.h> +#include <linux/sunrpc/xprtsock.h> +#include <linux/sunrpc/svc_xprt.h> #include <net/tcp_states.h> #include <linux/net.h> #include <linux/tracepoint.h> @@ -306,6 +309,271 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection); DEFINE_RPC_SOCKET_EVENT(rpc_socket_close); DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown); +DECLARE_EVENT_CLASS(rpc_xprt_event, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + + TP_ARGS(xprt, xid, status), + + TP_STRUCT__entry( + __field(__be32, xid) + __field(int, status) + __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) + __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) + ), + + TP_fast_assign( + __entry->xid = xid; + __entry->status = status; + __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + ), + + TP_printk("peer=[%s]:%s 
xid=0x%x status=%d", __get_str(addr), + __get_str(port), be32_to_cpu(__entry->xid), + __entry->status) +); + +DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_ARGS(xprt, xid, status)); + +DEFINE_EVENT(rpc_xprt_event, xprt_transmit, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_ARGS(xprt, xid, status)); + +DEFINE_EVENT(rpc_xprt_event, xprt_complete_rqst, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_ARGS(xprt, xid, status)); + +TRACE_EVENT(xs_tcp_data_ready, + TP_PROTO(struct rpc_xprt *xprt, int err, unsigned int total), + + TP_ARGS(xprt, err, total), + + TP_STRUCT__entry( + __field(int, err) + __field(unsigned int, total) + __string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] : + "(null)") + __string(port, xprt ? xprt->address_strings[RPC_DISPLAY_PORT] : + "(null)") + ), + + TP_fast_assign( + __entry->err = err; + __entry->total = total; + __assign_str(addr, xprt ? + xprt->address_strings[RPC_DISPLAY_ADDR] : "(null)"); + __assign_str(port, xprt ? + xprt->address_strings[RPC_DISPLAY_PORT] : "(null)"); + ), + + TP_printk("peer=[%s]:%s err=%d total=%u", __get_str(addr), + __get_str(port), __entry->err, __entry->total) +); + +#define rpc_show_sock_xprt_flags(flags) \ + __print_flags(flags, "|", \ + { TCP_RCV_LAST_FRAG, "TCP_RCV_LAST_FRAG" }, \ + { TCP_RCV_COPY_FRAGHDR, "TCP_RCV_COPY_FRAGHDR" }, \ + { TCP_RCV_COPY_XID, "TCP_RCV_COPY_XID" }, \ + { TCP_RCV_COPY_DATA, "TCP_RCV_COPY_DATA" }, \ + { TCP_RCV_READ_CALLDIR, "TCP_RCV_READ_CALLDIR" }, \ + { TCP_RCV_COPY_CALLDIR, "TCP_RCV_COPY_CALLDIR" }, \ + { TCP_RPC_REPLY, "TCP_RPC_REPLY" }) + +TRACE_EVENT(xs_tcp_data_recv, + TP_PROTO(struct sock_xprt *xs), + + TP_ARGS(xs), + + TP_STRUCT__entry( + __string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]) + __string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]) + __field(__be32, xid) + __field(unsigned long, flags) + __field(unsigned long, copied) + __field(unsigned int, reclen) + __field(unsigned long, offset) + ), + + TP_fast_assign( + __assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]); + __entry->xid = xs->tcp_xid; + __entry->flags = xs->tcp_flags; + __entry->copied = xs->tcp_copied; + __entry->reclen = xs->tcp_reclen; + __entry->offset = xs->tcp_offset; + ), + + TP_printk("peer=[%s]:%s xid=0x%x flags=%s copied=%lu reclen=%u offset=%lu", + __get_str(addr), __get_str(port), be32_to_cpu(__entry->xid), + rpc_show_sock_xprt_flags(__entry->flags), + __entry->copied, __entry->reclen, __entry->offset) +); + +#define show_rqstp_flags(flags) \ + __print_flags(flags, "|", \ + { (1UL << RQ_SECURE), "RQ_SECURE"}, \ + { (1UL << RQ_LOCAL), "RQ_LOCAL"}, \ + { (1UL << RQ_USEDEFERRAL), "RQ_USEDEFERRAL"}, \ + { (1UL << RQ_DROPME), "RQ_DROPME"}, \ + { (1UL << RQ_SPLICE_OK), "RQ_SPLICE_OK"}, \ + { (1UL << RQ_VICTIM), "RQ_VICTIM"}, \ + { (1UL << RQ_BUSY), "RQ_BUSY"}) + +TRACE_EVENT(svc_recv, + TP_PROTO(struct svc_rqst *rqst, int status), + + TP_ARGS(rqst, status), + + TP_STRUCT__entry( + __field(struct sockaddr *, addr) + __field(__be32, xid) + __field(int, status) + __field(unsigned long, flags) + ), + + TP_fast_assign( + __entry->addr = (struct sockaddr *)&rqst->rq_addr; + __entry->xid = status > 0 ? 
rqst->rq_xid : 0; + __entry->status = status; + __entry->flags = rqst->rq_flags; + ), + + TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr, + be32_to_cpu(__entry->xid), __entry->status, + show_rqstp_flags(__entry->flags)) +); + +DECLARE_EVENT_CLASS(svc_rqst_status, + + TP_PROTO(struct svc_rqst *rqst, int status), + + TP_ARGS(rqst, status), + + TP_STRUCT__entry( + __field(struct sockaddr *, addr) + __field(__be32, xid) + __field(int, dropme) + __field(int, status) + __field(unsigned long, flags) + ), + + TP_fast_assign( + __entry->addr = (struct sockaddr *)&rqst->rq_addr; + __entry->xid = rqst->rq_xid; + __entry->status = status; + __entry->flags = rqst->rq_flags; + ), + + TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s", + __entry->addr, be32_to_cpu(__entry->xid), + __entry->status, show_rqstp_flags(__entry->flags)) +); + +DEFINE_EVENT(svc_rqst_status, svc_process, + TP_PROTO(struct svc_rqst *rqst, int status), + TP_ARGS(rqst, status)); + +DEFINE_EVENT(svc_rqst_status, svc_send, + TP_PROTO(struct svc_rqst *rqst, int status), + TP_ARGS(rqst, status)); + +#define show_svc_xprt_flags(flags) \ + __print_flags(flags, "|", \ + { (1UL << XPT_BUSY), "XPT_BUSY"}, \ + { (1UL << XPT_CONN), "XPT_CONN"}, \ + { (1UL << XPT_CLOSE), "XPT_CLOSE"}, \ + { (1UL << XPT_DATA), "XPT_DATA"}, \ + { (1UL << XPT_TEMP), "XPT_TEMP"}, \ + { (1UL << XPT_DEAD), "XPT_DEAD"}, \ + { (1UL << XPT_CHNGBUF), "XPT_CHNGBUF"}, \ + { (1UL << XPT_DEFERRED), "XPT_DEFERRED"}, \ + { (1UL << XPT_OLD), "XPT_OLD"}, \ + { (1UL << XPT_LISTENER), "XPT_LISTENER"}, \ + { (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \ + { (1UL << XPT_LOCAL), "XPT_LOCAL"}) + +TRACE_EVENT(svc_xprt_do_enqueue, + TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst), + + TP_ARGS(xprt, rqst), + + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) + __field(struct svc_rqst *, rqst) + ), + + TP_fast_assign( + __entry->xprt = xprt; + __entry->rqst = rqst; + ), + + TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt, + (struct sockaddr *)&__entry->xprt->xpt_remote, + __entry->rqst ? __entry->rqst->rq_task->pid : 0, + show_svc_xprt_flags(__entry->xprt->xpt_flags)) +); + +TRACE_EVENT(svc_xprt_dequeue, + TP_PROTO(struct svc_xprt *xprt), + + TP_ARGS(xprt), + + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) + __field_struct(struct sockaddr_storage, ss) + __field(unsigned long, flags) + ), + + TP_fast_assign( + __entry->xprt = xprt, + xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); + __entry->flags = xprt ? 
xprt->xpt_flags : 0; + ), + + TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt, + (struct sockaddr *)&__entry->ss, + show_svc_xprt_flags(__entry->flags)) +); + +TRACE_EVENT(svc_wake_up, + TP_PROTO(int pid), + + TP_ARGS(pid), + + TP_STRUCT__entry( + __field(int, pid) + ), + + TP_fast_assign( + __entry->pid = pid; + ), + + TP_printk("pid=%d", __entry->pid) +); + +TRACE_EVENT(svc_handle_xprt, + TP_PROTO(struct svc_xprt *xprt, int len), + + TP_ARGS(xprt, len), + + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) + __field(int, len) + ), + + TP_fast_assign( + __entry->xprt = xprt; + __entry->len = len; + ), + + TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt, + (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len, + show_svc_xprt_flags(__entry->xprt->xpt_flags)) +); #endif /* _TRACE_SUNRPC_H */ #include <trace/define_trace.h> diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 26b4f2e13275..139b5067345b 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -277,14 +277,12 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ field = (typeof(field))iter->ent; \ \ ret = ftrace_raw_output_prep(iter, trace_event); \ - if (ret) \ + if (ret != TRACE_TYPE_HANDLED) \ return ret; \ \ - ret = trace_seq_printf(s, print); \ - if (!ret) \ - return TRACE_TYPE_PARTIAL_LINE; \ + trace_seq_printf(s, print); \ \ - return TRACE_TYPE_HANDLED; \ + return trace_handle_return(s); \ } \ static struct trace_event_functions ftrace_event_type_funcs_##call = { \ .trace = ftrace_raw_output_##call, \ diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index ea0796bdcf88..5c15c2a5c123 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -82,4 +82,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 22749c134117..e016bd9b1a04 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -707,9 +707,11 @@ __SYSCALL(__NR_getrandom, sys_getrandom) __SYSCALL(__NR_memfd_create, sys_memfd_create) #define __NR_bpf 280 __SYSCALL(__NR_bpf, sys_bpf) +#define __NR_execveat 281 +__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) #undef __NR_syscalls -#define __NR_syscalls 281 +#define __NR_syscalls 282 /* * All syscalls below here should go away really, diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index a0db2d4aa5f0..86574b0005ff 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -286,6 +286,8 @@ struct drm_mode_get_property { char name[DRM_PROP_NAME_LEN]; __u32 count_values; + /* This is only used to count enum values, not blobs. The _blobs is + * simply because of a historical reason, i.e. backwards compat. */ __u32 count_enum_blobs; }; diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index ff57f07c3249..250262265ee3 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -340,6 +340,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 #define I915_PARAM_HAS_WT 27 #define I915_PARAM_CMD_PARSER_VERSION 28 +#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 typedef struct drm_i915_getparam { int param; @@ -876,6 +877,12 @@ struct drm_i915_gem_get_tiling { * mmap mapping. 
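A hedged userspace sketch for the I915_PARAM_HAS_COHERENT_PHYS_GTT getparam added above (illustrative only, not part of this patch). The already-open DRM file descriptor drm_fd is an assumption:

/* Query whether physically bound objects are CPU-coherent. Assumes
 * <stdio.h>, <sys/ioctl.h> and <drm/i915_drm.h>. */
int coherent = 0;
drm_i915_getparam_t gp = {
	.param = I915_PARAM_HAS_COHERENT_PHYS_GTT,
	.value = &coherent,
};

if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
	printf("coherent phys GTT: %d\n", coherent);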
*/ __u32 swizzle_mode; + + /** + * Returned address bit 6 swizzling required for CPU access through + * mmap mapping whilst bound. + */ + __u32 phys_swizzle_mode; }; struct drm_i915_gem_get_aperture { diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 8523f9bb72f2..00b100023c47 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -1,4 +1,5 @@ # UAPI Header export list +header-y += android/ header-y += byteorder/ header-y += can/ header-y += caif/ @@ -37,27 +38,27 @@ header-y += aio_abi.h header-y += apm_bios.h header-y += arcfb.h header-y += atalk.h -header-y += atm.h -header-y += atm_eni.h -header-y += atm_he.h -header-y += atm_idt77105.h -header-y += atm_nicstar.h -header-y += atm_tcp.h -header-y += atm_zatm.h header-y += atmapi.h header-y += atmarp.h header-y += atmbr2684.h header-y += atmclip.h header-y += atmdev.h +header-y += atm_eni.h +header-y += atm.h +header-y += atm_he.h +header-y += atm_idt77105.h header-y += atmioc.h header-y += atmlec.h header-y += atmmpc.h +header-y += atm_nicstar.h header-y += atmppp.h header-y += atmsap.h header-y += atmsvc.h +header-y += atm_tcp.h +header-y += atm_zatm.h header-y += audit.h -header-y += auto_fs.h header-y += auto_fs4.h +header-y += auto_fs.h header-y += auxvec.h header-y += ax25.h header-y += b1lli.h @@ -67,8 +68,8 @@ header-y += bfs_fs.h header-y += binfmts.h header-y += blkpg.h header-y += blktrace_api.h -header-y += bpf.h header-y += bpf_common.h +header-y += bpf.h header-y += bpqether.h header-y += bsg.h header-y += btrfs.h @@ -93,21 +94,21 @@ header-y += cyclades.h header-y += cycx_cfm.h header-y += dcbnl.h header-y += dccp.h -header-y += dlm.h +header-y += dlmconstants.h header-y += dlm_device.h +header-y += dlm.h header-y += dlm_netlink.h header-y += dlm_plock.h -header-y += dlmconstants.h header-y += dm-ioctl.h header-y += dm-log-userspace.h header-y += dn.h header-y += dqblk_xfs.h header-y += edd.h header-y += efs_fs_sb.h +header-y += elfcore.h header-y += elf-em.h header-y += elf-fdpic.h header-y += elf.h -header-y += elfcore.h header-y += errno.h header-y += errqueue.h header-y += ethtool.h @@ -131,15 +132,15 @@ header-y += fsl_hypervisor.h header-y += fuse.h header-y += futex.h header-y += gameport.h -header-y += gen_stats.h header-y += genetlink.h +header-y += gen_stats.h header-y += gfs2_ondisk.h header-y += gigaset_dev.h -header-y += hdlc.h header-y += hdlcdrv.h +header-y += hdlc.h header-y += hdreg.h -header-y += hid.h header-y += hiddev.h +header-y += hid.h header-y += hidraw.h header-y += hpet.h header-y += hsr_netlink.h @@ -151,7 +152,6 @@ header-y += i2o-dev.h header-y += i8k.h header-y += icmp.h header-y += icmpv6.h -header-y += if.h header-y += if_addr.h header-y += if_addrlabel.h header-y += if_alg.h @@ -165,6 +165,7 @@ header-y += if_ether.h header-y += if_fc.h header-y += if_fddi.h header-y += if_frad.h +header-y += if.h header-y += if_hippi.h header-y += if_infiniband.h header-y += if_link.h @@ -182,40 +183,41 @@ header-y += if_tunnel.h header-y += if_vlan.h header-y += if_x25.h header-y += igmp.h -header-y += in.h header-y += in6.h -header-y += in_route.h header-y += inet_diag.h +header-y += in.h header-y += inotify.h header-y += input.h +header-y += in_route.h header-y += ioctl.h -header-y += ip.h header-y += ip6_tunnel.h -header-y += ip_vs.h header-y += ipc.h +header-y += ip.h header-y += ipmi.h header-y += ipmi_msgdefs.h header-y += ipsec.h header-y += ipv6.h header-y += ipv6_route.h +header-y += ip_vs.h header-y += ipx.h header-y += irda.h header-y += irqnr.h 
-header-y += isdn.h header-y += isdn_divertif.h -header-y += isdn_ppp.h +header-y += isdn.h header-y += isdnif.h +header-y += isdn_ppp.h header-y += iso_fs.h -header-y += ivtv.h header-y += ivtvfb.h +header-y += ivtv.h header-y += ixjuser.h header-y += jffs2.h header-y += joystick.h -header-y += kd.h +header-y += kcmp.h header-y += kdev_t.h -header-y += kernel-page-flags.h -header-y += kernel.h +header-y += kd.h header-y += kernelcapi.h +header-y += kernel.h +header-y += kernel-page-flags.h header-y += kexec.h header-y += keyboard.h header-y += keyctl.h @@ -231,6 +233,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h \ header-y += kvm_para.h endif +header-y += hw_breakpoint.h header-y += l2tp.h header-y += libc-compat.h header-y += limits.h @@ -243,6 +246,7 @@ header-y += map_to_7segment.h header-y += matroxfb.h header-y += mdio.h header-y += media.h +header-y += media-bus-format.h header-y += mei.h header-y += memfd.h header-y += mempolicy.h @@ -255,43 +259,43 @@ header-y += mman.h header-y += mmtimer.h header-y += mpls.h header-y += mqueue.h -header-y += mroute.h header-y += mroute6.h +header-y += mroute.h header-y += msdos_fs.h header-y += msg.h header-y += mtio.h -header-y += n_r3964.h header-y += nbd.h -header-y += ncp.h header-y += ncp_fs.h +header-y += ncp.h header-y += ncp_mount.h header-y += ncp_no.h header-y += neighbour.h -header-y += net.h -header-y += net_dropmon.h -header-y += net_tstamp.h header-y += netconf.h header-y += netdevice.h -header-y += netlink_diag.h -header-y += netfilter.h +header-y += net_dropmon.h header-y += netfilter_arp.h header-y += netfilter_bridge.h header-y += netfilter_decnet.h +header-y += netfilter.h header-y += netfilter_ipv4.h header-y += netfilter_ipv6.h +header-y += net.h +header-y += netlink_diag.h header-y += netlink.h header-y += netrom.h +header-y += net_tstamp.h header-y += nfc.h -header-y += nfs.h header-y += nfs2.h header-y += nfs3.h header-y += nfs4.h header-y += nfs4_mount.h +header-y += nfsacl.h header-y += nfs_fs.h +header-y += nfs.h header-y += nfs_idmap.h header-y += nfs_mount.h -header-y += nfsacl.h header-y += nl80211.h +header-y += n_r3964.h header-y += nubus.h header-y += nvme.h header-y += nvram.h @@ -311,16 +315,16 @@ header-y += pfkeyv2.h header-y += pg.h header-y += phantom.h header-y += phonet.h +header-y += pktcdvd.h header-y += pkt_cls.h header-y += pkt_sched.h -header-y += pktcdvd.h header-y += pmu.h header-y += poll.h header-y += posix_types.h header-y += ppdev.h header-y += ppp-comp.h -header-y += ppp-ioctl.h header-y += ppp_defs.h +header-y += ppp-ioctl.h header-y += pps.h header-y += prctl.h header-y += psci.h @@ -352,13 +356,13 @@ header-y += seccomp.h header-y += securebits.h header-y += selinux_netlink.h header-y += sem.h -header-y += serial.h header-y += serial_core.h +header-y += serial.h header-y += serial_reg.h header-y += serio.h header-y += shm.h -header-y += signal.h header-y += signalfd.h +header-y += signal.h header-y += smiapp.h header-y += snmp.h header-y += sock_diag.h @@ -367,8 +371,8 @@ header-y += sockios.h header-y += som.h header-y += sonet.h header-y += sonypi.h -header-y += sound.h header-y += soundcard.h +header-y += sound.h header-y += stat.h header-y += stddef.h header-y += string.h @@ -383,15 +387,17 @@ header-y += tcp.h header-y += tcp_metrics.h header-y += telephony.h header-y += termios.h +header-y += thermal.h header-y += time.h header-y += times.h header-y += timex.h header-y += tiocl.h -header-y += tipc.h header-y += tipc_config.h +header-y += tipc_netlink.h 
+header-y += tipc.h header-y += toshiba.h -header-y += tty.h header-y += tty_flags.h +header-y += tty.h header-y += types.h header-y += udf_fs_i.h header-y += udp.h @@ -427,6 +433,8 @@ header-y += virtio_net.h header-y += virtio_pci.h header-y += virtio_ring.h header-y += virtio_rng.h +header-y += virtio_scsi.h +header-y += virtio_types.h header-y += vm_sockets.h header-y += vt.h header-y += wait.h @@ -437,6 +445,5 @@ header-y += wireless.h header-y += x25.h header-y += xattr.h header-y += xfrm.h -header-y += hw_breakpoint.h header-y += zorro.h header-y += zorro_ids.h diff --git a/include/uapi/linux/android/Kbuild b/include/uapi/linux/android/Kbuild new file mode 100644 index 000000000000..ca011eec252a --- /dev/null +++ b/include/uapi/linux/android/Kbuild @@ -0,0 +1,2 @@ +# UAPI Header export list +header-y += binder.h diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h new file mode 100644 index 000000000000..41420e341e75 --- /dev/null +++ b/include/uapi/linux/android/binder.h @@ -0,0 +1,352 @@ +/* + * Copyright (C) 2008 Google, Inc. + * + * Based on, but no longer compatible with, the original + * OpenBinder.org binder driver interface, which is: + * + * Copyright (c) 2005 Palmsource, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_BINDER_H +#define _UAPI_LINUX_BINDER_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +#define B_PACK_CHARS(c1, c2, c3, c4) \ + ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) +#define B_TYPE_LARGE 0x85 + +enum { + BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), + BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), + BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), + BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), + BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), +}; + +enum { + FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, + FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, +}; + +#ifdef BINDER_IPC_32BIT +typedef __u32 binder_size_t; +typedef __u32 binder_uintptr_t; +#else +typedef __u64 binder_size_t; +typedef __u64 binder_uintptr_t; +#endif + +/* + * This is the flattened representation of a Binder object for transfer + * between processes. The 'offsets' supplied as part of a binder transaction + * contains offsets into the data where these structures occur. The Binder + * driver takes care of re-writing the structure type and data as it moves + * between processes. + */ +struct flat_binder_object { + /* 8 bytes for large_flat_header. */ + __u32 type; + __u32 flags; + + /* 8 bytes of data. */ + union { + binder_uintptr_t binder; /* local object */ + __u32 handle; /* remote object */ + }; + + /* extra data associated with local object */ + binder_uintptr_t cookie; +}; + +/* + * On 64-bit platforms where user code may run in 32-bits the driver must + * translate the buffer (and local binder) addresses appropriately. 
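Ahead of the binder_write_read block and the EINTR/ECONNREFUSED note further down, a hedged userspace sketch of the retry pattern that note asks for (illustrative only, not part of this patch). The fd, the write_buf/read_buf buffers and their lengths are assumptions:

/* Drive the driver with BINDER_WRITE_READ, retrying while interrupted.
 * Assumes <errno.h>, <stdint.h>, <stdlib.h>, <sys/ioctl.h>, this header,
 * and an fd obtained by opening /dev/binder. */
struct binder_write_read bwr = {
	.write_buffer = (binder_uintptr_t)(uintptr_t)write_buf,
	.write_size   = write_len,
	.read_buffer  = (binder_uintptr_t)(uintptr_t)read_buf,
	.read_size    = read_len,
};
int ret;

do {
	ret = ioctl(fd, BINDER_WRITE_READ, &bwr);
} while (ret < 0 && errno == EINTR);

if (ret < 0 && errno == ECONNREFUSED)
	exit(1);	/* the driver no longer accepts work from this process */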
+ */ + +struct binder_write_read { + binder_size_t write_size; /* bytes to write */ + binder_size_t write_consumed; /* bytes consumed by driver */ + binder_uintptr_t write_buffer; + binder_size_t read_size; /* bytes to read */ + binder_size_t read_consumed; /* bytes consumed by driver */ + binder_uintptr_t read_buffer; +}; + +/* Use with BINDER_VERSION, driver fills in fields. */ +struct binder_version { + /* driver protocol version -- increment with incompatible change */ + __s32 protocol_version; +}; + +/* This is the current protocol version. */ +#ifdef BINDER_IPC_32BIT +#define BINDER_CURRENT_PROTOCOL_VERSION 7 +#else +#define BINDER_CURRENT_PROTOCOL_VERSION 8 +#endif + +#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) +#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) +#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) +#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) +#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) +#define BINDER_THREAD_EXIT _IOW('b', 8, __s32) +#define BINDER_VERSION _IOWR('b', 9, struct binder_version) + +/* + * NOTE: Two special error codes you should check for when calling + * in to the driver are: + * + * EINTR -- The operation has been interupted. This should be + * handled by retrying the ioctl() until a different error code + * is returned. + * + * ECONNREFUSED -- The driver is no longer accepting operations + * from your process. That is, the process is being destroyed. + * You should handle this by exiting from your process. Note + * that once this error code is returned, all further calls to + * the driver from any thread will return this same code. + */ + +enum transaction_flags { + TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */ + TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ + TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ + TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ +}; + +struct binder_transaction_data { + /* The first two are only used for bcTRANSACTION and brTRANSACTION, + * identifying the target and contents of the transaction. + */ + union { + /* target descriptor of command transaction */ + __u32 handle; + /* target descriptor of return transaction */ + binder_uintptr_t ptr; + } target; + binder_uintptr_t cookie; /* target object cookie */ + __u32 code; /* transaction command */ + + /* General information about the transaction. */ + __u32 flags; + pid_t sender_pid; + uid_t sender_euid; + binder_size_t data_size; /* number of bytes of data */ + binder_size_t offsets_size; /* number of bytes of offsets */ + + /* If this transaction is inline, the data immediately + * follows here; otherwise, it ends with a pointer to + * the data buffer. + */ + union { + struct { + /* transaction data */ + binder_uintptr_t buffer; + /* offsets from buffer to flat_binder_object structs */ + binder_uintptr_t offsets; + } ptr; + __u8 buf[8]; + } data; +}; + +struct binder_ptr_cookie { + binder_uintptr_t ptr; + binder_uintptr_t cookie; +}; + +struct binder_handle_cookie { + __u32 handle; + binder_uintptr_t cookie; +} __packed; + +struct binder_pri_desc { + __s32 priority; + __u32 desc; +}; + +struct binder_pri_ptr_cookie { + __s32 priority; + binder_uintptr_t ptr; + binder_uintptr_t cookie; +}; + +enum binder_driver_return_protocol { + BR_ERROR = _IOR('r', 0, __s32), + /* + * int: error code + */ + + BR_OK = _IO('r', 1), + /* No parameters! 
*/ + + BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), + BR_REPLY = _IOR('r', 3, struct binder_transaction_data), + /* + * binder_transaction_data: the received command. + */ + + BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), + /* + * not currently supported + * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. + * Else the remote object has acquired a primary reference. + */ + + BR_DEAD_REPLY = _IO('r', 5), + /* + * The target of the last transaction (either a bcTRANSACTION or + * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. + */ + + BR_TRANSACTION_COMPLETE = _IO('r', 6), + /* + * No parameters... always refers to the last transaction requested + * (including replies). Note that this will be sent even for + * asynchronous transactions. + */ + + BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), + BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), + BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), + BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), + /* + * void *: ptr to binder + * void *: cookie for binder + */ + + BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), + /* + * not currently supported + * int: priority + * void *: ptr to binder + * void *: cookie for binder + */ + + BR_NOOP = _IO('r', 12), + /* + * No parameters. Do nothing and examine the next command. It exists + * primarily so that we can replace it with a BR_SPAWN_LOOPER command. + */ + + BR_SPAWN_LOOPER = _IO('r', 13), + /* + * No parameters. The driver has determined that a process has no + * threads waiting to service incoming transactions. When a process + * receives this command, it must spawn a new service thread and + * register it via bcENTER_LOOPER. + */ + + BR_FINISHED = _IO('r', 14), + /* + * not currently supported + * stop threadpool thread + */ + + BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t), + /* + * void *: cookie + */ + BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t), + /* + * void *: cookie + */ + + BR_FAILED_REPLY = _IO('r', 17), + /* + * The the last transaction (either a bcTRANSACTION or + * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. + */ +}; + +enum binder_driver_command_protocol { + BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), + BC_REPLY = _IOW('c', 1, struct binder_transaction_data), + /* + * binder_transaction_data: the sent command. + */ + + BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), + /* + * not currently supported + * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. + * Else you have acquired a primary reference on the object. + */ + + BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t), + /* + * void *: ptr to transaction data received on a read + */ + + BC_INCREFS = _IOW('c', 4, __u32), + BC_ACQUIRE = _IOW('c', 5, __u32), + BC_RELEASE = _IOW('c', 6, __u32), + BC_DECREFS = _IOW('c', 7, __u32), + /* + * int: descriptor + */ + + BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), + BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), + /* + * void *: ptr to binder + * void *: cookie for binder + */ + + BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), + /* + * not currently supported + * int: priority + * int: descriptor + */ + + BC_REGISTER_LOOPER = _IO('c', 11), + /* + * No parameters. + * Register a spawned looper thread with the device. + */ + + BC_ENTER_LOOPER = _IO('c', 12), + BC_EXIT_LOOPER = _IO('c', 13), + /* + * No parameters. + * These two commands are sent as an application-level thread + * enters and exits the binder loop, respectively. 
They are + * used so the binder can have an accurate count of the number + * of looping threads it has available. + */ + + BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, + struct binder_handle_cookie), + /* + * int: handle + * void *: cookie + */ + + BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, + struct binder_handle_cookie), + /* + * int: handle + * void *: cookie + */ + + BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t), + /* + * void *: cookie + */ +}; + +#endif /* _UAPI_LINUX_BINDER_H */ + diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index d4dbef14d4df..d3475e1f15ec 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -322,9 +322,15 @@ enum { #define AUDIT_STATUS_BACKLOG_LIMIT 0x0010 #define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020 -#define AUDIT_VERSION_BACKLOG_LIMIT 1 -#define AUDIT_VERSION_BACKLOG_WAIT_TIME 2 -#define AUDIT_VERSION_LATEST AUDIT_VERSION_BACKLOG_WAIT_TIME +#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001 +#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002 +#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \ + AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME) + +/* deprecated: AUDIT_VERSION_* */ +#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL +#define AUDIT_VERSION_BACKLOG_LIMIT AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT +#define AUDIT_VERSION_BACKLOG_WAIT_TIME AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME /* Failure-to-log actions */ #define AUDIT_FAIL_SILENT 0 @@ -365,7 +371,9 @@ enum { #define AUDIT_ARCH_PARISC (EM_PARISC) #define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT) #define AUDIT_ARCH_PPC (EM_PPC) +/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */ #define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT) +#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_ARCH_S390 (EM_S390) #define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT) #define AUDIT_ARCH_SH (EM_SH) @@ -404,7 +412,10 @@ struct audit_status { __u32 backlog_limit; /* waiting messages limit */ __u32 lost; /* messages lost */ __u32 backlog; /* messages waiting in queue */ - __u32 version; /* audit api version number */ + union { + __u32 version; /* deprecated: audit api version num */ + __u32 feature_bitmap; /* bitmap of kernel audit features */ + }; __u32 backlog_wait_time;/* message queue wait timeout */ }; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d18316f9e9c4..45da7ec7d274 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -82,7 +82,7 @@ enum bpf_cmd { /* create or update key/value pair in a given map * err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size) - * Using attr->map_fd, attr->key, attr->value + * Using attr->map_fd, attr->key, attr->value, attr->flags * returns zero or negative error */ BPF_MAP_UPDATE_ELEM, @@ -111,12 +111,20 @@ enum bpf_cmd { enum bpf_map_type { BPF_MAP_TYPE_UNSPEC, + BPF_MAP_TYPE_HASH, + BPF_MAP_TYPE_ARRAY, }; enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, + BPF_PROG_TYPE_SOCKET_FILTER, }; +/* flags for BPF_MAP_UPDATE_ELEM command */ +#define BPF_ANY 0 /* create new element or update existing */ +#define BPF_NOEXIST 1 /* create new element if it didn't exist */ +#define BPF_EXIST 2 /* update existing element */ + union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ @@ -132,6 +140,7 @@ union bpf_attr { __aligned_u64 value; __aligned_u64 next_key; }; + __u64 flags; }; struct { /* anonymous struct used by BPF_PROG_LOAD command 
*/ @@ -150,6 +159,9 @@ union bpf_attr { */ enum bpf_func_id { BPF_FUNC_unspec, + BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */ + BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */ + BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */ __BPF_FUNC_MAX_ID, }; diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 2f47824e7a36..611e1c5893b4 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -157,6 +157,7 @@ struct btrfs_ioctl_dev_replace_status_params { #define BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR 0 #define BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED 1 #define BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED 2 +#define BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS 3 struct btrfs_ioctl_dev_replace_args { __u64 cmd; /* in */ __u64 result; /* out */ diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h index c247446ab25a..1c508be9687f 100644 --- a/include/uapi/linux/can/error.h +++ b/include/uapi/linux/can/error.h @@ -71,6 +71,7 @@ #define CAN_ERR_CRTL_TX_PASSIVE 0x20 /* reached error passive status TX */ /* (at least one error counter exceeds */ /* the protocol-defined level of 127) */ +#define CAN_ERR_CRTL_ACTIVE 0x40 /* recovered to error active state */ /* error in CAN protocol (type) / data[2] */ #define CAN_ERR_PROT_UNSPEC 0x00 /* unspecified */ diff --git a/include/uapi/linux/dlmconstants.h b/include/uapi/linux/dlmconstants.h index 47bf08dc7566..2857bdc5b27b 100644 --- a/include/uapi/linux/dlmconstants.h +++ b/include/uapi/linux/dlmconstants.h @@ -114,7 +114,7 @@ * * DLM_LKF_ORPHAN * - * not yet implemented + * Acquire an orphan lock. * * DLM_LKF_ALTPR * diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 99b43056a6fe..5f66d9c2889d 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -534,6 +534,7 @@ struct ethtool_pauseparam { * @ETH_SS_NTUPLE_FILTERS: Previously used with %ETHTOOL_GRXNTUPLE; * now deprecated * @ETH_SS_FEATURES: Device feature names + * @ETH_SS_RSS_HASH_FUNCS: RSS hush function names */ enum ethtool_stringset { ETH_SS_TEST = 0, @@ -541,6 +542,7 @@ enum ethtool_stringset { ETH_SS_PRIV_FLAGS, ETH_SS_NTUPLE_FILTERS, ETH_SS_FEATURES, + ETH_SS_RSS_HASH_FUNCS, }; /** @@ -884,6 +886,8 @@ struct ethtool_rxfh_indir { * @key_size: On entry, the array size of the user buffer for the hash key, * which may be zero. On return from %ETHTOOL_GRSSH, the size of the * hardware hash key. + * @hfunc: Defines the current RSS hash function used by HW (or to be set to). + * Valid values are one of the %ETH_RSS_HASH_*. * @rsvd: Reserved for future extensions. * @rss_config: RX ring/queue index for each hash value i.e., indirection table * of @indir_size __u32 elements, followed by hash key of @key_size @@ -893,14 +897,16 @@ struct ethtool_rxfh_indir { * size should be returned. For %ETHTOOL_SRSSH, an @indir_size of * %ETH_RXFH_INDIR_NO_CHANGE means that indir table setting is not requested * and a @indir_size of zero means the indir table should be reset to default - * values. + * values. An hfunc of zero means that hash function setting is not requested. 
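A hedged userspace sketch of the ETHTOOL_GRSSH query described by the comment above: passing zero sizes is the usual first step to learn the indirection-table and key sizes (and now the hash function) before allocating a full buffer (illustrative only, not part of this patch; the AF_INET socket fd and the "eth0" name are assumptions):

/* First-pass size/hfunc query. Assumes <stdio.h>, <string.h>, <net/if.h>,
 * <sys/ioctl.h>, <linux/ethtool.h> and <linux/sockios.h>. */
struct ethtool_rxfh rxfh = { .cmd = ETHTOOL_GRSSH };	/* sizes left at 0 */
struct ifreq ifr = { 0 };

strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
ifr.ifr_data = (void *)&rxfh;
if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
	printf("indir_size=%u key_size=%u hfunc=%u\n",
	       rxfh.indir_size, rxfh.key_size, (unsigned int)rxfh.hfunc);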
*/ struct ethtool_rxfh { __u32 cmd; __u32 rss_context; __u32 indir_size; __u32 key_size; - __u32 rsvd[2]; + __u8 hfunc; + __u8 rsvd8[3]; + __u32 rsvd32; __u32 rss_config[0]; }; #define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff @@ -1213,6 +1219,10 @@ enum ethtool_sfeatures_retval_bits { #define SUPPORTED_40000baseCR4_Full (1 << 24) #define SUPPORTED_40000baseSR4_Full (1 << 25) #define SUPPORTED_40000baseLR4_Full (1 << 26) +#define SUPPORTED_56000baseKR4_Full (1 << 27) +#define SUPPORTED_56000baseCR4_Full (1 << 28) +#define SUPPORTED_56000baseSR4_Full (1 << 29) +#define SUPPORTED_56000baseLR4_Full (1 << 30) #define ADVERTISED_10baseT_Half (1 << 0) #define ADVERTISED_10baseT_Full (1 << 1) @@ -1241,6 +1251,10 @@ enum ethtool_sfeatures_retval_bits { #define ADVERTISED_40000baseCR4_Full (1 << 24) #define ADVERTISED_40000baseSR4_Full (1 << 25) #define ADVERTISED_40000baseLR4_Full (1 << 26) +#define ADVERTISED_56000baseKR4_Full (1 << 27) +#define ADVERTISED_56000baseCR4_Full (1 << 28) +#define ADVERTISED_56000baseSR4_Full (1 << 29) +#define ADVERTISED_56000baseLR4_Full (1 << 30) /* The following are all involved in forcing a particular link * mode for the device for setting things. When getting the @@ -1248,12 +1262,16 @@ enum ethtool_sfeatures_retval_bits { * it was forced up into this mode or autonegotiated. */ -/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ +/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */ #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 #define SPEED_2500 2500 #define SPEED_10000 10000 +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#define SPEED_56000 56000 + #define SPEED_UNKNOWN -1 /* Duplex, half or full. */ @@ -1343,6 +1361,10 @@ enum ethtool_sfeatures_retval_bits { #define ETH_MODULE_SFF_8079_LEN 256 #define ETH_MODULE_SFF_8472 0x2 #define ETH_MODULE_SFF_8472_LEN 512 +#define ETH_MODULE_SFF_8636 0x3 +#define ETH_MODULE_SFF_8636_LEN 256 +#define ETH_MODULE_SFF_8436 0x4 +#define ETH_MODULE_SFF_8436_LEN 256 /* Reset flags */ /* The reset() operation must clear the flags for the components which diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h index 0a8e6badb29b..bb1cb73c927a 100644 --- a/include/uapi/linux/hyperv.h +++ b/include/uapi/linux/hyperv.h @@ -134,6 +134,7 @@ struct hv_start_fcopy { struct hv_do_fcopy { struct hv_fcopy_hdr hdr; + __u32 pad; __u64 offset; __u32 size; __u8 data[DATA_FRAGMENT]; diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h index 0f9acce5b1ff..f2acd2fde1f3 100644 --- a/include/uapi/linux/if_alg.h +++ b/include/uapi/linux/if_alg.h @@ -32,6 +32,8 @@ struct af_alg_iv { #define ALG_SET_KEY 1 #define ALG_SET_IV 2 #define ALG_SET_OP 3 +#define ALG_SET_AEAD_ASSOCLEN 4 +#define ALG_SET_AEAD_AUTHSIZE 5 /* Operations */ #define ALG_OP_DECRYPT 0 diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index da17e456908d..b03ee8f62d3c 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -105,6 +105,7 @@ struct __fdb_entry { #define BRIDGE_MODE_VEB 0 /* Default loopback mode */ #define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#define BRIDGE_MODE_UNDEF 0xFFFF /* mode undefined */ /* Bridge management nested attributes * [IFLA_AF_SPEC] = { diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 0bdb77e16875..f7d0d2d7173a 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -145,6 +145,7 @@ enum { IFLA_CARRIER, IFLA_PHYS_PORT_ID, IFLA_CARRIER_CHANGES, + 
IFLA_PHYS_SWITCH_ID, __IFLA_MAX }; @@ -243,6 +244,8 @@ enum { IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ IFLA_BRPORT_LEARNING, /* mac learning */ IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ + IFLA_BRPORT_PROXYARP, /* proxy ARP */ + IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */ __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -329,6 +332,21 @@ enum macvlan_macaddr_mode { #define MACVLAN_FLAG_NOPROMISC 1 +/* IPVLAN section */ +enum { + IFLA_IPVLAN_UNSPEC, + IFLA_IPVLAN_MODE, + __IFLA_IPVLAN_MAX +}; + +#define IFLA_IPVLAN_MAX (__IFLA_IPVLAN_MAX - 1) + +enum ipvlan_mode { + IPVLAN_MODE_L2 = 0, + IPVLAN_MODE_L3, + IPVLAN_MODE_MAX +}; + /* VXLAN section */ enum { IFLA_VXLAN_UNSPEC, diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h index e9502dd1ee2c..50ae24335444 100644 --- a/include/uapi/linux/if_tun.h +++ b/include/uapi/linux/if_tun.h @@ -22,21 +22,11 @@ /* Read queue size */ #define TUN_READQ_SIZE 500 - -/* TUN device flags */ -#define TUN_TUN_DEV 0x0001 -#define TUN_TAP_DEV 0x0002 +/* TUN device type flags: deprecated. Use IFF_TUN/IFF_TAP instead. */ +#define TUN_TUN_DEV IFF_TUN +#define TUN_TAP_DEV IFF_TAP #define TUN_TYPE_MASK 0x000f -#define TUN_FASYNC 0x0010 -#define TUN_NOCHECKSUM 0x0020 -#define TUN_NO_PI 0x0040 -/* This flag has no real effect */ -#define TUN_ONE_QUEUE 0x0080 -#define TUN_PERSIST 0x0100 -#define TUN_VNET_HDR 0x0200 -#define TUN_TAP_MQ 0x0400 - /* Ioctl defines */ #define TUNSETNOCSUM _IOW('T', 200, int) #define TUNSETDEBUG _IOW('T', 201, int) @@ -58,6 +48,8 @@ #define TUNSETQUEUE _IOW('T', 217, int) #define TUNSETIFINDEX _IOW('T', 218, unsigned int) #define TUNGETFILTER _IOR('T', 219, struct sock_fprog) +#define TUNSETVNETLE _IOW('T', 220, int) +#define TUNGETVNETLE _IOR('T', 221, int) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 280d9e092283..bd3cc11a431f 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -69,6 +69,7 @@ enum tunnel_encap_types { #define TUNNEL_ENCAP_FLAG_CSUM (1<<0) #define TUNNEL_ENCAP_FLAG_CSUM6 (1<<1) +#define TUNNEL_ENCAP_FLAG_REMCSUM (1<<2) /* SIT-mode i_flags */ #define SIT_ISATAP 0x0001 diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index efa2666f4b8a..e863d088b9a5 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -164,6 +164,7 @@ enum { DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL, DEVCONF_SUPPRESS_FRAG_NDISC, DEVCONF_ACCEPT_RA_FROM_LOCAL, + DEVCONF_USE_OPTIMISTIC, DEVCONF_MAX }; diff --git a/include/linux/kcmp.h b/include/uapi/linux/kcmp.h index 2dcd1b3aafc8..84df14b37360 100644 --- a/include/linux/kcmp.h +++ b/include/uapi/linux/kcmp.h @@ -1,5 +1,5 @@ -#ifndef _LINUX_KCMP_H -#define _LINUX_KCMP_H +#ifndef _UAPI_LINUX_KCMP_H +#define _UAPI_LINUX_KCMP_H /* Comparison type */ enum kcmp_type { @@ -14,4 +14,4 @@ enum kcmp_type { KCMP_TYPES, }; -#endif /* _LINUX_KCMP_H */ +#endif /* _UAPI_LINUX_KCMP_H */ diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h new file mode 100644 index 000000000000..7acef41fc209 --- /dev/null +++ b/include/uapi/linux/kfd_ioctl.h @@ -0,0 +1,154 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef KFD_IOCTL_H_INCLUDED +#define KFD_IOCTL_H_INCLUDED + +#include <linux/types.h> +#include <linux/ioctl.h> + +#define KFD_IOCTL_MAJOR_VERSION 1 +#define KFD_IOCTL_MINOR_VERSION 0 + +struct kfd_ioctl_get_version_args { + uint32_t major_version; /* from KFD */ + uint32_t minor_version; /* from KFD */ +}; + +/* For kfd_ioctl_create_queue_args.queue_type. */ +#define KFD_IOC_QUEUE_TYPE_COMPUTE 0 +#define KFD_IOC_QUEUE_TYPE_SDMA 1 +#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 2 + +#define KFD_MAX_QUEUE_PERCENTAGE 100 +#define KFD_MAX_QUEUE_PRIORITY 15 + +struct kfd_ioctl_create_queue_args { + uint64_t ring_base_address; /* to KFD */ + uint64_t write_pointer_address; /* from KFD */ + uint64_t read_pointer_address; /* from KFD */ + uint64_t doorbell_offset; /* from KFD */ + + uint32_t ring_size; /* to KFD */ + uint32_t gpu_id; /* to KFD */ + uint32_t queue_type; /* to KFD */ + uint32_t queue_percentage; /* to KFD */ + uint32_t queue_priority; /* to KFD */ + uint32_t queue_id; /* from KFD */ + + uint64_t eop_buffer_address; /* to KFD */ + uint64_t eop_buffer_size; /* to KFD */ + uint64_t ctx_save_restore_address; /* to KFD */ + uint64_t ctx_save_restore_size; /* to KFD */ +}; + +struct kfd_ioctl_destroy_queue_args { + uint32_t queue_id; /* to KFD */ + uint32_t pad; +}; + +struct kfd_ioctl_update_queue_args { + uint64_t ring_base_address; /* to KFD */ + + uint32_t queue_id; /* to KFD */ + uint32_t ring_size; /* to KFD */ + uint32_t queue_percentage; /* to KFD */ + uint32_t queue_priority; /* to KFD */ +}; + +/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */ +#define KFD_IOC_CACHE_POLICY_COHERENT 0 +#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1 + +struct kfd_ioctl_set_memory_policy_args { + uint64_t alternate_aperture_base; /* to KFD */ + uint64_t alternate_aperture_size; /* to KFD */ + + uint32_t gpu_id; /* to KFD */ + uint32_t default_policy; /* to KFD */ + uint32_t alternate_policy; /* to KFD */ + uint32_t pad; +}; + +/* + * All counters are monotonic. They are used for profiling of compute jobs. + * The profiling is done by userspace. + * + * In case of GPU reset, the counter should not be affected. 
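A hedged userspace sketch of the clock-counter query whose argument block follows (illustrative only, not part of this patch). The /dev/kfd node and a gpu_id discovered from the KFD topology are assumed here:

/* Read the GPU/system clock counters for one node. Assumes <stdio.h>,
 * <fcntl.h>, <sys/ioctl.h> and this header; gpu_id comes from elsewhere. */
struct kfd_ioctl_get_clock_counters_args args = { .gpu_id = gpu_id };
int fd = open("/dev/kfd", O_RDWR);

if (fd >= 0 && ioctl(fd, KFD_IOC_GET_CLOCK_COUNTERS, &args) == 0)
	printf("gpu=%llu system=%llu freq=%llu\n",
	       (unsigned long long)args.gpu_clock_counter,
	       (unsigned long long)args.system_clock_counter,
	       (unsigned long long)args.system_clock_freq);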
+ */ + +struct kfd_ioctl_get_clock_counters_args { + uint64_t gpu_clock_counter; /* from KFD */ + uint64_t cpu_clock_counter; /* from KFD */ + uint64_t system_clock_counter; /* from KFD */ + uint64_t system_clock_freq; /* from KFD */ + + uint32_t gpu_id; /* to KFD */ + uint32_t pad; +}; + +#define NUM_OF_SUPPORTED_GPUS 7 + +struct kfd_process_device_apertures { + uint64_t lds_base; /* from KFD */ + uint64_t lds_limit; /* from KFD */ + uint64_t scratch_base; /* from KFD */ + uint64_t scratch_limit; /* from KFD */ + uint64_t gpuvm_base; /* from KFD */ + uint64_t gpuvm_limit; /* from KFD */ + uint32_t gpu_id; /* from KFD */ + uint32_t pad; +}; + +struct kfd_ioctl_get_process_apertures_args { + struct kfd_process_device_apertures + process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */ + + /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */ + uint32_t num_of_nodes; + uint32_t pad; +}; + +#define KFD_IOC_MAGIC 'K' + +#define KFD_IOC_GET_VERSION \ + _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args) + +#define KFD_IOC_CREATE_QUEUE \ + _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args) + +#define KFD_IOC_DESTROY_QUEUE \ + _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args) + +#define KFD_IOC_SET_MEMORY_POLICY \ + _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args) + +#define KFD_IOC_GET_CLOCK_COUNTERS \ + _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args) + +#define KFD_IOC_GET_PROCESS_APERTURES \ + _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args) + +#define KFD_IOC_UPDATE_QUEUE \ + _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args) + +#endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 60768822b140..a37fd1224f36 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -647,11 +647,7 @@ struct kvm_ppc_smmu_info { #define KVM_CAP_MP_STATE 14 #define KVM_CAP_COALESCED_MMIO 15 #define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ -#define KVM_CAP_DEVICE_ASSIGNMENT 17 #define KVM_CAP_IOMMU 18 -#ifdef __KVM_HAVE_MSI -#define KVM_CAP_DEVICE_MSI 20 -#endif /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 #define KVM_CAP_USER_NMI 22 @@ -663,10 +659,6 @@ struct kvm_ppc_smmu_info { #endif #define KVM_CAP_IRQ_ROUTING 25 #define KVM_CAP_IRQ_INJECT_STATUS 26 -#define KVM_CAP_DEVICE_DEASSIGNMENT 27 -#ifdef __KVM_HAVE_MSIX -#define KVM_CAP_DEVICE_MSIX 28 -#endif #define KVM_CAP_ASSIGN_DEV_IRQ 29 /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 @@ -1107,9 +1099,6 @@ struct kvm_s390_ucas_mapping { #define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64) #define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64) #define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce) -/* IA64 stack access */ -#define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *) -#define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *) /* Available with KVM_CAP_VCPU_EVENTS */ #define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events) #define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events) diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 77c60311a6c6..7d664ea85ebd 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -72,5 +72,6 @@ #define MTD_INODE_FS_MAGIC 0x11307854 #define ANON_INODE_FS_MAGIC 0x09041934 #define BTRFS_TEST_MAGIC 0x73727279 +#define NSFS_MAGIC 0x6e736673 #endif /* __LINUX_MAGIC_H__ */ diff --git 
a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h new file mode 100644 index 000000000000..23b40908be30 --- /dev/null +++ b/include/uapi/linux/media-bus-format.h @@ -0,0 +1,125 @@ +/* + * Media Bus API header + * + * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MEDIA_BUS_FORMAT_H +#define __LINUX_MEDIA_BUS_FORMAT_H + +/* + * These bus formats uniquely identify data formats on the data bus. Format 0 + * is reserved, MEDIA_BUS_FMT_FIXED shall be used by host-client pairs, where + * the data format is fixed. Additionally, "2X8" means that one pixel is + * transferred in two 8-bit samples, "BE" or "LE" specify in which order those + * samples are transferred over the bus: "LE" means that the least significant + * bits are transferred first, "BE" means that the most significant bits are + * transferred first, and "PADHI" and "PADLO" define which bits - low or high, + * in the incomplete high byte, are filled with padding bits. + * + * The bus formats are grouped by type, bus_width, bits per component, samples + * per pixel and order of subsamples. Numerical values are sorted using generic + * numerical sort order (8 thus comes before 10). + * + * As their value can't change when a new bus format is inserted in the + * enumeration, the bus formats are explicitly given a numerical value. The next + * free values for each category are listed below, update them when inserting + * new pixel codes. + */ + +#define MEDIA_BUS_FMT_FIXED 0x0001 + +/* RGB - next is 0x100e */ +#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001 +#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002 +#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE 0x1003 +#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE 0x1004 +#define MEDIA_BUS_FMT_BGR565_2X8_BE 0x1005 +#define MEDIA_BUS_FMT_BGR565_2X8_LE 0x1006 +#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007 +#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008 +#define MEDIA_BUS_FMT_RGB666_1X18 0x1009 +#define MEDIA_BUS_FMT_RGB888_1X24 0x100a +#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b +#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c +#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d + +/* YUV (including grey) - next is 0x2024 */ +#define MEDIA_BUS_FMT_Y8_1X8 0x2001 +#define MEDIA_BUS_FMT_UV8_1X8 0x2015 +#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002 +#define MEDIA_BUS_FMT_VYUY8_1_5X8 0x2003 +#define MEDIA_BUS_FMT_YUYV8_1_5X8 0x2004 +#define MEDIA_BUS_FMT_YVYU8_1_5X8 0x2005 +#define MEDIA_BUS_FMT_UYVY8_2X8 0x2006 +#define MEDIA_BUS_FMT_VYUY8_2X8 0x2007 +#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008 +#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009 +#define MEDIA_BUS_FMT_Y10_1X10 0x200a +#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018 +#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019 +#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b +#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c +#define MEDIA_BUS_FMT_Y12_1X12 0x2013 +#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f +#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010 +#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011 +#define MEDIA_BUS_FMT_YVYU8_1X16 0x2012 +#define MEDIA_BUS_FMT_YDYUYDYV8_1X16 0x2014 +#define MEDIA_BUS_FMT_UYVY10_1X20 0x201a +#define MEDIA_BUS_FMT_VYUY10_1X20 0x201b +#define MEDIA_BUS_FMT_YUYV10_1X20 0x200d +#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e +#define MEDIA_BUS_FMT_YUV10_1X30 0x2016 +#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017 +#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c +#define 
MEDIA_BUS_FMT_VYUY12_2X12 0x201d +#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e +#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f +#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020 +#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021 +#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022 +#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023 + +/* Bayer - next is 0x3019 */ +#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001 +#define MEDIA_BUS_FMT_SGBRG8_1X8 0x3013 +#define MEDIA_BUS_FMT_SGRBG8_1X8 0x3002 +#define MEDIA_BUS_FMT_SRGGB8_1X8 0x3014 +#define MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8 0x3015 +#define MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8 0x3016 +#define MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8 0x3017 +#define MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8 0x3018 +#define MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 0x300b +#define MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 0x300c +#define MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 0x3009 +#define MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 0x300d +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE 0x3003 +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE 0x3004 +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE 0x3005 +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE 0x3006 +#define MEDIA_BUS_FMT_SBGGR10_1X10 0x3007 +#define MEDIA_BUS_FMT_SGBRG10_1X10 0x300e +#define MEDIA_BUS_FMT_SGRBG10_1X10 0x300a +#define MEDIA_BUS_FMT_SRGGB10_1X10 0x300f +#define MEDIA_BUS_FMT_SBGGR12_1X12 0x3008 +#define MEDIA_BUS_FMT_SGBRG12_1X12 0x3010 +#define MEDIA_BUS_FMT_SGRBG12_1X12 0x3011 +#define MEDIA_BUS_FMT_SRGGB12_1X12 0x3012 + +/* JPEG compressed formats - next is 0x4002 */ +#define MEDIA_BUS_FMT_JPEG_1X8 0x4001 + +/* Vendor specific formats - next is 0x5002 */ + +/* S5C73M3 sensor specific interleaved UYVY and JPEG */ +#define MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8 0x5001 + +/* HSV - next is 0x6002 */ +#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001 + +#endif /* __LINUX_MEDIA_BUS_FORMAT_H */ diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h index a70375526578..f51c8001dbe5 100644 --- a/include/uapi/linux/msg.h +++ b/include/uapi/linux/msg.h @@ -51,16 +51,28 @@ struct msginfo { }; /* - * Scaling factor to compute msgmni: - * the memory dedicated to msg queues (msgmni * msgmnb) should occupy - * at most 1/MSG_MEM_SCALE of the lowmem (see the formula in ipc/msg.c): - * up to 8MB : msgmni = 16 (MSGMNI) - * 4 GB : msgmni = 8K - * more than 16 GB : msgmni = 32K (IPCMNI) + * MSGMNI, MSGMAX and MSGMNB are default values which can be + * modified by sysctl. + * + * MSGMNI is the upper limit for the number of messages queues per + * namespace. + * It has been chosen to be as large possible without facilitating + * scenarios where userspace causes overflows when adjusting the limits via + * operations of the form retrieve current limit; add X; update limit". + * + * MSGMNB is the default size of a new message queue. Non-root tasks can + * decrease the size with msgctl(IPC_SET), root tasks + * (actually: CAP_SYS_RESOURCE) can both increase and decrease the queue + * size. The optimal value is application dependent. + * 16384 is used because it was always used (since 0.99.10) + * + * MAXMAX is the maximum size of an individual message, it's a global + * (per-namespace) limit that applies for all message queues. + * It's set to 1/2 of MSGMNB, to ensure that at least two messages fit into + * the queue. This is also an arbitrary choice (since 2.6.0). 
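A hedged sketch of the msgctl(IPC_SET) adjustment the comment above describes (illustrative only, not part of this patch):

/* Shrink a private queue's byte limit below the MSGMNB default; growing it
 * past MSGMNB needs CAP_SYS_RESOURCE. Assumes <sys/ipc.h> and <sys/msg.h>. */
int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
struct msqid_ds ds;

if (id >= 0 && msgctl(id, IPC_STAT, &ds) == 0) {
	ds.msg_qbytes = 8192;		/* half of the 16384 default */
	msgctl(id, IPC_SET, &ds);
}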
*/ -#define MSG_MEM_SCALE 32 -#define MSGMNI 16 /* <= IPCMNI */ /* max # of msg queue identifiers */ +#define MSGMNI 32000 /* <= IPCMNI */ /* max # of msg queue identifiers */ #define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */ #define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */ diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 4a1d7e96dfe3..f3d77f9f1e0b 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -35,11 +35,11 @@ enum { */ #define NTF_USE 0x01 -#define NTF_PROXY 0x08 /* == ATF_PUBL */ -#define NTF_ROUTER 0x80 - #define NTF_SELF 0x02 #define NTF_MASTER 0x04 +#define NTF_PROXY 0x08 /* == ATF_PUBL */ +#define NTF_EXT_LEARNED 0x10 +#define NTF_ROUTER 0x80 /* * Neighbor Cache Entry States. diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index ff354021bb69..edbc888ceb51 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -23,8 +23,9 @@ enum { SOF_TIMESTAMPING_OPT_ID = (1<<7), SOF_TIMESTAMPING_TX_SCHED = (1<<8), SOF_TIMESTAMPING_TX_ACK = (1<<9), + SOF_TIMESTAMPING_OPT_CMSG = (1<<10), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_TX_ACK, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_CMSG, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h index ca03119111a2..5ab4e60894cf 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -256,11 +256,17 @@ enum { IPSET_COUNTER_GT, }; -struct ip_set_counter_match { +/* Backward compatibility for set match v3 */ +struct ip_set_counter_match0 { __u8 op; __u64 value; }; +struct ip_set_counter_match { + __aligned_u64 value; + __u8 op; +}; + /* Interface to iptables/ip6tables */ #define SO_IP_SET 83 diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index f31fe7b660a5..832bc46db78b 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -579,6 +579,7 @@ enum nft_exthdr_attributes { * @NFT_META_CPU: cpu id through smp_processor_id() * @NFT_META_IIFGROUP: packet input interface group * @NFT_META_OIFGROUP: packet output interface group + * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid) */ enum nft_meta_keys { NFT_META_LEN, @@ -604,6 +605,7 @@ enum nft_meta_keys { NFT_META_CPU, NFT_META_IIFGROUP, NFT_META_OIFGROUP, + NFT_META_CGROUP, }; /** @@ -838,6 +840,22 @@ enum nft_masq_attributes { #define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1) /** + * enum nft_redir_attributes - nf_tables redirect expression netlink attributes + * + * @NFTA_REDIR_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers) + * @NFTA_REDIR_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers) + * @NFTA_REDIR_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) + */ +enum nft_redir_attributes { + NFTA_REDIR_UNSPEC, + NFTA_REDIR_REG_PROTO_MIN, + NFTA_REDIR_REG_PROTO_MAX, + NFTA_REDIR_FLAGS, + __NFTA_REDIR_MAX +}; +#define NFTA_REDIR_MAX (__NFTA_REDIR_MAX - 1) + +/** * enum nft_gen_attributes - nf_tables ruleset generation attributes * * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32) diff --git a/include/uapi/linux/netfilter/xt_set.h b/include/uapi/linux/netfilter/xt_set.h index d6a1df1f2947..d4e02348384c 100644 --- a/include/uapi/linux/netfilter/xt_set.h +++ 
b/include/uapi/linux/netfilter/xt_set.h @@ -66,8 +66,8 @@ struct xt_set_info_target_v2 { struct xt_set_info_match_v3 { struct xt_set_info match_set; - struct ip_set_counter_match packets; - struct ip_set_counter_match bytes; + struct ip_set_counter_match0 packets; + struct ip_set_counter_match0 bytes; __u32 flags; }; @@ -81,4 +81,13 @@ struct xt_set_info_target_v3 { __u32 timeout; }; +/* Revision 4 match */ + +struct xt_set_info_match_v4 { + struct xt_set_info match_set; + struct ip_set_counter_match packets; + struct ip_set_counter_match bytes; + __u32 flags; +}; + #endif /*_XT_SET_H*/ diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 9b19b4461928..8119255feae4 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -116,6 +116,7 @@ enum nfc_commands { NFC_EVENT_SE_TRANSACTION, NFC_CMD_GET_SE, NFC_CMD_SE_IO, + NFC_CMD_ACTIVATE_TARGET, /* private: internal use only */ __NFC_CMD_AFTER_LAST }; @@ -196,15 +197,19 @@ enum nfc_sdp_attr { }; #define NFC_SDP_ATTR_MAX (__NFC_SDP_ATTR_AFTER_LAST - 1) -#define NFC_DEVICE_NAME_MAXSIZE 8 -#define NFC_NFCID1_MAXSIZE 10 -#define NFC_NFCID2_MAXSIZE 8 -#define NFC_NFCID3_MAXSIZE 10 -#define NFC_SENSB_RES_MAXSIZE 12 -#define NFC_SENSF_RES_MAXSIZE 18 -#define NFC_GB_MAXSIZE 48 -#define NFC_FIRMWARE_NAME_MAXSIZE 32 -#define NFC_ISO15693_UID_MAXSIZE 8 +#define NFC_DEVICE_NAME_MAXSIZE 8 +#define NFC_NFCID1_MAXSIZE 10 +#define NFC_NFCID2_MAXSIZE 8 +#define NFC_NFCID3_MAXSIZE 10 +#define NFC_SENSB_RES_MAXSIZE 12 +#define NFC_SENSF_RES_MAXSIZE 18 +#define NFC_ATR_REQ_MAXSIZE 64 +#define NFC_ATR_RES_MAXSIZE 64 +#define NFC_ATR_REQ_GB_MAXSIZE 48 +#define NFC_ATR_RES_GB_MAXSIZE 47 +#define NFC_GB_MAXSIZE 48 +#define NFC_FIRMWARE_NAME_MAXSIZE 32 +#define NFC_ISO15693_UID_MAXSIZE 8 /* NFC protocols */ #define NFC_PROTO_JEWEL 1 diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h index a6f453c740b8..1fdc95bb2375 100644 --- a/include/uapi/linux/nfsd/debug.h +++ b/include/uapi/linux/nfsd/debug.h @@ -15,7 +15,7 @@ * Enable debugging for nfsd. * Requires RPC_DEBUG. */ -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define NFSD_DEBUG 1 #endif diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 4b28dc07bcb1..b37bd5a1cb82 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -227,7 +227,11 @@ * the interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC * or, if no MAC address given, all stations, on the interface identified - * by %NL80211_ATTR_IFINDEX. + * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and + * %NL80211_ATTR_REASON_CODE can optionally be used to specify which type + * of disconnection indication should be sent to the station + * (Deauthentication or Disassociation frame and reason code for that + * frame). * * @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to * destination %NL80211_ATTR_MAC on the interface identified by @@ -639,7 +643,18 @@ * @NL80211_CMD_CH_SWITCH_NOTIFY: An AP or GO may decide to switch channels * independently of the userspace SME, send this event indicating * %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ and the - * attributes determining channel width. + * attributes determining channel width. 
This indication may also be + * sent when a remotely-initiated switch (e.g., when a STA receives a CSA + * from the remote AP) is completed; + * + * @NL80211_CMD_CH_SWITCH_STARTED_NOTIFY: Notify that a channel switch + * has been started on an interface, regardless of the initiator + * (ie. whether it was requested from a remote device or + * initiated on our own). It indicates that + * %NL80211_ATTR_IFINDEX will be on %NL80211_ATTR_WIPHY_FREQ + * after %NL80211_ATTR_CH_SWITCH_COUNT TBTT's. The userspace may + * decide to react to this indication by requesting other + * interfaces to change channel as well. * * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by * its %NL80211_ATTR_WDEV identifier. It must have been created with @@ -738,6 +753,27 @@ * before removing a station entry entirely, or before disassociating * or similar, cleanup will happen in the driver/device in this case. * + * @NL80211_CMD_GET_MPP: Get mesh path attributes for mesh proxy path to + * destination %NL80211_ATTR_MAC on the interface identified by + * %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_JOIN_OCB: Join the OCB network. The center frequency and + * bandwidth of a channel must be given. + * @NL80211_CMD_LEAVE_OCB: Leave the OCB network -- no special arguments, the + * network is determined by the network interface. + * + * @NL80211_CMD_TDLS_CHANNEL_SWITCH: Start channel-switching with a TDLS peer, + * identified by the %NL80211_ATTR_MAC parameter. A target channel is + * provided via %NL80211_ATTR_WIPHY_FREQ and other attributes determining + * channel width/type. The target operating class is given via + * %NL80211_ATTR_OPER_CLASS. + * The driver is responsible for continually initiating channel-switching + * operations and returning to the base channel for communication with the + * AP. + * @NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH: Stop channel-switching with a TDLS + * peer given by %NL80211_ATTR_MAC. Both peers must be on the base channel + * when this command completes. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -912,6 +948,16 @@ enum nl80211_commands { NL80211_CMD_ADD_TX_TS, NL80211_CMD_DEL_TX_TS, + NL80211_CMD_GET_MPP, + + NL80211_CMD_JOIN_OCB, + NL80211_CMD_LEAVE_OCB, + + NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, + + NL80211_CMD_TDLS_CHANNEL_SWITCH, + NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1606,9 +1652,9 @@ enum nl80211_commands { * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32. * As specified in the &enum nl80211_tdls_peer_capability. * - * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface + * @NL80211_ATTR_SOCKET_OWNER: Flag attribute, if set during interface * creation then the new interface will be owned by the netlink socket - * that created it and will be destroyed when the socket is closed + * that created it and will be destroyed when the socket is closed. * * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is * the TDLS link initiator. @@ -1638,6 +1684,11 @@ enum nl80211_commands { * @NL80211_ATTR_SMPS_MODE: SMPS mode to use (ap mode). see * &enum nl80211_smps_mode. 
* + * @NL80211_ATTR_OPER_CLASS: operating class + * + * @NL80211_ATTR_MAC_MASK: MAC address mask + * + * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1973,7 +2024,7 @@ enum nl80211_attrs { NL80211_ATTR_TDLS_PEER_CAPABILITY, - NL80211_ATTR_IFACE_SOCKET_OWNER, + NL80211_ATTR_SOCKET_OWNER, NL80211_ATTR_CSA_C_OFFSETS_TX, NL80211_ATTR_MAX_CSA_COUNTERS, @@ -1990,15 +2041,21 @@ enum nl80211_attrs { NL80211_ATTR_SMPS_MODE, + NL80211_ATTR_OPER_CLASS, + + NL80211_ATTR_MAC_MASK, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, + NUM_NL80211_ATTR = __NL80211_ATTR_AFTER_LAST, NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 }; /* source-level API compatibility */ #define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION #define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG +#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER /* * Allow user space programs to use #ifdef on new attributes by defining them @@ -2064,6 +2121,8 @@ enum nl80211_attrs { * and therefore can't be created in the normal ways, use the * %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE * commands to create and destroy one + * @NL80211_IF_TYPE_OCB: Outside Context of a BSS + * This mode corresponds to the MIB variable dot11OCBActivated=true * @NL80211_IFTYPE_MAX: highest interface type number currently defined * @NUM_NL80211_IFTYPES: number of defined interface types * @@ -2083,6 +2142,7 @@ enum nl80211_iftype { NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO, NL80211_IFTYPE_P2P_DEVICE, + NL80211_IFTYPE_OCB, /* keep last */ NUM_NL80211_IFTYPES, @@ -2631,6 +2691,11 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated * base on contiguous rules and wider channels will be allowed to cross * multiple contiguous/overlapping frequency ranges. + * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT + * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation + * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation + * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed + * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -2643,11 +2708,18 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_IR = 1<<7, __NL80211_RRF_NO_IBSS = 1<<8, NL80211_RRF_AUTO_BW = 1<<11, + NL80211_RRF_GO_CONCURRENT = 1<<12, + NL80211_RRF_NO_HT40MINUS = 1<<13, + NL80211_RRF_NO_HT40PLUS = 1<<14, + NL80211_RRF_NO_80MHZ = 1<<15, + NL80211_RRF_NO_160MHZ = 1<<16, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR #define NL80211_RRF_NO_IBSS NL80211_RRF_NO_IR #define NL80211_RRF_NO_IR NL80211_RRF_NO_IR +#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\ + NL80211_RRF_NO_HT40PLUS) /* For backport compatibility with older userspace */ #define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS) @@ -3379,6 +3451,8 @@ enum nl80211_ps_state { * interval in which %NL80211_ATTR_CQM_TXE_PKTS and * %NL80211_ATTR_CQM_TXE_RATE must be satisfied before generating an * %NL80211_CMD_NOTIFY_CQM. Set to 0 to turn off TX error reporting. 
+ * @NL80211_ATTR_CQM_BEACON_LOSS_EVENT: flag attribute that's set in a beacon + * loss event * @__NL80211_ATTR_CQM_AFTER_LAST: internal * @NL80211_ATTR_CQM_MAX: highest key attribute */ @@ -3391,6 +3465,7 @@ enum nl80211_attr_cqm { NL80211_ATTR_CQM_TXE_RATE, NL80211_ATTR_CQM_TXE_PKTS, NL80211_ATTR_CQM_TXE_INTVL, + NL80211_ATTR_CQM_BEACON_LOSS_EVENT, /* keep last */ __NL80211_ATTR_CQM_AFTER_LAST, @@ -3403,9 +3478,7 @@ enum nl80211_attr_cqm { * configured threshold * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the * configured threshold - * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: The device experienced beacon loss. - * (Note that deauth/disassoc will still follow if the AP is not - * available. This event might get used as roaming event, etc.) + * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: (reserved, never sent) */ enum nl80211_cqm_rssi_threshold_event { NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, @@ -3545,6 +3618,25 @@ struct nl80211_pattern_support { * @NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS: For wakeup reporting only, * the TCP connection ran out of tokens to use for data to send to the * service + * @NL80211_WOWLAN_TRIG_NET_DETECT: wake up when a configured network + * is detected. This is a nested attribute that contains the + * same attributes used with @NL80211_CMD_START_SCHED_SCAN. It + * specifies how the scan is performed (e.g. the interval and the + * channels to scan) as well as the scan results that will + * trigger a wake (i.e. the matchsets). + * @NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS: nested attribute + * containing an array with information about what triggered the + * wake up. If no elements are present in the array, it means + * that the information is not available. If more than one + * element is present, it means that more than one match + * occurred. + * Each element in the array is a nested attribute that contains + * one optional %NL80211_ATTR_SSID attribute and one optional + * %NL80211_ATTR_SCAN_FREQUENCIES attribute. At least one of + * these attributes must be present. If + * %NL80211_ATTR_SCAN_FREQUENCIES contains more than one + * frequency, it means that the match occurred in more than one + * channel. * @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers * @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number * @@ -3570,6 +3662,8 @@ enum nl80211_wowlan_triggers { NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST, NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS, + NL80211_WOWLAN_TRIG_NET_DETECT, + NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS, /* keep last */ NUM_NL80211_WOWLAN_TRIG, @@ -4042,6 +4136,27 @@ enum nl80211_ap_sme_features { * multiplexing powersave, ie. can turn off all but one chain * and then wake the rest up as required after, for example, * rts/cts handshake. + * @NL80211_FEATURE_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM + * TSPEC sessions (TID aka TSID 0-7) with the %NL80211_CMD_ADD_TX_TS + * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it + * needs to be able to handle Block-Ack agreements and other things. + * @NL80211_FEATURE_MAC_ON_CREATE: Device supports configuring + * the vif's MAC address upon creation. + * See 'macaddr' field in the vif_params (cfg80211.h). + * @NL80211_FEATURE_TDLS_CHANNEL_SWITCH: Driver supports channel switching when + * operating as a TDLS peer. 
+ * @NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR: This device/driver supports using a + * random MAC address during scan (if the device is unassociated); the + * %NL80211_SCAN_FLAG_RANDOM_ADDR flag may be set for scans and the MAC + * address mask/value will be used. + * @NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR: This device/driver supports + * using a random MAC address for every scan iteration during scheduled + * scan (while not associated), the %NL80211_SCAN_FLAG_RANDOM_ADDR may + * be set for scheduled scan and the MAC address mask/value will be used. + * @NL80211_FEATURE_ND_RANDOM_MAC_ADDR: This device/driver supports using a + * random MAC address for every scan iteration during "net detect", i.e. + * scan in unassociated WoWLAN, the %NL80211_SCAN_FLAG_RANDOM_ADDR may + * be set for scheduled scan and the MAC address mask/value will be used. */ enum nl80211_feature_flags { NL80211_FEATURE_SK_TX_STATUS = 1 << 0, @@ -4070,6 +4185,12 @@ enum nl80211_feature_flags { NL80211_FEATURE_ACKTO_ESTIMATION = 1 << 23, NL80211_FEATURE_STATIC_SMPS = 1 << 24, NL80211_FEATURE_DYNAMIC_SMPS = 1 << 25, + NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = 1 << 26, + NL80211_FEATURE_MAC_ON_CREATE = 1 << 27, + NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28, + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29, + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30, + NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31, }; /** @@ -4118,11 +4239,21 @@ enum nl80211_connect_failed_reason { * dangerous because will destroy stations performance as a lot of frames * will be lost while scanning off-channel, therefore it must be used only * when really needed + * @NL80211_SCAN_FLAG_RANDOM_ADDR: use a random MAC address for this scan (or + * for scheduled scan: a different one for every scan iteration). When the + * flag is set, depending on device capabilities the @NL80211_ATTR_MAC and + * @NL80211_ATTR_MAC_MASK attributes may also be given in which case only + * the masked bits will be preserved from the MAC address and the remainder + * randomised. If the attributes are not given full randomisation (46 bits, + * locally administered 1, multicast 0) is assumed. + * This flag must not be requested when the feature isn't supported, check + * the nl80211 feature flags for the device. 
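The scan-randomisation features documented here are requested per scan via the flag added at the end of this hunk. A hedged sketch using libnl-3 (an assumption, as are the NL80211_ATTR_SCAN_FLAGS attribute and the if_nametoindex() lookup, which are not part of this diff); error handling is kept minimal:

#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int trigger_random_mac_scan(const char *ifname)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, err;

	if (!sk || !msg)
		return -1;
	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TRIGGER_SCAN, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));
	/* Ask for a randomised source MAC; omitting NL80211_ATTR_MAC and
	 * NL80211_ATTR_MAC_MASK requests full randomisation. */
	nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, NL80211_SCAN_FLAG_RANDOM_ADDR);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}

Whether the request is honoured still depends on the driver advertising NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR, as the comment above requires.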
*/ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, NL80211_SCAN_FLAG_FLUSH = 1<<1, NL80211_SCAN_FLAG_AP = 1<<2, + NL80211_SCAN_FLAG_RANDOM_ADDR = 1<<3, }; /** diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h index 29a7d8619d8d..26386cf3db44 100644 --- a/include/uapi/linux/nvme.h +++ b/include/uapi/linux/nvme.h @@ -181,6 +181,22 @@ enum { NVME_LBART_ATTRIB_HIDE = 1 << 1, }; +struct nvme_reservation_status { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[13]; + struct { + __le16 cntlid; + __u8 rcsts; + __u8 resv3[5]; + __le64 hostid; + __le64 rkey; + } regctl_ds[]; +}; + /* I/O commands */ enum nvme_opcode { @@ -189,7 +205,12 @@ enum nvme_opcode { nvme_cmd_read = 0x02, nvme_cmd_write_uncor = 0x04, nvme_cmd_compare = 0x05, + nvme_cmd_write_zeroes = 0x08, nvme_cmd_dsm = 0x09, + nvme_cmd_resv_register = 0x0d, + nvme_cmd_resv_report = 0x0e, + nvme_cmd_resv_acquire = 0x11, + nvme_cmd_resv_release = 0x15, }; struct nvme_common_command { @@ -305,7 +326,11 @@ enum { NVME_FEAT_IRQ_CONFIG = 0x09, NVME_FEAT_WRITE_ATOMIC = 0x0a, NVME_FEAT_ASYNC_EVENT = 0x0b, - NVME_FEAT_SW_PROGRESS = 0x0c, + NVME_FEAT_AUTO_PST = 0x0c, + NVME_FEAT_SW_PROGRESS = 0x80, + NVME_FEAT_HOST_ID = 0x81, + NVME_FEAT_RESV_MASK = 0x82, + NVME_FEAT_RESV_PERSIST = 0x83, NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, @@ -440,9 +465,15 @@ enum { NVME_SC_FUSED_MISSING = 0xa, NVME_SC_INVALID_NS = 0xb, NVME_SC_CMD_SEQ_ERROR = 0xc, + NVME_SC_SGL_INVALID_LAST = 0xd, + NVME_SC_SGL_INVALID_COUNT = 0xe, + NVME_SC_SGL_INVALID_DATA = 0xf, + NVME_SC_SGL_INVALID_METADATA = 0x10, + NVME_SC_SGL_INVALID_TYPE = 0x11, NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, NVME_SC_NS_NOT_READY = 0x82, + NVME_SC_RESERVATION_CONFLICT = 0x83, NVME_SC_CQ_INVALID = 0x100, NVME_SC_QID_INVALID = 0x101, NVME_SC_QUEUE_SIZE = 0x102, @@ -454,7 +485,15 @@ enum { NVME_SC_INVALID_VECTOR = 0x108, NVME_SC_INVALID_LOG_PAGE = 0x109, NVME_SC_INVALID_FORMAT = 0x10a, + NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, + NVME_SC_INVALID_QUEUE = 0x10c, + NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, + NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, + NVME_SC_FEATURE_NOT_PER_NS = 0x10f, + NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, NVME_SC_BAD_ATTRIBUTES = 0x180, + NVME_SC_INVALID_PI = 0x181, + NVME_SC_READ_ONLY = 0x182, NVME_SC_WRITE_FAULT = 0x280, NVME_SC_READ_ERROR = 0x281, NVME_SC_GUARD_CHECK = 0x282, @@ -489,7 +528,7 @@ struct nvme_user_io { __u16 appmask; }; -struct nvme_admin_cmd { +struct nvme_passthru_cmd { __u8 opcode; __u8 flags; __u16 rsvd1; @@ -510,8 +549,11 @@ struct nvme_admin_cmd { __u32 result; }; +#define nvme_admin_cmd nvme_passthru_cmd + #define NVME_IOCTL_ID _IO('N', 0x40) #define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd) #define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io) +#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd) #endif /* _UAPI_LINUX_NVME_H */ diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 435eabc5ffaa..3a6dcaa359b7 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -157,6 +157,11 @@ enum ovs_packet_cmd { * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content * specified there. 
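Looking back at the nvme.h hunk above: the rename to struct nvme_passthru_cmd keeps the existing admin ioctl working (via the nvme_admin_cmd alias) while NVME_IOCTL_IO_CMD opens the same structure to I/O commands. A sketch of the admin path; the Identify opcode 0x06 and CNS value come from the NVMe specification, not from this header, and the fd/buffer setup is assumed:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>

/* Issue an Identify Controller admin command through the renamed
 * struct nvme_passthru_cmd; buf must point to a 4096-byte buffer. */
static int identify_controller(int fd, void *buf)
{
	struct nvme_passthru_cmd cmd = {
		.opcode   = 0x06,			/* Identify (admin) */
		.nsid     = 0,
		.addr     = (uint64_t)(uintptr_t)buf,
		.data_len = 4096,
		.cdw10    = 1,				/* CNS 1: controller data */
	};

	return ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
}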
+ * @OVS_PACKET_ATTR_EGRESS_TUN_KEY: Present for an %OVS_PACKET_CMD_ACTION + * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an + * %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the + * output port is actually a tunnel port. Contains the output tunnel key + * extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes. * * These attributes follow the &struct ovs_header within the Generic Netlink * payload for %OVS_PACKET_* commands. @@ -167,6 +172,8 @@ enum ovs_packet_attr { OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */ OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ + OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* + attributes. */ __OVS_PACKET_ATTR_MAX }; @@ -293,6 +300,9 @@ enum ovs_key_attr { OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash is not computed by the datapath. */ OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */ + OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls. + * The implementation may restrict + * the accepted length of the array. */ #ifdef __KERNEL__ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */ @@ -312,6 +322,8 @@ enum ovs_tunnel_key_attr { OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */ OVS_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */ OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */ + OVS_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */ + OVS_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */ __OVS_TUNNEL_KEY_ATTR_MAX }; @@ -340,6 +352,10 @@ struct ovs_key_ethernet { __u8 eth_dst[ETH_ALEN]; }; +struct ovs_key_mpls { + __be32 mpls_lse; +}; + struct ovs_key_ipv4 { __be32 ipv4_src; __be32 ipv4_dst; @@ -393,9 +409,9 @@ struct ovs_key_arp { }; struct ovs_key_nd { - __u32 nd_target[4]; - __u8 nd_sll[ETH_ALEN]; - __u8 nd_tll[ETH_ALEN]; + __be32 nd_target[4]; + __u8 nd_sll[ETH_ALEN]; + __u8 nd_tll[ETH_ALEN]; }; /** @@ -441,6 +457,8 @@ enum ovs_flow_attr { OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */ OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */ OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */ + OVS_FLOW_ATTR_PROBE, /* Flow operation is a feature probe, error + * logging should be suppressed. */ __OVS_FLOW_ATTR_MAX }; @@ -473,17 +491,34 @@ enum ovs_sample_attr { * message should be sent. Required. * @OVS_USERSPACE_ATTR_USERDATA: If present, its variable-length argument is * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. + * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get + * tunnel info. */ enum ovs_userspace_attr { OVS_USERSPACE_ATTR_UNSPEC, OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */ OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */ + OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port + * to get tunnel info. */ __OVS_USERSPACE_ATTR_MAX }; #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) /** + * struct ovs_action_push_mpls - %OVS_ACTION_ATTR_PUSH_MPLS action argument. + * @mpls_lse: MPLS label stack entry to push. + * @mpls_ethertype: Ethertype to set in the encapsulating ethernet frame. + * + * The only values @mpls_ethertype should ever be given are %ETH_P_MPLS_UC and + * %ETH_P_MPLS_MC, indicating MPLS unicast or multicast. Other are rejected. 
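The struct added just below carries exactly these two fields. A small helper shows one plausible way to pack the label stack entry, assuming the standard RFC 3032 bit layout (label 31..12, TC 11..9, bottom-of-stack bit 8, TTL 7..0), which this header does not itself spell out:

#include <stdint.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/openvswitch.h>

/* Build the argument for OVS_ACTION_ATTR_PUSH_MPLS: a network-order
 * label stack entry plus the encapsulating ethertype. */
static struct ovs_action_push_mpls make_push_mpls(uint32_t label, uint8_t tc,
						  int bos, uint8_t ttl)
{
	struct ovs_action_push_mpls a = {
		.mpls_lse = htonl((label << 12) | (tc << 9) |
				  ((bos ? 1 : 0) << 8) | ttl),
		.mpls_ethertype = htons(ETH_P_MPLS_UC),	/* unicast MPLS */
	};

	return a;
}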
+ */ +struct ovs_action_push_mpls { + __be32 mpls_lse; + __be16 mpls_ethertype; /* Either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC */ +}; + +/** * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. * @vlan_tpid: Tag protocol identifier (TPID) to push. * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set @@ -534,6 +569,15 @@ struct ovs_action_hash { * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. * @OVS_ACTION_ATTR_SAMPLE: Probabilitically executes actions, as specified in * the nested %OVS_SAMPLE_ATTR_* attributes. + * @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the + * top of the packets MPLS label stack. Set the ethertype of the + * encapsulating frame to either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC to + * indicate the new packet contents. + * @OVS_ACTION_ATTR_POP_MPLS: Pop an MPLS label stack entry off of the + * packet's MPLS label stack. Set the encapsulating frame's ethertype to + * indicate the new packet contents. This could potentially still be + * %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there + * is no MPLS label stack, as determined by ethertype, no action is taken. * * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all * fields within a header are modifiable, e.g. the IPv4 protocol and fragment @@ -550,6 +594,9 @@ enum ovs_action_attr { OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */ OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */ + OVS_ACTION_ATTR_PUSH_MPLS, /* struct ovs_action_push_mpls. */ + OVS_ACTION_ATTR_POP_MPLS, /* __be16 ethertype. */ + __OVS_ACTION_ATTR_MAX }; diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index eb0f1a554d7b..9c9b8b4480cd 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -235,6 +235,7 @@ enum { #define RTPROT_NTK 15 /* Netsukuku */ #define RTPROT_DHCP 16 /* DHCP client */ #define RTPROT_MROUTED 17 /* Multicast daemon */ +#define RTPROT_BABEL 42 /* Babel daemon */ /* rtm_scope diff --git a/include/uapi/linux/sem.h b/include/uapi/linux/sem.h index 541fce03b50c..dd73b908b2f3 100644 --- a/include/uapi/linux/sem.h +++ b/include/uapi/linux/sem.h @@ -63,10 +63,22 @@ struct seminfo { int semaem; }; -#define SEMMNI 128 /* <= IPCMNI max # of semaphore identifiers */ -#define SEMMSL 250 /* <= 8 000 max num of semaphores per id */ +/* + * SEMMNI, SEMMSL and SEMMNS are default values which can be + * modified by sysctl. + * The values has been chosen to be larger than necessary for any + * known configuration. + * + * SEMOPM should not be increased beyond 1000, otherwise there is the + * risk that semop()/semtimedop() fails due to kernel memory fragmentation when + * allocating the sop array. 
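The SEMOPM ceiling above applies to the nsops argument of semop()/semtimedop(). For illustration, a two-operation call; semid is assumed to come from an earlier semget():

#include <sys/ipc.h>
#include <sys/sem.h>

/* Atomically take semaphore 0 and give semaphore 1; nsops (2 here) is
 * the count that must stay at or below SEMOPM. */
static int swap_tokens(int semid)
{
	struct sembuf ops[2] = {
		{ .sem_num = 0, .sem_op = -1, .sem_flg = 0 },
		{ .sem_num = 1, .sem_op = +1, .sem_flg = 0 },
	};

	return semop(semid, ops, 2);
}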
+ */ + + +#define SEMMNI 32000 /* <= IPCMNI max # of semaphore identifiers */ +#define SEMMSL 32000 /* <= INT_MAX max num of semaphores per id */ #define SEMMNS (SEMMNI*SEMMSL) /* <= INT_MAX max # of semaphores in system */ -#define SEMOPM 32 /* <= 1 000 max num of ops per semop call */ +#define SEMOPM 500 /* <= 1 000 max num of ops per semop call */ #define SEMVMX 32767 /* <= 32767 semaphore maximum value */ #define SEMAEM SEMVMX /* adjust on exit max value */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 16ad8521af6a..c17218094f18 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -54,7 +54,8 @@ #define PORT_ALTR_16550_F32 26 /* Altera 16550 UART with 32 FIFOs */ #define PORT_ALTR_16550_F64 27 /* Altera 16550 UART with 64 FIFOs */ #define PORT_ALTR_16550_F128 28 /* Altera 16550 UART with 128 FIFOs */ -#define PORT_MAX_8250 28 /* max port ID */ +#define PORT_RT2880 29 /* Ralink RT2880 internal UART */ +#define PORT_MAX_8250 29 /* max port ID */ /* * ARM specific type numbers. These are not currently guaranteed diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h index df6c9ab6b0cd..53af3b790129 100644 --- a/include/uapi/linux/serial_reg.h +++ b/include/uapi/linux/serial_reg.h @@ -359,6 +359,7 @@ #define UART_OMAP_SYSC 0x15 /* System configuration register */ #define UART_OMAP_SYSS 0x16 /* System status register */ #define UART_OMAP_WER 0x17 /* Wake-up enable register */ +#define UART_OMAP_TX_LVL 0x1a /* TX FIFO level register */ /* * These are the definitions for the MDR1 register diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index df40137f33dd..b22224100011 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -156,6 +156,7 @@ enum UDP_MIB_RCVBUFERRORS, /* RcvbufErrors */ UDP_MIB_SNDBUFERRORS, /* SndbufErrors */ UDP_MIB_CSUMERRORS, /* InCsumErrors */ + UDP_MIB_IGNOREDMULTI, /* IgnoredMulti */ __UDP_MIB_MAX }; @@ -265,6 +266,10 @@ enum LINUX_MIB_TCPWANTZEROWINDOWADV, /* TCPWantZeroWindowAdv */ LINUX_MIB_TCPSYNRETRANS, /* TCPSynRetrans */ LINUX_MIB_TCPORIGDATASENT, /* TCPOrigDataSent */ + LINUX_MIB_TCPHYSTARTTRAINDETECT, /* TCPHystartTrainDetect */ + LINUX_MIB_TCPHYSTARTTRAINCWND, /* TCPHystartTrainCwnd */ + LINUX_MIB_TCPHYSTARTDELAYDETECT, /* TCPHystartDelayDetect */ + LINUX_MIB_TCPHYSTARTDELAYCWND, /* TCPHystartDelayCwnd */ __LINUX_MIB_MAX }; diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 43aaba1cc037..0956373b56db 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -153,6 +153,7 @@ enum KERN_MAX_LOCK_DEPTH=74, /* int: rtmutex's maximum lock depth */ KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ + KERN_PANIC_ON_WARN=77, /* int: call panic() in WARN() functions */ }; diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild index 56f121605c99..b057da2b87a4 100644 --- a/include/uapi/linux/tc_act/Kbuild +++ b/include/uapi/linux/tc_act/Kbuild @@ -7,3 +7,4 @@ header-y += tc_mirred.h header-y += tc_nat.h header-y += tc_pedit.h header-y += tc_skbedit.h +header-y += tc_vlan.h diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h new file mode 100644 index 000000000000..f7b8d448b960 --- /dev/null +++ b/include/uapi/linux/tc_act/tc_vlan.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * + * This program is free software; you can 
redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_TC_VLAN_H +#define __LINUX_TC_VLAN_H + +#include <linux/pkt_cls.h> + +#define TCA_ACT_VLAN 12 + +#define TCA_VLAN_ACT_POP 1 +#define TCA_VLAN_ACT_PUSH 2 + +struct tc_vlan { + tc_gen; + int v_action; +}; + +enum { + TCA_VLAN_UNSPEC, + TCA_VLAN_TM, + TCA_VLAN_PARMS, + TCA_VLAN_PUSH_VLAN_ID, + TCA_VLAN_PUSH_VLAN_PROTOCOL, + __TCA_VLAN_MAX, +}; +#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1) + +#endif diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h new file mode 100644 index 000000000000..ac5535855982 --- /dev/null +++ b/include/uapi/linux/thermal.h @@ -0,0 +1,35 @@ +#ifndef _UAPI_LINUX_THERMAL_H +#define _UAPI_LINUX_THERMAL_H + +#define THERMAL_NAME_LENGTH 20 + +/* Adding event notification support elements */ +#define THERMAL_GENL_FAMILY_NAME "thermal_event" +#define THERMAL_GENL_VERSION 0x01 +#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp" + +/* Events supported by Thermal Netlink */ +enum events { + THERMAL_AUX0, + THERMAL_AUX1, + THERMAL_CRITICAL, + THERMAL_DEV_FAULT, +}; + +/* attributes of thermal_genl_family */ +enum { + THERMAL_GENL_ATTR_UNSPEC, + THERMAL_GENL_ATTR_EVENT, + __THERMAL_GENL_ATTR_MAX, +}; +#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) + +/* commands supported by the thermal_genl_family */ +enum { + THERMAL_GENL_CMD_UNSPEC, + THERMAL_GENL_CMD_EVENT, + __THERMAL_GENL_CMD_MAX, +}; +#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) + +#endif /* _UAPI_LINUX_THERMAL_H */ diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h new file mode 100644 index 000000000000..8d723824ad69 --- /dev/null +++ b/include/uapi/linux/tipc_netlink.h @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2014, Ericsson AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LINUX_TIPC_NETLINK_H_ +#define _LINUX_TIPC_NETLINK_H_ + +#define TIPC_GENL_V2_NAME "TIPCv2" +#define TIPC_GENL_V2_VERSION 0x1 + +/* Netlink commands */ +enum { + TIPC_NL_UNSPEC, + TIPC_NL_LEGACY, + TIPC_NL_BEARER_DISABLE, + TIPC_NL_BEARER_ENABLE, + TIPC_NL_BEARER_GET, + TIPC_NL_BEARER_SET, + TIPC_NL_SOCK_GET, + TIPC_NL_PUBL_GET, + TIPC_NL_LINK_GET, + TIPC_NL_LINK_SET, + TIPC_NL_LINK_RESET_STATS, + TIPC_NL_MEDIA_GET, + TIPC_NL_MEDIA_SET, + TIPC_NL_NODE_GET, + TIPC_NL_NET_GET, + TIPC_NL_NET_SET, + TIPC_NL_NAME_TABLE_GET, + + __TIPC_NL_CMD_MAX, + TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1 +}; + +/* Top level netlink attributes */ +enum { + TIPC_NLA_UNSPEC, + TIPC_NLA_BEARER, /* nest */ + TIPC_NLA_SOCK, /* nest */ + TIPC_NLA_PUBL, /* nest */ + TIPC_NLA_LINK, /* nest */ + TIPC_NLA_MEDIA, /* nest */ + TIPC_NLA_NODE, /* nest */ + TIPC_NLA_NET, /* nest */ + TIPC_NLA_NAME_TABLE, /* nest */ + + __TIPC_NLA_MAX, + TIPC_NLA_MAX = __TIPC_NLA_MAX - 1 +}; + +/* Bearer info */ +enum { + TIPC_NLA_BEARER_UNSPEC, + TIPC_NLA_BEARER_NAME, /* string */ + TIPC_NLA_BEARER_PROP, /* nest */ + TIPC_NLA_BEARER_DOMAIN, /* u32 */ + + __TIPC_NLA_BEARER_MAX, + TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1 +}; + +/* Socket info */ +enum { + TIPC_NLA_SOCK_UNSPEC, + TIPC_NLA_SOCK_ADDR, /* u32 */ + TIPC_NLA_SOCK_REF, /* u32 */ + TIPC_NLA_SOCK_CON, /* nest */ + TIPC_NLA_SOCK_HAS_PUBL, /* flag */ + + __TIPC_NLA_SOCK_MAX, + TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1 +}; + +/* Link info */ +enum { + TIPC_NLA_LINK_UNSPEC, + TIPC_NLA_LINK_NAME, /* string */ + TIPC_NLA_LINK_DEST, /* u32 */ + TIPC_NLA_LINK_MTU, /* u32 */ + TIPC_NLA_LINK_BROADCAST, /* flag */ + TIPC_NLA_LINK_UP, /* flag */ + TIPC_NLA_LINK_ACTIVE, /* flag */ + TIPC_NLA_LINK_PROP, /* nest */ + TIPC_NLA_LINK_STATS, /* nest */ + TIPC_NLA_LINK_RX, /* u32 */ + TIPC_NLA_LINK_TX, /* u32 */ + + __TIPC_NLA_LINK_MAX, + TIPC_NLA_LINK_MAX = __TIPC_NLA_LINK_MAX - 1 +}; + +/* Media info */ +enum { + TIPC_NLA_MEDIA_UNSPEC, + TIPC_NLA_MEDIA_NAME, /* string */ + TIPC_NLA_MEDIA_PROP, /* nest */ + + __TIPC_NLA_MEDIA_MAX, + TIPC_NLA_MEDIA_MAX = __TIPC_NLA_MEDIA_MAX - 1 +}; + +/* Node info */ +enum { + TIPC_NLA_NODE_UNSPEC, + TIPC_NLA_NODE_ADDR, /* u32 */ + TIPC_NLA_NODE_UP, /* flag */ + + __TIPC_NLA_NODE_MAX, + TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1 +}; + +/* Net info */ +enum { + TIPC_NLA_NET_UNSPEC, + TIPC_NLA_NET_ID, /* u32 */ + TIPC_NLA_NET_ADDR, /* u32 */ + + __TIPC_NLA_NET_MAX, + TIPC_NLA_NET_MAX = __TIPC_NLA_NET_MAX - 1 +}; + +/* Name table info */ +enum { + TIPC_NLA_NAME_TABLE_UNSPEC, + TIPC_NLA_NAME_TABLE_PUBL, /* nest */ + + __TIPC_NLA_NAME_TABLE_MAX, + TIPC_NLA_NAME_TABLE_MAX = __TIPC_NLA_NAME_TABLE_MAX - 1 +}; + +/* Publication info */ +enum { + TIPC_NLA_PUBL_UNSPEC, + + TIPC_NLA_PUBL_TYPE, /* u32 */ + TIPC_NLA_PUBL_LOWER, /* u32 */ + TIPC_NLA_PUBL_UPPER, /* u32 */ + TIPC_NLA_PUBL_SCOPE, /* u32 */ + TIPC_NLA_PUBL_NODE, /* u32 */ + TIPC_NLA_PUBL_REF, /* u32 */ + TIPC_NLA_PUBL_KEY, /* u32 */ + + __TIPC_NLA_PUBL_MAX, + 
TIPC_NLA_PUBL_MAX = __TIPC_NLA_PUBL_MAX - 1 +}; + +/* Nest, connection info */ +enum { + TIPC_NLA_CON_UNSPEC, + + TIPC_NLA_CON_FLAG, /* flag */ + TIPC_NLA_CON_NODE, /* u32 */ + TIPC_NLA_CON_SOCK, /* u32 */ + TIPC_NLA_CON_TYPE, /* u32 */ + TIPC_NLA_CON_INST, /* u32 */ + + __TIPC_NLA_CON_MAX, + TIPC_NLA_CON_MAX = __TIPC_NLA_CON_MAX - 1 +}; + +/* Nest, link propreties. Valid for link, media and bearer */ +enum { + TIPC_NLA_PROP_UNSPEC, + + TIPC_NLA_PROP_PRIO, /* u32 */ + TIPC_NLA_PROP_TOL, /* u32 */ + TIPC_NLA_PROP_WIN, /* u32 */ + + __TIPC_NLA_PROP_MAX, + TIPC_NLA_PROP_MAX = __TIPC_NLA_PROP_MAX - 1 +}; + +/* Nest, statistics info */ +enum { + TIPC_NLA_STATS_UNSPEC, + + TIPC_NLA_STATS_RX_INFO, /* u32 */ + TIPC_NLA_STATS_RX_FRAGMENTS, /* u32 */ + TIPC_NLA_STATS_RX_FRAGMENTED, /* u32 */ + TIPC_NLA_STATS_RX_BUNDLES, /* u32 */ + TIPC_NLA_STATS_RX_BUNDLED, /* u32 */ + TIPC_NLA_STATS_TX_INFO, /* u32 */ + TIPC_NLA_STATS_TX_FRAGMENTS, /* u32 */ + TIPC_NLA_STATS_TX_FRAGMENTED, /* u32 */ + TIPC_NLA_STATS_TX_BUNDLES, /* u32 */ + TIPC_NLA_STATS_TX_BUNDLED, /* u32 */ + TIPC_NLA_STATS_MSG_PROF_TOT, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_CNT, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_TOT, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P0, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P1, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P2, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P3, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P4, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P5, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P6, /* u32 */ + TIPC_NLA_STATS_RX_STATES, /* u32 */ + TIPC_NLA_STATS_RX_PROBES, /* u32 */ + TIPC_NLA_STATS_RX_NACKS, /* u32 */ + TIPC_NLA_STATS_RX_DEFERRED, /* u32 */ + TIPC_NLA_STATS_TX_STATES, /* u32 */ + TIPC_NLA_STATS_TX_PROBES, /* u32 */ + TIPC_NLA_STATS_TX_NACKS, /* u32 */ + TIPC_NLA_STATS_TX_ACKS, /* u32 */ + TIPC_NLA_STATS_RETRANSMITTED, /* u32 */ + TIPC_NLA_STATS_DUPLICATES, /* u32 */ + TIPC_NLA_STATS_LINK_CONGS, /* u32 */ + TIPC_NLA_STATS_MAX_QUEUE, /* u32 */ + TIPC_NLA_STATS_AVG_QUEUE, /* u32 */ + + __TIPC_NLA_STATS_MAX, + TIPC_NLA_STATS_MAX = __TIPC_NLA_STATS_MAX - 1 +}; + +#endif diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h index eefcb483a2c0..fae4864737fa 100644 --- a/include/uapi/linux/tty_flags.h +++ b/include/uapi/linux/tty_flags.h @@ -6,27 +6,31 @@ * shared by the tty_port flags structures. * * Define ASYNCB_* for convenient use with {test,set,clear}_bit. + * + * Bits [0..ASYNCB_LAST_USER] are userspace defined/visible/changeable + * [x] in the bit comments indicates the flag is defunct and no longer used. 
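For context, userspace reaches the ASYNC bits defined in this header through the serial_struct ioctls; a sketch that flips the ASYNC_SPD_* alias bits (TIOCGSERIAL/TIOCSSERIAL and struct serial_struct come from <linux/serial.h> and are assumptions here, not part of this hunk):

#include <sys/ioctl.h>
#include <linux/serial.h>

/* Make the legacy 38400-baud setting act as 115200 by selecting the
 * ASYNC_SPD_VHI alias via the serial_struct flags word. */
static int use_spd_vhi(int fd)
{
	struct serial_struct ss;

	if (ioctl(fd, TIOCGSERIAL, &ss) < 0)
		return -1;
	ss.flags = (ss.flags & ~ASYNC_SPD_MASK) | ASYNC_SPD_VHI;
	return ioctl(fd, TIOCSSERIAL, &ss);
}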
*/ #define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes * on the callout port */ #define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */ #define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */ -#define ASYNCB_SPLIT_TERMIOS 3 /* Separate termios for dialin/callout */ +#define ASYNCB_SPLIT_TERMIOS 3 /* [x] Separate termios for dialin/callout */ #define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */ #define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */ #define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */ #define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during * autoconfiguration */ -#define ASYNCB_SESSION_LOCKOUT 8 /* Lock out cua opens based on session */ -#define ASYNCB_PGRP_LOCKOUT 9 /* Lock out cua opens based on pgrp */ -#define ASYNCB_CALLOUT_NOHUP 10 /* Don't do hangups for cua device */ +#define ASYNCB_SESSION_LOCKOUT 8 /* [x] Lock out cua opens based on session */ +#define ASYNCB_PGRP_LOCKOUT 9 /* [x] Lock out cua opens based on pgrp */ +#define ASYNCB_CALLOUT_NOHUP 10 /* [x] Don't do hangups for cua device */ #define ASYNCB_HARDPPS_CD 11 /* Call hardpps when CD goes high */ #define ASYNCB_SPD_SHI 12 /* Use 230400 instead of 38400 bps */ #define ASYNCB_LOW_LATENCY 13 /* Request low latency behaviour */ #define ASYNCB_BUGGY_UART 14 /* This is a buggy UART, skip some safety * checks. Note: can be dangerous! */ -#define ASYNCB_AUTOPROBE 15 /* Port was autoprobed by PCI or PNP code */ -#define ASYNCB_LAST_USER 15 +#define ASYNCB_AUTOPROBE 15 /* [x] Port was autoprobed by PCI/PNP code */ +#define ASYNCB_MAGIC_MULTIPLIER 16 /* Use special CLK or divisor */ +#define ASYNCB_LAST_USER 16 /* Internal flags used only by kernel */ #define ASYNCB_INITIALIZED 31 /* Serial port was initialized */ @@ -57,8 +61,11 @@ #define ASYNC_LOW_LATENCY (1U << ASYNCB_LOW_LATENCY) #define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART) #define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE) +#define ASYNC_MAGIC_MULTIPLIER (1U << ASYNCB_MAGIC_MULTIPLIER) #define ASYNC_FLAGS ((1U << (ASYNCB_LAST_USER + 1)) - 1) +#define ASYNC_DEPRECATED (ASYNC_SESSION_LOCKOUT | ASYNC_PGRP_LOCKOUT | \ + ASYNC_CALLOUT_NOHUP | ASYNC_AUTOPROBE) #define ASYNC_USR_MASK (ASYNC_SPD_MASK|ASYNC_CALLOUT_NOHUP| \ ASYNC_LOW_LATENCY) #define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI) diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h index 2f6f8cafe773..15273987093e 100644 --- a/include/uapi/linux/v4l2-common.h +++ b/include/uapi/linux/v4l2-common.h @@ -43,6 +43,8 @@ #define V4L2_SEL_TGT_CROP_DEFAULT 0x0001 /* Cropping bounds */ #define V4L2_SEL_TGT_CROP_BOUNDS 0x0002 +/* Native frame size */ +#define V4L2_SEL_TGT_NATIVE_SIZE 0x0003 /* Current composing area */ #define V4L2_SEL_TGT_COMPOSE 0x0100 /* Default composing area */ diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h index 1445e858854f..26db20647e6f 100644 --- a/include/uapi/linux/v4l2-mediabus.h +++ b/include/uapi/linux/v4l2-mediabus.h @@ -11,122 +11,10 @@ #ifndef __LINUX_V4L2_MEDIABUS_H #define __LINUX_V4L2_MEDIABUS_H +#include <linux/media-bus-format.h> #include <linux/types.h> #include <linux/videodev2.h> -/* - * These pixel codes uniquely identify data formats on the media bus. Mostly - * they correspond to similarly named V4L2_PIX_FMT_* formats, format 0 is - * reserved, V4L2_MBUS_FMT_FIXED shall be used by host-client pairs, where the - * data format is fixed. 
Additionally, "2X8" means that one pixel is transferred - * in two 8-bit samples, "BE" or "LE" specify in which order those samples are - * transferred over the bus: "LE" means that the least significant bits are - * transferred first, "BE" means that the most significant bits are transferred - * first, and "PADHI" and "PADLO" define which bits - low or high, in the - * incomplete high byte, are filled with padding bits. - * - * The pixel codes are grouped by type, bus_width, bits per component, samples - * per pixel and order of subsamples. Numerical values are sorted using generic - * numerical sort order (8 thus comes before 10). - * - * As their value can't change when a new pixel code is inserted in the - * enumeration, the pixel codes are explicitly given a numerical value. The next - * free values for each category are listed below, update them when inserting - * new pixel codes. - */ -enum v4l2_mbus_pixelcode { - V4L2_MBUS_FMT_FIXED = 0x0001, - - /* RGB - next is 0x100e */ - V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE = 0x1001, - V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE = 0x1002, - V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE = 0x1003, - V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE = 0x1004, - V4L2_MBUS_FMT_BGR565_2X8_BE = 0x1005, - V4L2_MBUS_FMT_BGR565_2X8_LE = 0x1006, - V4L2_MBUS_FMT_RGB565_2X8_BE = 0x1007, - V4L2_MBUS_FMT_RGB565_2X8_LE = 0x1008, - V4L2_MBUS_FMT_RGB666_1X18 = 0x1009, - V4L2_MBUS_FMT_RGB888_1X24 = 0x100a, - V4L2_MBUS_FMT_RGB888_2X12_BE = 0x100b, - V4L2_MBUS_FMT_RGB888_2X12_LE = 0x100c, - V4L2_MBUS_FMT_ARGB8888_1X32 = 0x100d, - - /* YUV (including grey) - next is 0x2024 */ - V4L2_MBUS_FMT_Y8_1X8 = 0x2001, - V4L2_MBUS_FMT_UV8_1X8 = 0x2015, - V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002, - V4L2_MBUS_FMT_VYUY8_1_5X8 = 0x2003, - V4L2_MBUS_FMT_YUYV8_1_5X8 = 0x2004, - V4L2_MBUS_FMT_YVYU8_1_5X8 = 0x2005, - V4L2_MBUS_FMT_UYVY8_2X8 = 0x2006, - V4L2_MBUS_FMT_VYUY8_2X8 = 0x2007, - V4L2_MBUS_FMT_YUYV8_2X8 = 0x2008, - V4L2_MBUS_FMT_YVYU8_2X8 = 0x2009, - V4L2_MBUS_FMT_Y10_1X10 = 0x200a, - V4L2_MBUS_FMT_UYVY10_2X10 = 0x2018, - V4L2_MBUS_FMT_VYUY10_2X10 = 0x2019, - V4L2_MBUS_FMT_YUYV10_2X10 = 0x200b, - V4L2_MBUS_FMT_YVYU10_2X10 = 0x200c, - V4L2_MBUS_FMT_Y12_1X12 = 0x2013, - V4L2_MBUS_FMT_UYVY8_1X16 = 0x200f, - V4L2_MBUS_FMT_VYUY8_1X16 = 0x2010, - V4L2_MBUS_FMT_YUYV8_1X16 = 0x2011, - V4L2_MBUS_FMT_YVYU8_1X16 = 0x2012, - V4L2_MBUS_FMT_YDYUYDYV8_1X16 = 0x2014, - V4L2_MBUS_FMT_UYVY10_1X20 = 0x201a, - V4L2_MBUS_FMT_VYUY10_1X20 = 0x201b, - V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d, - V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e, - V4L2_MBUS_FMT_YUV10_1X30 = 0x2016, - V4L2_MBUS_FMT_AYUV8_1X32 = 0x2017, - V4L2_MBUS_FMT_UYVY12_2X12 = 0x201c, - V4L2_MBUS_FMT_VYUY12_2X12 = 0x201d, - V4L2_MBUS_FMT_YUYV12_2X12 = 0x201e, - V4L2_MBUS_FMT_YVYU12_2X12 = 0x201f, - V4L2_MBUS_FMT_UYVY12_1X24 = 0x2020, - V4L2_MBUS_FMT_VYUY12_1X24 = 0x2021, - V4L2_MBUS_FMT_YUYV12_1X24 = 0x2022, - V4L2_MBUS_FMT_YVYU12_1X24 = 0x2023, - - /* Bayer - next is 0x3019 */ - V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001, - V4L2_MBUS_FMT_SGBRG8_1X8 = 0x3013, - V4L2_MBUS_FMT_SGRBG8_1X8 = 0x3002, - V4L2_MBUS_FMT_SRGGB8_1X8 = 0x3014, - V4L2_MBUS_FMT_SBGGR10_ALAW8_1X8 = 0x3015, - V4L2_MBUS_FMT_SGBRG10_ALAW8_1X8 = 0x3016, - V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8 = 0x3017, - V4L2_MBUS_FMT_SRGGB10_ALAW8_1X8 = 0x3018, - V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 0x300b, - V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 0x300c, - V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 0x3009, - V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8 = 0x300d, - V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE = 0x3003, - V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE = 0x3004, - V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE 
= 0x3005, - V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE = 0x3006, - V4L2_MBUS_FMT_SBGGR10_1X10 = 0x3007, - V4L2_MBUS_FMT_SGBRG10_1X10 = 0x300e, - V4L2_MBUS_FMT_SGRBG10_1X10 = 0x300a, - V4L2_MBUS_FMT_SRGGB10_1X10 = 0x300f, - V4L2_MBUS_FMT_SBGGR12_1X12 = 0x3008, - V4L2_MBUS_FMT_SGBRG12_1X12 = 0x3010, - V4L2_MBUS_FMT_SGRBG12_1X12 = 0x3011, - V4L2_MBUS_FMT_SRGGB12_1X12 = 0x3012, - - /* JPEG compressed formats - next is 0x4002 */ - V4L2_MBUS_FMT_JPEG_1X8 = 0x4001, - - /* Vendor specific formats - next is 0x5002 */ - - /* S5C73M3 sensor specific interleaved UYVY and JPEG */ - V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8 = 0x5001, - - /* HSV - next is 0x6002 */ - V4L2_MBUS_FMT_AHSV8888_1X32 = 0x6001, -}; - /** * struct v4l2_mbus_framefmt - frame format on the media bus * @width: frame width @@ -134,6 +22,8 @@ enum v4l2_mbus_pixelcode { * @code: data format code (from enum v4l2_mbus_pixelcode) * @field: used interlacing type (from enum v4l2_field) * @colorspace: colorspace of the data (from enum v4l2_colorspace) + * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding) + * @quantization: quantization of the data (from enum v4l2_quantization) */ struct v4l2_mbus_framefmt { __u32 width; @@ -141,7 +31,108 @@ struct v4l2_mbus_framefmt { __u32 code; __u32 field; __u32 colorspace; - __u32 reserved[7]; + __u16 ycbcr_enc; + __u16 quantization; + __u32 reserved[6]; +}; + +#ifndef __KERNEL__ +/* + * enum v4l2_mbus_pixelcode and its definitions are now deprecated, and + * MEDIA_BUS_FMT_ definitions (defined in media-bus-format.h) should be + * used instead. + * + * New defines should only be added to media-bus-format.h. The + * v4l2_mbus_pixelcode enum is frozen. + */ + +#define V4L2_MBUS_FROM_MEDIA_BUS_FMT(name) \ + V4L2_MBUS_FMT_ ## name = MEDIA_BUS_FMT_ ## name + +enum v4l2_mbus_pixelcode { + V4L2_MBUS_FROM_MEDIA_BUS_FMT(FIXED), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB666_1X18), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(ARGB8888_1X32), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UV8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YDYUYDYV8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_1X20), + 
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUV10_1X30), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(AYUV8_1X32), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_1X24), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB12_1X12), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(JPEG_1X8), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(S5C_UYVY_JPEG_1X8), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(AHSV8888_1X32), }; +#endif /* __KERNEL__ */ #endif diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h index a619cdd300ac..e0a7e3da498a 100644 --- a/include/uapi/linux/v4l2-subdev.h +++ b/include/uapi/linux/v4l2-subdev.h @@ -68,7 +68,7 @@ struct v4l2_subdev_crop { * struct v4l2_subdev_mbus_code_enum - Media bus format enumeration * @pad: pad number, as reported by the media API * @index: format index during enumeration - * @code: format code (from enum v4l2_mbus_pixelcode) + * @code: format code (MEDIA_BUS_FMT_ definitions) */ struct v4l2_subdev_mbus_code_enum { __u32 pad; @@ -81,7 +81,7 @@ struct v4l2_subdev_mbus_code_enum { * struct v4l2_subdev_frame_size_enum - Media bus format enumeration * @pad: pad number, as reported by the media API * @index: format index during enumeration - * @code: format code (from enum v4l2_mbus_pixelcode) + * @code: format code (MEDIA_BUS_FMT_ definitions) */ struct v4l2_subdev_frame_size_enum { __u32 index; @@ -109,7 +109,7 @@ struct v4l2_subdev_frame_interval { * struct v4l2_subdev_frame_interval_enum - Frame interval enumeration * @pad: pad number, as reported by the media API * @index: frame interval index during enumeration - * @code: format code (from enum v4l2_mbus_pixelcode) + * @code: format code (MEDIA_BUS_FMT_ definitions) * @width: frame width in pixels * @height: frame height in pixels * @interval: frame interval in seconds diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 1c2f84fd4d99..d279c1b75cf7 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -178,30 +178,103 @@ enum v4l2_memory { /* see also http://vektor.theorem.ca/graphics/ycbcr/ */ enum v4l2_colorspace { - /* ITU-R 601 -- broadcast 
NTSC/PAL */ + /* SMPTE 170M: used for broadcast NTSC/PAL SDTV */ V4L2_COLORSPACE_SMPTE170M = 1, - /* 1125-Line (US) HDTV */ + /* Obsolete pre-1998 SMPTE 240M HDTV standard, superseded by Rec 709 */ V4L2_COLORSPACE_SMPTE240M = 2, - /* HD and modern captures. */ + /* Rec.709: used for HDTV */ V4L2_COLORSPACE_REC709 = 3, - /* broken BT878 extents (601, luma range 16-253 instead of 16-235) */ + /* + * Deprecated, do not use. No driver will ever return this. This was + * based on a misunderstanding of the bt878 datasheet. + */ V4L2_COLORSPACE_BT878 = 4, - /* These should be useful. Assume 601 extents. */ + /* + * NTSC 1953 colorspace. This only makes sense when dealing with + * really, really old NTSC recordings. Superseded by SMPTE 170M. + */ V4L2_COLORSPACE_470_SYSTEM_M = 5, + + /* + * EBU Tech 3213 PAL/SECAM colorspace. This only makes sense when + * dealing with really old PAL/SECAM recordings. Superseded by + * SMPTE 170M. + */ V4L2_COLORSPACE_470_SYSTEM_BG = 6, - /* I know there will be cameras that send this. So, this is - * unspecified chromaticities and full 0-255 on each of the - * Y'CbCr components + /* + * Effectively shorthand for V4L2_COLORSPACE_SRGB, V4L2_YCBCR_ENC_601 + * and V4L2_QUANTIZATION_FULL_RANGE. To be used for (Motion-)JPEG. */ V4L2_COLORSPACE_JPEG = 7, - /* For RGB colourspaces, this is probably a good start. */ + /* For RGB colorspaces such as produces by most webcams. */ V4L2_COLORSPACE_SRGB = 8, + + /* AdobeRGB colorspace */ + V4L2_COLORSPACE_ADOBERGB = 9, + + /* BT.2020 colorspace, used for UHDTV. */ + V4L2_COLORSPACE_BT2020 = 10, +}; + +enum v4l2_ycbcr_encoding { + /* + * Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the + * various colorspaces: + * + * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M, + * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_ADOBERGB and + * V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601 + * + * V4L2_COLORSPACE_REC709: V4L2_YCBCR_ENC_709 + * + * V4L2_COLORSPACE_SRGB: V4L2_YCBCR_ENC_SYCC + * + * V4L2_COLORSPACE_BT2020: V4L2_YCBCR_ENC_BT2020 + * + * V4L2_COLORSPACE_SMPTE240M: V4L2_YCBCR_ENC_SMPTE240M + */ + V4L2_YCBCR_ENC_DEFAULT = 0, + + /* ITU-R 601 -- SDTV */ + V4L2_YCBCR_ENC_601 = 1, + + /* Rec. 709 -- HDTV */ + V4L2_YCBCR_ENC_709 = 2, + + /* ITU-R 601/EN 61966-2-4 Extended Gamut -- SDTV */ + V4L2_YCBCR_ENC_XV601 = 3, + + /* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */ + V4L2_YCBCR_ENC_XV709 = 4, + + /* sYCC (Y'CbCr encoding of sRGB) */ + V4L2_YCBCR_ENC_SYCC = 5, + + /* BT.2020 Non-constant Luminance Y'CbCr */ + V4L2_YCBCR_ENC_BT2020 = 6, + + /* BT.2020 Constant Luminance Y'CbcCrc */ + V4L2_YCBCR_ENC_BT2020_CONST_LUM = 7, + + /* SMPTE 240M -- Obsolete HDTV */ + V4L2_YCBCR_ENC_SMPTE240M = 8, +}; + +enum v4l2_quantization { + /* + * The default for R'G'B' quantization is always full range. For + * Y'CbCr the quantization is always limited range, except for + * SYCC, XV601, XV709 or JPEG: those are full range. 
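These encoding and quantization enums feed the new ycbcr_enc and quantization members added to v4l2_pix_format later in this patch. A minimal capture-side sketch of setting them explicitly; the resolution and pixel format are arbitrary choices, and a driver that cannot honour the hints adjusts them on return:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Request 1080p NV12 with explicit Rec.709 limited-range Y'CbCr. */
static int set_rec709_format(int fd)
{
	struct v4l2_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 1920;
	fmt.fmt.pix.height = 1080;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;
	fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
	fmt.fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_709;
	fmt.fmt.pix.quantization = V4L2_QUANTIZATION_LIM_RANGE;

	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}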
+ */ + V4L2_QUANTIZATION_DEFAULT = 0, + V4L2_QUANTIZATION_FULL_RANGE = 1, + V4L2_QUANTIZATION_LIM_RANGE = 2, }; enum v4l2_priority { @@ -294,6 +367,8 @@ struct v4l2_pix_format { __u32 colorspace; /* enum v4l2_colorspace */ __u32 priv; /* private data, depends on pixelformat */ __u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */ + __u32 ycbcr_enc; /* enum v4l2_ycbcr_encoding */ + __u32 quantization; /* enum v4l2_quantization */ }; /* Pixel format FOURCC depth Description */ @@ -1249,6 +1324,7 @@ struct v4l2_input { #define V4L2_IN_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ #define V4L2_IN_CAP_CUSTOM_TIMINGS V4L2_IN_CAP_DV_TIMINGS /* For compatibility */ #define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */ +#define V4L2_IN_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */ /* * V I D E O O U T P U T S @@ -1272,6 +1348,7 @@ struct v4l2_output { #define V4L2_OUT_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ #define V4L2_OUT_CAP_CUSTOM_TIMINGS V4L2_OUT_CAP_DV_TIMINGS /* For compatibility */ #define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */ +#define V4L2_OUT_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */ /* * C O N T R O L S @@ -1777,6 +1854,8 @@ struct v4l2_plane_pix_format { * @plane_fmt: per-plane information * @num_planes: number of planes for this format * @flags: format flags (V4L2_PIX_FMT_FLAG_*) + * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding + * @quantization: enum v4l2_quantization, colorspace quantization */ struct v4l2_pix_format_mplane { __u32 width; @@ -1788,7 +1867,9 @@ struct v4l2_pix_format_mplane { struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES]; __u8 num_planes; __u8 flags; - __u8 reserved[10]; + __u8 ycbcr_enc; + __u8 quantization; + __u8 reserved[8]; } __attribute__ ((packed)); /** diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index 5e26f61b5df5..be40f7059e93 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -31,6 +31,7 @@ /* The feature bitmap for virtio balloon */ #define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ #define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */ +#define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */ /* Size of a PFN in the balloon interface. */ #define VIRTIO_BALLOON_PFN_SHIFT 12 diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index 9ad67b267584..247c8ba8544a 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> +#include <linux/virtio_types.h> /* Feature bits */ #define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */ @@ -114,18 +115,18 @@ struct virtio_blk_config { /* This is the first element of the read scatter-gather list. */ struct virtio_blk_outhdr { /* VIRTIO_BLK_T* */ - __u32 type; + __virtio32 type; /* io priority. */ - __u32 ioprio; + __virtio32 ioprio; /* Sector (ie. 512 byte offset) */ - __u64 sector; + __virtio64 sector; }; struct virtio_scsi_inhdr { - __u32 errors; - __u32 data_len; - __u32 sense_len; - __u32 residual; + __virtio32 errors; + __virtio32 data_len; + __virtio32 sense_len; + __virtio32 residual; }; /* And this is the final byte of the write scatter-gather list. 
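A brief editorial note on the new __virtio{16,32,64} field types above (the typedefs themselves appear in the new linux/virtio_types.h later in this diff): a legacy device leaves these fields in guest-native byte order, while a VIRTIO 1.0 device always uses little-endian. A minimal userspace-style sketch of that rule, assuming the glibc/BSD <endian.h> helpers; in-kernel code uses dedicated accessors rather than open-coding this:

        /* Sketch only: convert a raw __virtio16 value to host byte order.
         * Legacy (pre-1.0) devices use guest-native endianness, so the value
         * passes through; 1.0 devices are always little-endian. */
        #include <endian.h>
        #include <stdint.h>

        static inline uint16_t virtio16_to_host(int is_legacy, uint16_t raw)
        {
                return is_legacy ? raw : le16toh(raw);
        }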
*/ diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 3ce768c6910d..a6d0cdeaacd4 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -38,14 +38,16 @@ #define VIRTIO_CONFIG_S_DRIVER 2 /* Driver has used its parts of the config, and is happy */ #define VIRTIO_CONFIG_S_DRIVER_OK 4 +/* Driver has finished configuring features */ +#define VIRTIO_CONFIG_S_FEATURES_OK 8 /* We've given up on this device. */ #define VIRTIO_CONFIG_S_FAILED 0x80 -/* Some virtio feature bits (currently bits 28 through 31) are reserved for the +/* Some virtio feature bits (currently bits 28 through 32) are reserved for the * transport being used (eg. virtio_ring), the rest are per-device feature * bits. */ #define VIRTIO_TRANSPORT_F_START 28 -#define VIRTIO_TRANSPORT_F_END 32 +#define VIRTIO_TRANSPORT_F_END 33 /* Do we get callbacks when the ring is completely used, even if we've * suppressed them? */ @@ -54,4 +56,7 @@ /* Can the device handle any descriptor layout? */ #define VIRTIO_F_ANY_LAYOUT 27 +/* v1.0 compliant. */ +#define VIRTIO_F_VERSION_1 32 + #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ diff --git a/include/uapi/linux/virtio_console.h b/include/uapi/linux/virtio_console.h index ba260dd0b33a..b7fb108c9310 100644 --- a/include/uapi/linux/virtio_console.h +++ b/include/uapi/linux/virtio_console.h @@ -32,6 +32,7 @@ #ifndef _UAPI_LINUX_VIRTIO_CONSOLE_H #define _UAPI_LINUX_VIRTIO_CONSOLE_H #include <linux/types.h> +#include <linux/virtio_types.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> @@ -58,9 +59,9 @@ struct virtio_console_config { * particular port. */ struct virtio_console_control { - __u32 id; /* Port number */ - __u16 event; /* The kind of control event (see below) */ - __u16 value; /* Extra information for the key */ + __virtio32 id; /* Port number */ + __virtio16 event; /* The kind of control event (see below) */ + __virtio16 value; /* Extra information for the key */ }; /* Some events for control messages */ diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 172a7f00780c..b5f1677b291c 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> +#include <linux/virtio_types.h> #include <linux/if_ether.h> /* The feature bitmap for virtio net */ @@ -84,17 +85,17 @@ struct virtio_net_hdr { #define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP #define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set __u8 gso_type; - __u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ - __u16 gso_size; /* Bytes to append to hdr_len per frame */ - __u16 csum_start; /* Position to start checksumming from */ - __u16 csum_offset; /* Offset after that to place checksum */ + __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ + __virtio16 gso_size; /* Bytes to append to hdr_len per frame */ + __virtio16 csum_start; /* Position to start checksumming from */ + __virtio16 csum_offset; /* Offset after that to place checksum */ }; /* This is the version of the header to use when the MRG_RXBUF * feature has been negotiated. */ struct virtio_net_hdr_mrg_rxbuf { struct virtio_net_hdr hdr; - __u16 num_buffers; /* Number of merged rx buffers */ + __virtio16 num_buffers; /* Number of merged rx buffers */ }; /* @@ -149,7 +150,7 @@ typedef __u8 virtio_net_ctrl_ack; * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available. 
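For orientation only: the new VIRTIO_CONFIG_S_FEATURES_OK bit slots into the status handshake between feature negotiation and DRIVER_OK. The sketch below uses placeholder transport callbacks (get_status, set_status, negotiate_features); it is not a real kernel API, only an illustration of the ordering the flag implies.

        #include <errno.h>
        #include <stdint.h>
        #include <linux/virtio_config.h>

        /* Placeholder transport accessors -- illustrative, not a real API. */
        struct example_transport {
                uint8_t (*get_status)(struct example_transport *t);
                void (*set_status)(struct example_transport *t, uint8_t s);
                int (*negotiate_features)(struct example_transport *t);
        };

        static int example_feature_handshake(struct example_transport *t)
        {
                uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;

                t->set_status(t, status);
                if (t->negotiate_features(t))   /* may accept VIRTIO_F_VERSION_1 */
                        return -ENODEV;

                status |= VIRTIO_CONFIG_S_FEATURES_OK;
                t->set_status(t, status);

                /* Re-read: the device clears FEATURES_OK if it rejects the set. */
                if (!(t->get_status(t) & VIRTIO_CONFIG_S_FEATURES_OK))
                        return -ENODEV;

                t->set_status(t, status | VIRTIO_CONFIG_S_DRIVER_OK);
                return 0;
        }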
*/ struct virtio_net_ctrl_mac { - __u32 entries; + __virtio32 entries; __u8 macs[][ETH_ALEN]; } __attribute__((packed)); @@ -193,7 +194,7 @@ struct virtio_net_ctrl_mac { * specified. */ struct virtio_net_ctrl_mq { - __u16 virtqueue_pairs; + __virtio16 virtqueue_pairs; }; #define VIRTIO_NET_CTRL_MQ 4 diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index e5ec1caab82a..35b552c7f330 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -41,6 +41,8 @@ #include <linux/virtio_config.h> +#ifndef VIRTIO_PCI_NO_LEGACY + /* A 32-bit r/o bitmask of the features supported by the host */ #define VIRTIO_PCI_HOST_FEATURES 0 @@ -67,16 +69,11 @@ * a read-and-acknowledge. */ #define VIRTIO_PCI_ISR 19 -/* The bit of the ISR which indicates a device configuration change. */ -#define VIRTIO_PCI_ISR_CONFIG 0x2 - /* MSI-X registers: only enabled if MSI-X is enabled. */ /* A 16-bit vector for configuration changes. */ #define VIRTIO_MSI_CONFIG_VECTOR 20 /* A 16-bit vector for selected queue notifications. */ #define VIRTIO_MSI_QUEUE_VECTOR 22 -/* Vector value used to disable MSI for queue */ -#define VIRTIO_MSI_NO_VECTOR 0xffff /* The remaining space is defined by each driver as the per-driver * configuration space */ @@ -94,4 +91,12 @@ /* The alignment to use between consumer and producer parts of vring. * x86 pagesize again. */ #define VIRTIO_PCI_VRING_ALIGN 4096 + +#endif /* VIRTIO_PCI_NO_LEGACY */ + +/* The bit of the ISR which indicates a device configuration change. */ +#define VIRTIO_PCI_ISR_CONFIG 0x2 +/* Vector value used to disable MSI for queue */ +#define VIRTIO_MSI_NO_VECTOR 0xffff + #endif diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index a99f9b7caa67..61c818a7fe70 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -32,6 +32,7 @@ * * Copyright Rusty Russell IBM Corporation 2007. */ #include <linux/types.h> +#include <linux/virtio_types.h> /* This marks a buffer as continuing via the next field. */ #define VRING_DESC_F_NEXT 1 @@ -61,32 +62,32 @@ /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ struct vring_desc { /* Address (guest-physical). */ - __u64 addr; + __virtio64 addr; /* Length. */ - __u32 len; + __virtio32 len; /* The flags as indicated above. */ - __u16 flags; + __virtio16 flags; /* We chain unused descriptors via this, too */ - __u16 next; + __virtio16 next; }; struct vring_avail { - __u16 flags; - __u16 idx; - __u16 ring[]; + __virtio16 flags; + __virtio16 idx; + __virtio16 ring[]; }; /* u32 is used here for ids for padding reasons. */ struct vring_used_elem { /* Index of start of used descriptor chain. */ - __u32 id; + __virtio32 id; /* Total length of the descriptor chain which was used (written to) */ - __u32 len; + __virtio32 len; }; struct vring_used { - __u16 flags; - __u16 idx; + __virtio16 flags; + __virtio16 idx; struct vring_used_elem ring[]; }; @@ -109,25 +110,25 @@ struct vring { * struct vring_desc desc[num]; * * // A ring of available descriptor heads with free-running index. - * __u16 avail_flags; - * __u16 avail_idx; - * __u16 available[num]; - * __u16 used_event_idx; + * __virtio16 avail_flags; + * __virtio16 avail_idx; + * __virtio16 available[num]; + * __virtio16 used_event_idx; * * // Padding to the next align boundary. * char pad[]; * * // A ring of used descriptor heads with free-running index. 
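Editorial note on the virtio_pci.h reshuffle above: guarding the legacy register offsets with VIRTIO_PCI_NO_LEGACY lets modern-only consumers hide them, while VIRTIO_PCI_ISR_CONFIG and VIRTIO_MSI_NO_VECTOR stay visible unconditionally. A minimal sketch of the intended usage:

        /* Sketch: a VIRTIO 1.0-only consumer opts out of the legacy layout. */
        #define VIRTIO_PCI_NO_LEGACY
        #include <linux/virtio_pci.h>

        /* Legacy offsets such as VIRTIO_PCI_HOST_FEATURES are now hidden,
         * but the transport-independent constants remain available: */
        static const unsigned int no_msi_vector = VIRTIO_MSI_NO_VECTOR; /* 0xffff */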
- * __u16 used_flags; - * __u16 used_idx; + * __virtio16 used_flags; + * __virtio16 used_idx; * struct vring_used_elem used[num]; - * __u16 avail_event_idx; + * __virtio16 avail_event_idx; * }; */ /* We publish the used event index at the end of the available ring, and vice * versa. They are at the end for backwards compatibility. */ #define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) -#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num]) +#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num]) static inline void vring_init(struct vring *vr, unsigned int num, void *p, unsigned long align) @@ -135,15 +136,15 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p, vr->num = num; vr->desc = p; vr->avail = p + num*sizeof(struct vring_desc); - vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16) + vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) + align-1) & ~(align - 1)); } static inline unsigned vring_size(unsigned int num, unsigned long align) { - return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) + return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num) + align - 1) & ~(align - 1)) - + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num; + + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num; } /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ diff --git a/include/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h index de429d1f4357..42b9370771b0 100644 --- a/include/linux/virtio_scsi.h +++ b/include/uapi/linux/virtio_scsi.h @@ -27,83 +27,85 @@ #ifndef _LINUX_VIRTIO_SCSI_H #define _LINUX_VIRTIO_SCSI_H +#include <linux/virtio_types.h> + #define VIRTIO_SCSI_CDB_SIZE 32 #define VIRTIO_SCSI_SENSE_SIZE 96 /* SCSI command request, followed by data-out */ struct virtio_scsi_cmd_req { - u8 lun[8]; /* Logical Unit Number */ - u64 tag; /* Command identifier */ - u8 task_attr; /* Task attribute */ - u8 prio; /* SAM command priority field */ - u8 crn; - u8 cdb[VIRTIO_SCSI_CDB_SIZE]; -} __packed; + __u8 lun[8]; /* Logical Unit Number */ + __virtio64 tag; /* Command identifier */ + __u8 task_attr; /* Task attribute */ + __u8 prio; /* SAM command priority field */ + __u8 crn; + __u8 cdb[VIRTIO_SCSI_CDB_SIZE]; +} __attribute__((packed)); /* SCSI command request, followed by protection information */ struct virtio_scsi_cmd_req_pi { - u8 lun[8]; /* Logical Unit Number */ - u64 tag; /* Command identifier */ - u8 task_attr; /* Task attribute */ - u8 prio; /* SAM command priority field */ - u8 crn; - u32 pi_bytesout; /* DataOUT PI Number of bytes */ - u32 pi_bytesin; /* DataIN PI Number of bytes */ - u8 cdb[VIRTIO_SCSI_CDB_SIZE]; -} __packed; + __u8 lun[8]; /* Logical Unit Number */ + __virtio64 tag; /* Command identifier */ + __u8 task_attr; /* Task attribute */ + __u8 prio; /* SAM command priority field */ + __u8 crn; + __virtio32 pi_bytesout; /* DataOUT PI Number of bytes */ + __virtio32 pi_bytesin; /* DataIN PI Number of bytes */ + __u8 cdb[VIRTIO_SCSI_CDB_SIZE]; +} __attribute__((packed)); /* Response, followed by sense data and data-in */ struct virtio_scsi_cmd_resp { - u32 sense_len; /* Sense data length */ - u32 resid; /* Residual bytes in data buffer */ - u16 status_qualifier; /* Status qualifier */ - u8 status; /* Command completion status */ - u8 response; /* Response values */ - u8 sense[VIRTIO_SCSI_SENSE_SIZE]; -} __packed; + __virtio32 sense_len; /* Sense data length */ + __virtio32 resid; /* Residual bytes 
in data buffer */ + __virtio16 status_qualifier; /* Status qualifier */ + __u8 status; /* Command completion status */ + __u8 response; /* Response values */ + __u8 sense[VIRTIO_SCSI_SENSE_SIZE]; +} __attribute__((packed)); /* Task Management Request */ struct virtio_scsi_ctrl_tmf_req { - u32 type; - u32 subtype; - u8 lun[8]; - u64 tag; -} __packed; + __virtio32 type; + __virtio32 subtype; + __u8 lun[8]; + __virtio64 tag; +} __attribute__((packed)); struct virtio_scsi_ctrl_tmf_resp { - u8 response; -} __packed; + __u8 response; +} __attribute__((packed)); /* Asynchronous notification query/subscription */ struct virtio_scsi_ctrl_an_req { - u32 type; - u8 lun[8]; - u32 event_requested; -} __packed; + __virtio32 type; + __u8 lun[8]; + __virtio32 event_requested; +} __attribute__((packed)); struct virtio_scsi_ctrl_an_resp { - u32 event_actual; - u8 response; -} __packed; + __virtio32 event_actual; + __u8 response; +} __attribute__((packed)); struct virtio_scsi_event { - u32 event; - u8 lun[8]; - u32 reason; -} __packed; + __virtio32 event; + __u8 lun[8]; + __virtio32 reason; +} __attribute__((packed)); struct virtio_scsi_config { - u32 num_queues; - u32 seg_max; - u32 max_sectors; - u32 cmd_per_lun; - u32 event_info_size; - u32 sense_size; - u32 cdb_size; - u16 max_channel; - u16 max_target; - u32 max_lun; -} __packed; + __u32 num_queues; + __u32 seg_max; + __u32 max_sectors; + __u32 cmd_per_lun; + __u32 event_info_size; + __u32 sense_size; + __u32 cdb_size; + __u16 max_channel; + __u16 max_target; + __u32 max_lun; +} __attribute__((packed)); /* Feature Bits */ #define VIRTIO_SCSI_F_INOUT 0 diff --git a/include/uapi/linux/virtio_types.h b/include/uapi/linux/virtio_types.h new file mode 100644 index 000000000000..e845e8c4cbee --- /dev/null +++ b/include/uapi/linux/virtio_types.h @@ -0,0 +1,46 @@ +#ifndef _UAPI_LINUX_VIRTIO_TYPES_H +#define _UAPI_LINUX_VIRTIO_TYPES_H +/* Type definitions for virtio implementations. + * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * Copyright (C) 2014 Red Hat, Inc. + * Author: Michael S. Tsirkin <mst@redhat.com> + */ +#include <linux/types.h> + +/* + * __virtio{16,32,64} have the following meaning: + * - __u{16,32,64} for virtio devices in legacy mode, accessed in native endian + * - __le{16,32,64} for standard-compliant virtio devices + */ + +typedef __u16 __bitwise__ __virtio16; +typedef __u32 __bitwise__ __virtio32; +typedef __u64 __bitwise__ __virtio64; + +#endif /* _UAPI_LINUX_VIRTIO_TYPES_H */ diff --git a/include/uapi/linux/vt.h b/include/uapi/linux/vt.h index 4b59a26799a3..978578bd1895 100644 --- a/include/uapi/linux/vt.h +++ b/include/uapi/linux/vt.h @@ -84,7 +84,4 @@ struct vt_setactivate { #define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ - -#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1) - #endif /* _UAPI_LINUX_VT_H */ diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 26daf55ff76e..4275b961bf60 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -90,8 +90,9 @@ enum { }; enum { + IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE, IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, - IB_USER_VERBS_EX_CMD_DESTROY_FLOW + IB_USER_VERBS_EX_CMD_DESTROY_FLOW, }; /* @@ -201,6 +202,32 @@ struct ib_uverbs_query_device_resp { __u8 reserved[4]; }; +enum { + IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0, +}; + +struct ib_uverbs_ex_query_device { + __u32 comp_mask; + __u32 reserved; +}; + +struct ib_uverbs_odp_caps { + __u64 general_caps; + struct { + __u32 rc_odp_caps; + __u32 uc_odp_caps; + __u32 ud_odp_caps; + } per_transport_caps; + __u32 reserved; +}; + +struct ib_uverbs_ex_query_device_resp { + struct ib_uverbs_query_device_resp base; + __u32 comp_mask; + __u32 reserved; + struct ib_uverbs_odp_caps odp_caps; +}; + struct ib_uverbs_query_port { __u64 response; __u8 port_num; diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 941d32f007dc..1f23cd635957 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -96,9 +96,10 @@ enum { SNDRV_HWDEP_IFACE_FW_DICE, /* TC DICE FireWire device */ SNDRV_HWDEP_IFACE_FW_FIREWORKS, /* Echo Audio Fireworks based device */ SNDRV_HWDEP_IFACE_FW_BEBOB, /* BridgeCo BeBoB based device */ + SNDRV_HWDEP_IFACE_FW_OXFW, /* Oxford OXFW970/971 based device */ /* Don't forget to change the following: */ - SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_BEBOB + SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_OXFW }; struct snd_hwdep_info { diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index 1964026b5e09..22ed8cb7800b 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -32,7 +32,7 @@ #define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2) /** - * struct snd_compressed_buffer: compressed buffer + * struct snd_compressed_buffer - compressed buffer * @fragment_size: size of buffer fragment in bytes * @fragments: number of such fragments */ @@ -42,7 +42,7 @@ struct snd_compressed_buffer { } __attribute__((packed, aligned(4))); /** - * struct snd_compr_params: compressed stream params + * struct snd_compr_params - compressed stream params * @buffer: buffer description * @codec: codec parameters * @no_wake_mode: dont wake on fragment elapsed @@ -54,7 +54,7 @@ struct snd_compr_params { } __attribute__((packed, aligned(4))); /** - * struct snd_compr_tstamp: timestamp descriptor + * struct snd_compr_tstamp - timestamp 
descriptor * @byte_offset: Byte offset in ring buffer to DSP * @copied_total: Total number of bytes copied from/to ring buffer to/by DSP * @pcm_frames: Frames decoded or encoded by DSP. This field will evolve by @@ -73,7 +73,7 @@ struct snd_compr_tstamp { } __attribute__((packed, aligned(4))); /** - * struct snd_compr_avail: avail descriptor + * struct snd_compr_avail - avail descriptor * @avail: Number of bytes available in ring buffer for writing/reading * @tstamp: timestamp infomation */ @@ -88,7 +88,7 @@ enum snd_compr_direction { }; /** - * struct snd_compr_caps: caps descriptor + * struct snd_compr_caps - caps descriptor * @codecs: pointer to array of codecs * @direction: direction supported. Of type snd_compr_direction * @min_fragment_size: minimum fragment supported by DSP @@ -110,7 +110,7 @@ struct snd_compr_caps { } __attribute__((packed, aligned(4))); /** - * struct snd_compr_codec_caps: query capability of codec + * struct snd_compr_codec_caps - query capability of codec * @codec: codec for which capability is queried * @num_descriptors: number of codec descriptors * @descriptor: array of codec capability descriptor @@ -122,18 +122,19 @@ struct snd_compr_codec_caps { } __attribute__((packed, aligned(4))); /** + * enum sndrv_compress_encoder * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the * end of the track * @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the * beginning of the track */ -enum { +enum sndrv_compress_encoder { SNDRV_COMPRESS_ENCODER_PADDING = 1, SNDRV_COMPRESS_ENCODER_DELAY = 2, }; /** - * struct snd_compr_metadata: compressed stream metadata + * struct snd_compr_metadata - compressed stream metadata * @key: key id * @value: key value */ diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h index af4bd136c75d..49122df3b56b 100644 --- a/include/uapi/sound/firewire.h +++ b/include/uapi/sound/firewire.h @@ -55,7 +55,8 @@ union snd_firewire_event { #define SNDRV_FIREWIRE_TYPE_DICE 1 #define SNDRV_FIREWIRE_TYPE_FIREWORKS 2 #define SNDRV_FIREWIRE_TYPE_BEBOB 3 -/* AV/C, RME, MOTU, ... */ +#define SNDRV_FIREWIRE_TYPE_OXFW 4 +/* RME, MOTU, ... */ struct snd_firewire_get_info { unsigned int type; /* SNDRV_FIREWIRE_TYPE_xxx */ diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h index d956c3593f65..b357f1a5e29c 100644 --- a/include/uapi/sound/hdspm.h +++ b/include/uapi/sound/hdspm.h @@ -74,14 +74,14 @@ struct hdspm_config { #define SNDRV_HDSPM_IOCTL_GET_CONFIG \ _IOR('H', 0x41, struct hdspm_config) -/** +/* * If there's a TCO (TimeCode Option) board installed, * there are further options and status data available. * The hdspm_ltc structure contains the current SMPTE * timecode and some status information and can be * obtained via SNDRV_HDSPM_IOCTL_GET_LTC or in the * hdspm_status struct. - **/ + */ enum hdspm_ltc_format { format_invalid, @@ -113,11 +113,11 @@ struct hdspm_ltc { #define SNDRV_HDSPM_IOCTL_GET_LTC _IOR('H', 0x46, struct hdspm_ltc) -/** +/* * The status data reflects the device's current state * as determined by the card's configuration and * connection status. - **/ + */ enum hdspm_sync { hdspm_sync_no_lock = 0, @@ -171,9 +171,9 @@ struct hdspm_status { #define SNDRV_HDSPM_IOCTL_GET_STATUS \ _IOR('H', 0x47, struct hdspm_status) -/** +/* * Get information about the card and its add-ons. 
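Editorial aside on the comment changes above: compress_offload.h is adjusted to valid kernel-doc, which expects "struct name - short description" (not "name:") and a named enum so its constants can be documented, while hdspm.h drops the "/**" opener from comments that are not kernel-doc at all. A tiny illustration of the expected kernel-doc shape, using a hypothetical struct:

        /**
         * struct example_desc - one-line summary ("name - description", not "name:")
         * @size:  size of one buffer fragment in bytes
         * @count: number of fragments
         *
         * Kernel-doc also needs named enums (e.g. enum sndrv_compress_encoder)
         * so their members can be documented and cross-referenced.
         */
        struct example_desc {
                unsigned int size;
                unsigned int count;
        };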
- **/ + */ #define HDSPM_ADDON_TCO 1 diff --git a/include/video/omapdss.h b/include/video/omapdss.h index 069dfca9549a..6a84498ea513 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h @@ -166,13 +166,6 @@ enum omap_dss_display_state { OMAP_DSS_DISPLAY_ACTIVE, }; -enum omap_dss_audio_state { - OMAP_DSS_AUDIO_DISABLED = 0, - OMAP_DSS_AUDIO_ENABLED, - OMAP_DSS_AUDIO_CONFIGURED, - OMAP_DSS_AUDIO_PLAYING, -}; - struct omap_dss_audio { struct snd_aes_iec958 *iec; struct snd_cea_861_aud_if *cea; @@ -635,19 +628,6 @@ struct omapdss_hdmi_ops { int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); int (*set_infoframe)(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi); - - /* - * Note: These functions might sleep. Do not call while - * holding a spinlock/readlock. - */ - int (*audio_enable)(struct omap_dss_device *dssdev); - void (*audio_disable)(struct omap_dss_device *dssdev); - bool (*audio_supported)(struct omap_dss_device *dssdev); - int (*audio_config)(struct omap_dss_device *dssdev, - struct omap_dss_audio *audio); - /* Note: These functions may not sleep */ - int (*audio_start)(struct omap_dss_device *dssdev); - void (*audio_stop)(struct omap_dss_device *dssdev); }; struct omapdss_dsi_ops { @@ -783,8 +763,6 @@ struct omap_dss_device { enum omap_dss_display_state state; - enum omap_dss_audio_state audio_state; - /* OMAP DSS output specific fields */ struct list_head list; @@ -795,6 +773,9 @@ struct omap_dss_device { /* output instance */ enum omap_dss_output_id id; + /* the port number in the DT node */ + int port_num; + /* dynamic fields */ struct omap_overlay_manager *manager; @@ -858,24 +839,6 @@ struct omap_dss_driver { int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi); - - /* - * For display drivers that support audio. This encompasses - * HDMI and DisplayPort at the moment. - */ - /* - * Note: These functions might sleep. Do not call while - * holding a spinlock/readlock. 
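For context, an editorial sketch rather than code from this patch: with the new port_num field and the omap_dss_find_output_by_port_node() lookup above, a downstream encoder or connector driver would typically resolve its upstream DSS output from the remote DT port, roughly as below. Error handling is minimal and the of_graph helpers are assumed to be available.

        #include <linux/of.h>
        #include <linux/of_graph.h>
        #include <video/omapdss.h>

        /* Sketch: map a DT endpoint to its DSS output via the remote port node. */
        static struct omap_dss_device *example_find_src(struct device_node *endpoint)
        {
                struct device_node *port = of_graph_get_remote_port(endpoint);
                struct omap_dss_device *src;

                if (!port)
                        return NULL;

                src = omap_dss_find_output_by_port_node(port);
                of_node_put(port);

                return src;
        }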
- */ - int (*audio_enable)(struct omap_dss_device *dssdev); - void (*audio_disable)(struct omap_dss_device *dssdev); - bool (*audio_supported)(struct omap_dss_device *dssdev); - int (*audio_config)(struct omap_dss_device *dssdev, - struct omap_dss_audio *audio); - /* Note: These functions may not sleep */ - int (*audio_start)(struct omap_dss_device *dssdev); - void (*audio_stop)(struct omap_dss_device *dssdev); - }; enum omapdss_version omapdss_get_version(void); @@ -918,7 +881,7 @@ int omapdss_register_output(struct omap_dss_device *output); void omapdss_unregister_output(struct omap_dss_device *output); struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id); struct omap_dss_device *omap_dss_find_output(const char *name); -struct omap_dss_device *omap_dss_find_output_by_node(struct device_node *node); +struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port); int omapdss_output_set_device(struct omap_dss_device *out, struct omap_dss_device *dssdev); int omapdss_output_unset_device(struct omap_dss_device *out); diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h index 14334d0161d5..131a6ccdba25 100644 --- a/include/xen/interface/features.h +++ b/include/xen/interface/features.h @@ -53,9 +53,6 @@ /* operation as Dom0 is supported */ #define XENFEAT_dom0 11 -/* Xen also maps grant references at pfn = mfn */ -#define XENFEAT_grant_map_identity 12 - #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h index e40fae9bf11a..bcce56439d64 100644 --- a/include/xen/interface/grant_table.h +++ b/include/xen/interface/grant_table.h @@ -479,6 +479,25 @@ struct gnttab_get_version { DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version); /* + * Issue one or more cache maintenance operations on a portion of a + * page granted to the calling domain by a foreign domain. + */ +#define GNTTABOP_cache_flush 12 +struct gnttab_cache_flush { + union { + uint64_t dev_bus_addr; + grant_ref_t ref; + } a; + uint16_t offset; /* offset from start of grant */ + uint16_t length; /* size within the grant */ +#define GNTTAB_CACHE_CLEAN (1<<0) +#define GNTTAB_CACHE_INVAL (1<<1) +#define GNTTAB_CACHE_SOURCE_GREF (1<<31) + uint32_t op; +}; +DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush); + +/* * Bitfield values for update_pin_status.flags. */ /* Map the grant entry for access by I/O devices. */ |
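Closing editorial sketch: the new GNTTABOP_cache_flush operation is issued like any other grant-table hypercall. Assuming the usual arch-provided HYPERVISOR_grant_table_op() wrapper and a page granted by reference, a clean+invalidate of one whole 4 KiB grant could look like this:

        #include <xen/interface/grant_table.h>
        #include <asm/xen/hypercall.h>  /* assumed: HYPERVISOR_grant_table_op() */

        /* Sketch: flush the cache for one granted 4 KiB page, addressed by
         * grant reference (GNTTAB_CACHE_SOURCE_GREF selects a.ref over
         * a.dev_bus_addr). */
        static int example_cache_flush(grant_ref_t ref)
        {
                struct gnttab_cache_flush cflush = {
                        .a.ref  = ref,
                        .offset = 0,
                        .length = 4096,
                        .op     = GNTTAB_CACHE_CLEAN | GNTTAB_CACHE_INVAL |
                                  GNTTAB_CACHE_SOURCE_GREF,
                };

                return HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
        }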