Diffstat (limited to 'include')
40 files changed, 833 insertions, 293 deletions
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index f0b44c16e88f..c2bae8da642c 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -82,6 +82,14 @@ int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst); void ahash_free_instance(struct crypto_instance *inst); +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + +static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) +{ + return alg->setkey != shash_no_setkey; +} + int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, struct hash_alg_common *alg, struct crypto_instance *inst); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index a4649c56ca2f..5971577016a2 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -24,6 +24,7 @@ #define __DRM_CONNECTOR_H__ #include <linux/list.h> +#include <linux/llist.h> #include <linux/ctype.h> #include <linux/hdmi.h> #include <drm/drm_mode_object.h> @@ -918,12 +919,13 @@ struct drm_connector { uint16_t tile_h_size, tile_v_size; /** - * @free_work: + * @free_node: * - * Work used only by &drm_connector_iter to be able to clean up a - * connector from any context. + * List used only by &drm_connector_iter to be able to clean up a + * connector from any context, in conjunction with + * &drm_mode_config.connector_free_work. */ - struct work_struct free_work; + struct llist_node free_node; }; #define obj_to_connector(x) container_of(x, struct drm_connector, base) diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 2ec41d032e56..efe6d5a8e834 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -465,6 +465,8 @@ struct edid *drm_get_edid(struct drm_connector *connector, struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, struct i2c_adapter *adapter); struct edid *drm_edid_duplicate(const struct edid *edid); +void drm_reset_display_info(struct drm_connector *connector); +u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid); int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); u8 drm_match_cea_mode(const struct drm_display_mode *to_match); diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index b21e827c5c78..b0ce26d71296 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -27,6 +27,7 @@ #include <linux/types.h> #include <linux/idr.h> #include <linux/workqueue.h> +#include <linux/llist.h> #include <drm/drm_modeset_lock.h> @@ -393,7 +394,7 @@ struct drm_mode_config { /** * @connector_list_lock: Protects @num_connector and - * @connector_list. + * @connector_list and @connector_free_list. */ spinlock_t connector_list_lock; /** @@ -414,6 +415,21 @@ struct drm_mode_config { */ struct list_head connector_list; /** + * @connector_free_list: + * + * List of connector objects linked with &drm_connector.free_head. + * Protected by @connector_list_lock. Used by + * drm_for_each_connector_iter() and + * &struct drm_connector_list_iter to savely free connectors using + * @connector_free_work. + */ + struct llist_head connector_free_list; + /** + * @connector_free_work: Work to clean up @connector_free_list. + */ + struct work_struct connector_free_work; + + /** * @num_encoder: * * Number of encoders on this device. 
This is invariant over the diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h new file mode 100644 index 000000000000..2c005376ac0e --- /dev/null +++ b/include/dt-bindings/bus/ti-sysc.h @@ -0,0 +1,22 @@ +/* TI sysc interconnect target module defines */ + +/* Generic sysc found on omap2 and later, also known as type1 */ +#define SYSC_OMAP2_CLOCKACTIVITY (3 << 8) +#define SYSC_OMAP2_EMUFREE (1 << 5) +#define SYSC_OMAP2_ENAWAKEUP (1 << 2) +#define SYSC_OMAP2_SOFTRESET (1 << 1) +#define SYSC_OMAP2_AUTOIDLE (1 << 0) + +/* Generic sysc found on omap4 and later, also known as type2 */ +#define SYSC_OMAP4_DMADISABLE (1 << 16) +#define SYSC_OMAP4_FREEEMU (1 << 1) /* Also known as EMUFREE */ +#define SYSC_OMAP4_SOFTRESET (1 << 0) + +/* SmartReflex sysc found on 36xx and later */ +#define SYSC_OMAP3_SR_ENAWAKEUP (1 << 26) + +/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */ +#define SYSC_IDLE_FORCE 0 +#define SYSC_IDLE_NO 1 +#define SYSC_IDLE_SMART 2 +#define SYSC_IDLE_SMART_WKUP 3 diff --git a/include/dt-bindings/memory/tegra186-mc.h b/include/dt-bindings/memory/tegra186-mc.h new file mode 100644 index 000000000000..64813536aec9 --- /dev/null +++ b/include/dt-bindings/memory/tegra186-mc.h @@ -0,0 +1,111 @@ +#ifndef DT_BINDINGS_MEMORY_TEGRA186_MC_H +#define DT_BINDINGS_MEMORY_TEGRA186_MC_H + +/* special clients */ +#define TEGRA186_SID_INVALID 0x00 +#define TEGRA186_SID_PASSTHROUGH 0x7f + +/* host1x clients */ +#define TEGRA186_SID_HOST1X 0x01 +#define TEGRA186_SID_CSI 0x02 +#define TEGRA186_SID_VIC 0x03 +#define TEGRA186_SID_VI 0x04 +#define TEGRA186_SID_ISP 0x05 +#define TEGRA186_SID_NVDEC 0x06 +#define TEGRA186_SID_NVENC 0x07 +#define TEGRA186_SID_NVJPG 0x08 +#define TEGRA186_SID_NVDISPLAY 0x09 +#define TEGRA186_SID_TSEC 0x0a +#define TEGRA186_SID_TSECB 0x0b +#define TEGRA186_SID_SE 0x0c +#define TEGRA186_SID_SE1 0x0d +#define TEGRA186_SID_SE2 0x0e +#define TEGRA186_SID_SE3 0x0f + +/* GPU clients */ +#define TEGRA186_SID_GPU 0x10 + +/* other SoC clients */ +#define TEGRA186_SID_AFI 0x11 +#define TEGRA186_SID_HDA 0x12 +#define TEGRA186_SID_ETR 0x13 +#define TEGRA186_SID_EQOS 0x14 +#define TEGRA186_SID_UFSHC 0x15 +#define TEGRA186_SID_AON 0x16 +#define TEGRA186_SID_SDMMC4 0x17 +#define TEGRA186_SID_SDMMC3 0x18 +#define TEGRA186_SID_SDMMC2 0x19 +#define TEGRA186_SID_SDMMC1 0x1a +#define TEGRA186_SID_XUSB_HOST 0x1b +#define TEGRA186_SID_XUSB_DEV 0x1c +#define TEGRA186_SID_SATA 0x1d +#define TEGRA186_SID_APE 0x1e +#define TEGRA186_SID_SCE 0x1f + +/* GPC DMA clients */ +#define TEGRA186_SID_GPCDMA_0 0x20 +#define TEGRA186_SID_GPCDMA_1 0x21 +#define TEGRA186_SID_GPCDMA_2 0x22 +#define TEGRA186_SID_GPCDMA_3 0x23 +#define TEGRA186_SID_GPCDMA_4 0x24 +#define TEGRA186_SID_GPCDMA_5 0x25 +#define TEGRA186_SID_GPCDMA_6 0x26 +#define TEGRA186_SID_GPCDMA_7 0x27 + +/* APE DMA clients */ +#define TEGRA186_SID_APE_1 0x28 +#define TEGRA186_SID_APE_2 0x29 + +/* camera RTCPU */ +#define TEGRA186_SID_RCE 0x2a + +/* camera RTCPU on host1x address space */ +#define TEGRA186_SID_RCE_1X 0x2b + +/* APE DMA clients */ +#define TEGRA186_SID_APE_3 0x2c + +/* camera RTCPU running on APE */ +#define TEGRA186_SID_APE_CAM 0x2d +#define TEGRA186_SID_APE_CAM_1X 0x2e + +/* + * The BPMP has its SID value hardcoded in the firmware. Changing it requires + * considerable effort. 
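/*
 * Illustrative sketch, not part of the diff: composing a SYSCONFIG bitmask
 * from the type1 defines in the new dt-bindings/bus/ti-sysc.h header above.
 * Only the SYSC_OMAP2_* macros come from that header; the variable name is
 * made up for illustration.
 */
#include <dt-bindings/bus/ti-sysc.h>

/* Register bits a hypothetical type1 interconnect target module supports. */
static const unsigned int example_type1_sysc_mask =
	SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_ENAWAKEUP |
	SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE;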
+ */ +#define TEGRA186_SID_BPMP 0x32 + +/* for SMMU tests */ +#define TEGRA186_SID_SMMU_TEST 0x33 + +/* host1x virtualization channels */ +#define TEGRA186_SID_HOST1X_CTX0 0x38 +#define TEGRA186_SID_HOST1X_CTX1 0x39 +#define TEGRA186_SID_HOST1X_CTX2 0x3a +#define TEGRA186_SID_HOST1X_CTX3 0x3b +#define TEGRA186_SID_HOST1X_CTX4 0x3c +#define TEGRA186_SID_HOST1X_CTX5 0x3d +#define TEGRA186_SID_HOST1X_CTX6 0x3e +#define TEGRA186_SID_HOST1X_CTX7 0x3f + +/* host1x command buffers */ +#define TEGRA186_SID_HOST1X_VM0 0x40 +#define TEGRA186_SID_HOST1X_VM1 0x41 +#define TEGRA186_SID_HOST1X_VM2 0x42 +#define TEGRA186_SID_HOST1X_VM3 0x43 +#define TEGRA186_SID_HOST1X_VM4 0x44 +#define TEGRA186_SID_HOST1X_VM5 0x45 +#define TEGRA186_SID_HOST1X_VM6 0x46 +#define TEGRA186_SID_HOST1X_VM7 0x47 + +/* SE data buffers */ +#define TEGRA186_SID_SE_VM0 0x48 +#define TEGRA186_SID_SE_VM1 0x49 +#define TEGRA186_SID_SE_VM2 0x4a +#define TEGRA186_SID_SE_VM3 0x4b +#define TEGRA186_SID_SE_VM4 0x4c +#define TEGRA186_SID_SE_VM5 0x4d +#define TEGRA186_SID_SE_VM6 0x4e +#define TEGRA186_SID_SE_VM7 0x4f + +#endif diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h new file mode 100644 index 000000000000..ad6f55dabd6d --- /dev/null +++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h @@ -0,0 +1,124 @@ +/* + * + * Copyright (c) 2016 BayLibre, SAS. + * Author: Neil Armstrong <narmstrong@baylibre.com> + * + * Copyright (c) 2017 Amlogic, inc. + * Author: Yixun Lan <yixun.lan@amlogic.com> + * + * SPDX-License-Identifier: (GPL-2.0+ OR BSD) + */ + +#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H +#define _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H + +/* RESET0 */ +#define RESET_HIU 0 +#define RESET_PCIE_A 1 +#define RESET_PCIE_B 2 +#define RESET_DDR_TOP 3 +/* 4 */ +#define RESET_VIU 5 +#define RESET_PCIE_PHY 6 +#define RESET_PCIE_APB 7 +/* 8 */ +/* 9 */ +#define RESET_VENC 10 +#define RESET_ASSIST 11 +/* 12 */ +#define RESET_VCBUS 13 +/* 14 */ +/* 15 */ +#define RESET_GIC 16 +#define RESET_CAPB3_DECODE 17 +/* 18-21 */ +#define RESET_SYS_CPU_CAPB3 22 +#define RESET_CBUS_CAPB3 23 +#define RESET_AHB_CNTL 24 +#define RESET_AHB_DATA 25 +#define RESET_VCBUS_CLK81 26 +#define RESET_MMC 27 +/* 28-31 */ +/* RESET1 */ +/* 32 */ +/* 33 */ +#define RESET_USB_OTG 34 +#define RESET_DDR 35 +#define RESET_AO_RESET 36 +/* 37 */ +#define RESET_AHB_SRAM 38 +/* 39 */ +/* 40 */ +#define RESET_DMA 41 +#define RESET_ISA 42 +#define RESET_ETHERNET 43 +/* 44 */ +#define RESET_SD_EMMC_B 45 +#define RESET_SD_EMMC_C 46 +#define RESET_ROM_BOOT 47 +#define RESET_SYS_CPU_0 48 +#define RESET_SYS_CPU_1 49 +#define RESET_SYS_CPU_2 50 +#define RESET_SYS_CPU_3 51 +#define RESET_SYS_CPU_CORE_0 52 +#define RESET_SYS_CPU_CORE_1 53 +#define RESET_SYS_CPU_CORE_2 54 +#define RESET_SYS_CPU_CORE_3 55 +#define RESET_SYS_PLL_DIV 56 +#define RESET_SYS_CPU_AXI 57 +#define RESET_SYS_CPU_L2 58 +#define RESET_SYS_CPU_P 59 +#define RESET_SYS_CPU_MBIST 60 +/* 61-63 */ +/* RESET2 */ +/* 64 */ +/* 65 */ +#define RESET_AUDIO 66 +/* 67 */ +#define RESET_MIPI_HOST 68 +#define RESET_AUDIO_LOCKER 69 +#define RESET_GE2D 70 +/* 71-76 */ +#define RESET_AO_CPU_RESET 77 +/* 78-95 */ +/* RESET3 */ +#define RESET_RING_OSCILLATOR 96 +/* 97-127 */ +/* RESET4 */ +/* 128 */ +/* 129 */ +#define RESET_MIPI_PHY 130 +/* 131-140 */ +#define RESET_VENCL 141 +#define RESET_I2C_MASTER_2 142 +#define RESET_I2C_MASTER_1 143 +/* 144-159 */ +/* RESET5 */ +/* 160-191 */ +/* RESET6 */ +#define RESET_PERIPHS_GENERAL 192 +#define RESET_PERIPHS_SPICC 193 
+/* 194 */ +/* 195 */ +#define RESET_PERIPHS_I2C_MASTER_0 196 +/* 197-200 */ +#define RESET_PERIPHS_UART_0 201 +#define RESET_PERIPHS_UART_1 202 +/* 203-204 */ +#define RESET_PERIPHS_SPI_0 205 +#define RESET_PERIPHS_I2C_MASTER_3 206 +/* 207-223 */ +/* RESET7 */ +#define RESET_USB_DDR_0 224 +#define RESET_USB_DDR_1 225 +#define RESET_USB_DDR_2 226 +#define RESET_USB_DDR_3 227 +/* 228 */ +#define RESET_DEVICE_MMC_ARB 229 +/* 230 */ +#define RESET_VID_LOCK 231 +#define RESET_A9_DMC_PIPEL 232 +#define RESET_DMC_VPU_PIPEL 233 +/* 234-255 */ + +#endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 188ed9f65517..52e611ab9a6c 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s /* * Prevent the compiler from merging or refetching reads or writes. The * compiler is also forbidden from reordering successive instances of - * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the - * compiler is aware of some particular ordering. One way to make the - * compiler aware of ordering is to put the two invocations of READ_ONCE, - * WRITE_ONCE or ACCESS_ONCE() in different C statements. + * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some + * particular ordering. One way to make the compiler aware of ordering is to + * put the two invocations of READ_ONCE or WRITE_ONCE in different C + * statements. * - * In contrast to ACCESS_ONCE these two macros will also work on aggregate - * data types like structs or unions. If the size of the accessed data - * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) - * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at - * least two memcpy()s: one for the __builtin_memcpy() and then one for - * the macro doing the copy of variable - '__u' allocated on the stack. + * These two macros will also work on aggregate data types like structs or + * unions. If the size of the accessed data type exceeds the word size of + * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will + * fall back to memcpy(). There's at least two memcpy()s: one for the + * __builtin_memcpy() and then one for the macro doing the copy of variable + * - '__u' allocated on the stack. * * Their two major use cases are: (1) Mediating communication between * process-level code and irq/NMI handlers, all running on the same CPU, - * and (2) Ensuring that the compiler does not fold, spindle, or otherwise + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise * mutilate accesses that either do not require ordering or that interact * with an explicit memory barrier or atomic instruction that provides the * required ordering. @@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s compiletime_assert(__native_word(t), \ "Need native word sized stores/loads for atomicity.") -/* - * Prevent the compiler from merging or refetching accesses. The compiler - * is also forbidden from reordering successive instances of ACCESS_ONCE(), - * but only when the compiler is aware of some particular ordering. One way - * to make the compiler aware of ordering is to put the two invocations of - * ACCESS_ONCE() in different C statements. - * - * ACCESS_ONCE will only work on scalar types. 
For union types, ACCESS_ONCE - * on a union member will work as long as the size of the member matches the - * size of the union and the size is smaller than word size. - * - * The major use cases of ACCESS_ONCE used to be (1) Mediating communication - * between process-level code and irq/NMI handlers, all running on the same CPU, - * and (2) Ensuring that the compiler does not fold, spindle, or otherwise - * mutilate accesses that either do not require ordering or that interact - * with an explicit memory barrier or atomic instruction that provides the - * required ordering. - * - * If possible use READ_ONCE()/WRITE_ONCE() instead. - */ -#define __ACCESS_ONCE(x) ({ \ - __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ - (volatile typeof(x) *)&(x); }) -#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) - #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/completion.h b/include/linux/completion.h index 0662a417febe..94a59ba7d422 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -10,9 +10,6 @@ */ #include <linux/wait.h> -#ifdef CONFIG_LOCKDEP_COMPLETIONS -#include <linux/lockdep.h> -#endif /* * struct completion - structure used to maintain state for a "completion" @@ -29,58 +26,16 @@ struct completion { unsigned int done; wait_queue_head_t wait; -#ifdef CONFIG_LOCKDEP_COMPLETIONS - struct lockdep_map_cross map; -#endif }; -#ifdef CONFIG_LOCKDEP_COMPLETIONS -static inline void complete_acquire(struct completion *x) -{ - lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_); -} - -static inline void complete_release(struct completion *x) -{ - lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_); -} - -static inline void complete_release_commit(struct completion *x) -{ - lock_commit_crosslock((struct lockdep_map *)&x->map); -} - -#define init_completion_map(x, m) \ -do { \ - lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ - (m)->name, (m)->key, 0); \ - __init_completion(x); \ -} while (0) - -#define init_completion(x) \ -do { \ - static struct lock_class_key __key; \ - lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ - "(completion)" #x, \ - &__key, 0); \ - __init_completion(x); \ -} while (0) -#else #define init_completion_map(x, m) __init_completion(x) #define init_completion(x) __init_completion(x) static inline void complete_acquire(struct completion *x) {} static inline void complete_release(struct completion *x) {} static inline void complete_release_commit(struct completion *x) {} -#endif -#ifdef CONFIG_LOCKDEP_COMPLETIONS -#define COMPLETION_INITIALIZER(work) \ - { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \ - STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) } -#else #define COMPLETION_INITIALIZER(work) \ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } -#endif #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ (*({ init_completion_map(&(work), &(map)); &(work); })) diff --git a/include/linux/cred.h b/include/linux/cred.h index 099058e1178b..631286535d0f 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *); extern void set_groups(struct cred *, struct group_info *); extern int groups_search(const struct group_info *, kgid_t); extern bool may_setgroups(void); +extern void groups_sort(struct group_info *); /* * The security context of a task diff --git a/include/linux/idr.h b/include/linux/idr.h index 7c3a365f7e12..fa14f834e4ed 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -15,6 +15,7 @@ 
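/*
 * Illustrative sketch, not part of the diff: the READ_ONCE()/WRITE_ONCE()
 * pattern described by the rewritten compiler.h comment above, which now
 * replaces the removed ACCESS_ONCE(). The flag and the two functions below
 * are hypothetical.
 */
#include <linux/compiler.h>

static int example_flag;

static void example_publish(void)
{
	/* The compiler may not tear, merge or re-emit this store. */
	WRITE_ONCE(example_flag, 1);
}

static int example_snapshot(void)
{
	/* Forces a fresh, untorn load of example_flag on every call. */
	return READ_ONCE(example_flag);
}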
#include <linux/radix-tree.h> #include <linux/gfp.h> #include <linux/percpu.h> +#include <linux/bug.h> struct idr { struct radix_tree_root idr_rt; diff --git a/include/linux/pti.h b/include/linux/intel-pti.h index b3ea01a3197e..2710d72de3c9 100644 --- a/include/linux/pti.h +++ b/include/linux/intel-pti.h @@ -22,8 +22,8 @@ * interface to write out it's contents for debugging a mobile system. */ -#ifndef PTI_H_ -#define PTI_H_ +#ifndef LINUX_INTEL_PTI_H_ +#define LINUX_INTEL_PTI_H_ /* offset for last dword of any PTI message. Part of MIPI P1149.7 */ #define PTI_LASTDWORD_DTS 0x30 @@ -40,4 +40,4 @@ struct pti_masterchannel *pti_request_masterchannel(u8 type, const char *thread_name); void pti_release_masterchannel(struct pti_masterchannel *mc); -#endif /*PTI_H_*/ +#endif /* LINUX_INTEL_PTI_H_ */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index a842551fe044..2e75dc34bff5 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -158,12 +158,6 @@ struct lockdep_map { int cpu; unsigned long ip; #endif -#ifdef CONFIG_LOCKDEP_CROSSRELEASE - /* - * Whether it's a crosslock. - */ - int cross; -#endif }; static inline void lockdep_copy_map(struct lockdep_map *to, @@ -267,96 +261,9 @@ struct held_lock { unsigned int hardirqs_off:1; unsigned int references:12; /* 32 bits */ unsigned int pin_count; -#ifdef CONFIG_LOCKDEP_CROSSRELEASE - /* - * Generation id. - * - * A value of cross_gen_id will be stored when holding this, - * which is globally increased whenever each crosslock is held. - */ - unsigned int gen_id; -#endif -}; - -#ifdef CONFIG_LOCKDEP_CROSSRELEASE -#define MAX_XHLOCK_TRACE_ENTRIES 5 - -/* - * This is for keeping locks waiting for commit so that true dependencies - * can be added at commit step. - */ -struct hist_lock { - /* - * Id for each entry in the ring buffer. This is used to - * decide whether the ring buffer was overwritten or not. - * - * For example, - * - * |<----------- hist_lock ring buffer size ------->| - * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii - * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii....................... - * - * where 'p' represents an acquisition in process - * context, 'i' represents an acquisition in irq - * context. - * - * In this example, the ring buffer was overwritten by - * acquisitions in irq context, that should be detected on - * rollback or commit. - */ - unsigned int hist_id; - - /* - * Seperate stack_trace data. This will be used at commit step. - */ - struct stack_trace trace; - unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES]; - - /* - * Seperate hlock instance. This will be used at commit step. - * - * TODO: Use a smaller data structure containing only necessary - * data. However, we should make lockdep code able to handle the - * smaller one first. - */ - struct held_lock hlock; }; /* - * To initialize a lock as crosslock, lockdep_init_map_crosslock() should - * be called instead of lockdep_init_map(). - */ -struct cross_lock { - /* - * When more than one acquisition of crosslocks are overlapped, - * we have to perform commit for them based on cross_gen_id of - * the first acquisition, which allows us to add more true - * dependencies. - * - * Moreover, when no acquisition of a crosslock is in progress, - * we should not perform commit because the lock might not exist - * any more, which might cause incorrect memory access. So we - * have to track the number of acquisitions of a crosslock. - */ - int nr_acquire; - - /* - * Seperate hlock instance. This will be used at commit step. 
- * - * TODO: Use a smaller data structure containing only necessary - * data. However, we should make lockdep code able to handle the - * smaller one first. - */ - struct held_lock hlock; -}; - -struct lockdep_map_cross { - struct lockdep_map map; - struct cross_lock xlock; -}; -#endif - -/* * Initialization, self-test and debugging-output methods: */ extern void lockdep_info(void); @@ -560,37 +467,6 @@ enum xhlock_context_t { XHLOCK_CTX_NR, }; -#ifdef CONFIG_LOCKDEP_CROSSRELEASE -extern void lockdep_init_map_crosslock(struct lockdep_map *lock, - const char *name, - struct lock_class_key *key, - int subclass); -extern void lock_commit_crosslock(struct lockdep_map *lock); - -/* - * What we essencially have to initialize is 'nr_acquire'. Other members - * will be initialized in add_xlock(). - */ -#define STATIC_CROSS_LOCK_INIT() \ - { .nr_acquire = 0,} - -#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \ - { .map.name = (_name), .map.key = (void *)(_key), \ - .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), } - -/* - * To initialize a lockdep_map statically use this macro. - * Note that _name must not be NULL. - */ -#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ - { .name = (_name), .key = (void *)(_key), .cross = 0, } - -extern void crossrelease_hist_start(enum xhlock_context_t c); -extern void crossrelease_hist_end(enum xhlock_context_t c); -extern void lockdep_invariant_state(bool force); -extern void lockdep_init_task(struct task_struct *task); -extern void lockdep_free_task(struct task_struct *task); -#else /* !CROSSRELEASE */ #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) /* * To initialize a lockdep_map statically use this macro. @@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {} static inline void lockdep_invariant_state(bool force) {} static inline void lockdep_init_task(struct task_struct *task) {} static inline void lockdep_free_task(struct task_struct *task) {} -#endif /* CROSSRELEASE */ #ifdef CONFIG_LOCK_STAT diff --git a/include/linux/oom.h b/include/linux/oom.h index 01c91d874a57..5bad038ac012 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -67,6 +67,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk) } /* + * Use this helper if tsk->mm != mm and the victim mm needs a special + * handling. This is guaranteed to stay true after once set. + */ +static inline bool mm_is_oom_victim(struct mm_struct *mm) +{ + return test_bit(MMF_OOM_VICTIM, &mm->flags); +} + +/* * Checks whether a page fault on the given mm is still reliable. 
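/*
 * Illustrative sketch, not part of the diff: a hypothetical caller of the
 * mm_is_oom_victim() helper added to include/linux/oom.h above. Only the
 * helper comes from the patch; the function below is invented.
 */
#include <linux/mm_types.h>
#include <linux/oom.h>

static bool example_should_skip_mm(struct mm_struct *mm)
{
	/*
	 * MMF_OOM_VICTIM is never cleared once set, so a true result here
	 * stays valid for the remaining lifetime of the mm.
	 */
	return mm_is_oom_victim(mm);
}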
* This is no longer true if the oom reaper started to reap the * address space which is reflected by MMF_UNSTABLE flag set in diff --git a/include/linux/pci.h b/include/linux/pci.h index 0403894147a3..c170c9250c8b 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1674,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) { return NULL; } +static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, + unsigned int bus, unsigned int devfn) +{ return NULL; } static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h new file mode 100644 index 000000000000..1be356330b96 --- /dev/null +++ b/include/linux/platform_data/ti-sysc.h @@ -0,0 +1,86 @@ +#ifndef __TI_SYSC_DATA_H__ +#define __TI_SYSC_DATA_H__ + +enum ti_sysc_module_type { + TI_SYSC_OMAP2, + TI_SYSC_OMAP2_TIMER, + TI_SYSC_OMAP3_SHAM, + TI_SYSC_OMAP3_AES, + TI_SYSC_OMAP4, + TI_SYSC_OMAP4_TIMER, + TI_SYSC_OMAP4_SIMPLE, + TI_SYSC_OMAP34XX_SR, + TI_SYSC_OMAP36XX_SR, + TI_SYSC_OMAP4_SR, + TI_SYSC_OMAP4_MCASP, + TI_SYSC_OMAP4_USB_HOST_FS, +}; + +/** + * struct sysc_regbits - TI OCP_SYSCONFIG register field offsets + * @midle_shift: Offset of the midle bit + * @clkact_shift: Offset of the clockactivity bit + * @sidle_shift: Offset of the sidle bit + * @enwkup_shift: Offset of the enawakeup bit + * @srst_shift: Offset of the softreset bit + * @autoidle_shift: Offset of the autoidle bit + * @dmadisable_shift: Offset of the dmadisable bit + * @emufree_shift; Offset of the emufree bit + * + * Note that 0 is a valid shift, and for ti-sysc.c -ENODEV can be used if a + * feature is not available. 
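/*
 * Illustrative sketch, not part of the diff: filling in struct sysc_regbits
 * (defined just below) for a hypothetical interconnect target module, using
 * -ENODEV for register fields the module does not implement, as the
 * kerneldoc above suggests. All offsets here are invented.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/platform_data/ti-sysc.h>

static const struct sysc_regbits example_sysc_regbits = {
	.midle_shift	= -ENODEV,	/* no master idle field */
	.clkact_shift	= 8,
	.sidle_shift	= 3,
	.enwkup_shift	= 2,
	.srst_shift	= 1,
	.autoidle_shift	= 0,
	.dmadisable_shift = -ENODEV,	/* no dmadisable field */
	.emufree_shift	= 5,
};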
+ */ +struct sysc_regbits { + s8 midle_shift; + s8 clkact_shift; + s8 sidle_shift; + s8 enwkup_shift; + s8 srst_shift; + s8 autoidle_shift; + s8 dmadisable_shift; + s8 emufree_shift; +}; + +#define SYSC_QUIRK_RESET_STATUS BIT(7) +#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6) +#define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5) +#define SYSC_QUIRK_OPT_CLKS_NEEDED BIT(4) +#define SYSC_QUIRK_OPT_CLKS_IN_RESET BIT(3) +#define SYSC_QUIRK_16BIT BIT(2) +#define SYSC_QUIRK_UNCACHED BIT(1) +#define SYSC_QUIRK_USE_CLOCKACT BIT(0) + +#define SYSC_NR_IDLEMODES 4 + +/** + * struct sysc_capabilities - capabilities for an interconnect target module + * + * @sysc_mask: bitmask of supported SYSCONFIG register bits + * @regbits: bitmask of SYSCONFIG register bits + * @mod_quirks: bitmask of module specific quirks + */ +struct sysc_capabilities { + const enum ti_sysc_module_type type; + const u32 sysc_mask; + const struct sysc_regbits *regbits; + const u32 mod_quirks; +}; + +/** + * struct sysc_config - configuration for an interconnect target module + * @sysc_val: configured value for sysc register + * @midlemodes: bitmask of supported master idle modes + * @sidlemodes: bitmask of supported master idle modes + * @srst_udelay: optional delay needed after OCP soft reset + * @quirks: bitmask of enabled quirks + */ +struct sysc_config { + u32 sysc_val; + u32 syss_mask; + u8 midlemodes; + u8 sidlemodes; + u8 srst_udelay; + u32 quirks; +}; + +#endif /* __TI_SYSC_DATA_H__ */ diff --git a/include/linux/pm.h b/include/linux/pm.h index 65d39115f06d..492ed473ba7e 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -765,6 +765,7 @@ extern int pm_generic_poweroff_late(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); +extern void dev_pm_skip_next_resume_phases(struct device *dev); extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); #else /* !CONFIG_PM_SLEEP */ diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 37b4bb2545b3..6866df4f31b5 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r) /* Note: callers invoking this in a loop must use a compiler barrier, * for example cpu_relax(). Callers must hold producer_lock. + * Callers are responsible for making sure pointer that is being queued + * points to a valid data. */ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) { if (unlikely(!r->size) || r->queue[r->producer]) return -ENOSPC; + /* Make sure the pointer we are storing points to a valid data. */ + /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ + smp_wmb(); + r->queue[r->producer++] = ptr; if (unlikely(r->producer >= r->size)) r->producer = 0; @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) if (ptr) __ptr_ring_discard_one(r); + /* Make sure anyone accessing data through the pointer is up to date. */ + /* Pairs with smp_wmb in __ptr_ring_produce. 
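/*
 * Illustrative sketch, not part of the diff: the ordering guarantee the
 * smp_wmb()/smp_read_barrier_depends() pair added above provides. A producer
 * initialises an object with plain stores before queuing its pointer; the
 * consumer can then safely dereference the pointer it dequeues. The item
 * structure and both functions are hypothetical.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ptr_ring.h>
#include <linux/slab.h>

struct example_item {
	int value;
};

static int example_produce(struct ptr_ring *ring, int value)
{
	struct example_item *item = kmalloc(sizeof(*item), GFP_KERNEL);

	if (!item)
		return -ENOMEM;
	item->value = value;	/* plain store, ordered by the producer's smp_wmb() */
	return ptr_ring_produce(ring, item);
}

static void example_consume(struct ptr_ring *ring)
{
	struct example_item *item = ptr_ring_consume(ring);

	if (item) {
		pr_info("consumed %d\n", item->value);	/* sees the initialised value */
		kfree(item);
	}
}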
*/ + smp_read_barrier_depends(); return ptr; } diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 1fd27d68926b..b401b962afff 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -13,6 +13,9 @@ #ifndef __QCOM_SCM_H #define __QCOM_SCM_H +#include <linux/types.h> +#include <linux/cpumask.h> + #define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) #define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index d574361943ea..fcbeed4053ef 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, struct rb_root *root); +extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, + struct rb_root_cached *root); static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, struct rb_node **rb_link) diff --git a/include/linux/reset.h b/include/linux/reset.h index 4c7871ddf3c6..09732c36f351 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -2,8 +2,10 @@ #ifndef _LINUX_RESET_H_ #define _LINUX_RESET_H_ -#include <linux/device.h> +#include <linux/types.h> +struct device; +struct device_node; struct reset_control; #ifdef CONFIG_RESET_CONTROLLER @@ -20,22 +22,16 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id, int index, bool shared, bool optional); void reset_control_put(struct reset_control *rstc); +int __device_reset(struct device *dev, bool optional); struct reset_control *__devm_reset_control_get(struct device *dev, const char *id, int index, bool shared, bool optional); -int __must_check device_reset(struct device *dev); - struct reset_control *devm_reset_control_array_get(struct device *dev, bool shared, bool optional); struct reset_control *of_reset_control_array_get(struct device_node *np, bool shared, bool optional); -static inline int device_reset_optional(struct device *dev) -{ - return device_reset(dev); -} - #else static inline int reset_control_reset(struct reset_control *rstc) @@ -62,15 +58,9 @@ static inline void reset_control_put(struct reset_control *rstc) { } -static inline int __must_check device_reset(struct device *dev) -{ - WARN_ON(1); - return -ENOTSUPP; -} - -static inline int device_reset_optional(struct device *dev) +static inline int __device_reset(struct device *dev, bool optional) { - return -ENOTSUPP; + return optional ? 0 : -ENOTSUPP; } static inline struct reset_control *__of_reset_control_get( @@ -109,6 +99,16 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional) #endif /* CONFIG_RESET_CONTROLLER */ +static inline int __must_check device_reset(struct device *dev) +{ + return __device_reset(dev, false); +} + +static inline int device_reset_optional(struct device *dev) +{ + return __device_reset(dev, true); +} + /** * reset_control_get_exclusive - Lookup and obtain an exclusive reference * to a reset controller. 
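/*
 * Illustrative sketch, not part of the diff: a probe routine using the
 * reworked device_reset_optional() wrapper shown above. With
 * CONFIG_RESET_CONTROLLER disabled the optional variant now returns 0
 * instead of -ENOTSUPP, so such a driver keeps probing. The function name
 * is hypothetical.
 */
#include <linux/device.h>
#include <linux/reset.h>

static int example_probe(struct device *dev)
{
	/* Reset the device if a reset line is described, otherwise carry on. */
	int ret = device_reset_optional(dev);

	if (ret)
		return ret;

	/* ... rest of probe ... */
	return 0;
}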
@@ -127,9 +127,6 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional) static inline struct reset_control * __must_check reset_control_get_exclusive(struct device *dev, const char *id) { -#ifndef CONFIG_RESET_CONTROLLER - WARN_ON(1); -#endif return __reset_control_get(dev, id, 0, false, false); } @@ -275,9 +272,6 @@ static inline struct reset_control * __must_check devm_reset_control_get_exclusive(struct device *dev, const char *id) { -#ifndef CONFIG_RESET_CONTROLLER - WARN_ON(1); -#endif return __devm_reset_control_get(dev, id, 0, false, false); } @@ -350,18 +344,6 @@ devm_reset_control_get_shared_by_index(struct device *dev, int index) * These inline function calls will be removed once all consumers * have been moved over to the new explicit API. */ -static inline struct reset_control *reset_control_get( - struct device *dev, const char *id) -{ - return reset_control_get_exclusive(dev, id); -} - -static inline struct reset_control *reset_control_get_optional( - struct device *dev, const char *id) -{ - return reset_control_get_optional_exclusive(dev, id); -} - static inline struct reset_control *of_reset_control_get( struct device_node *node, const char *id) { diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index cc0072e93e36..857a72ceb794 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -10,9 +10,6 @@ */ typedef struct { arch_rwlock_t raw_lock; -#ifdef CONFIG_GENERIC_LOCKBREAK - unsigned int break_lock; -#endif #ifdef CONFIG_DEBUG_SPINLOCK unsigned int magic, owner_cpu; void *owner; diff --git a/include/linux/sched.h b/include/linux/sched.h index 21991d668d35..d2588263a989 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -849,17 +849,6 @@ struct task_struct { struct held_lock held_locks[MAX_LOCK_DEPTH]; #endif -#ifdef CONFIG_LOCKDEP_CROSSRELEASE -#define MAX_XHLOCKS_NR 64UL - struct hist_lock *xhlocks; /* Crossrelease history locks */ - unsigned int xhlock_idx; - /* For restoring at history boundaries */ - unsigned int xhlock_idx_hist[XHLOCK_CTX_NR]; - unsigned int hist_id; - /* For overwrite check at each context exit */ - unsigned int hist_id_save[XHLOCK_CTX_NR]; -#endif - #ifdef CONFIG_UBSAN unsigned int in_ubsan; #endif @@ -1503,7 +1492,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from) __set_task_comm(tsk, from, false); } -extern char *get_task_comm(char *to, struct task_struct *tsk); +extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); +#define get_task_comm(buf, tsk) ({ \ + BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ + __get_task_comm(buf, sizeof(buf), tsk); \ +}) #ifdef CONFIG_SMP void scheduler_ipi(void); diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 9c8847395b5e..ec912d01126f 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ +#define MMF_OOM_VICTIM 25 /* mm is the oom victim */ #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h index 12e548938bbb..8e884e0dda0a 100644 --- a/include/linux/soc/brcmstb/brcmstb.h +++ 
b/include/linux/soc/brcmstb/brcmstb.h @@ -13,12 +13,6 @@ static inline u32 BRCM_REV(u32 reg) } /* - * Bus Interface Unit control register setup, must happen early during boot, - * before SMP is brought up, called by machine entry point. - */ -void brcmstb_biuctrl_init(void); - -/* * Helper functions for getting family or product id from the * SoC driver. */ diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h new file mode 100644 index 000000000000..f4de33654a60 --- /dev/null +++ b/include/linux/soc/qcom/qmi.h @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2017, Linaro Ltd. + */ +#ifndef __QMI_HELPERS_H__ +#define __QMI_HELPERS_H__ + +#include <linux/completion.h> +#include <linux/idr.h> +#include <linux/list.h> +#include <linux/qrtr.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +struct socket; + +/** + * qmi_header - wireformat header of QMI messages + * @type: type of message + * @txn_id: transaction id + * @msg_id: message id + * @msg_len: length of message payload following header + */ +struct qmi_header { + u8 type; + u16 txn_id; + u16 msg_id; + u16 msg_len; +} __packed; + +#define QMI_REQUEST 0 +#define QMI_RESPONSE 2 +#define QMI_INDICATION 4 + +#define QMI_COMMON_TLV_TYPE 0 + +enum qmi_elem_type { + QMI_EOTI, + QMI_OPT_FLAG, + QMI_DATA_LEN, + QMI_UNSIGNED_1_BYTE, + QMI_UNSIGNED_2_BYTE, + QMI_UNSIGNED_4_BYTE, + QMI_UNSIGNED_8_BYTE, + QMI_SIGNED_2_BYTE_ENUM, + QMI_SIGNED_4_BYTE_ENUM, + QMI_STRUCT, + QMI_STRING, +}; + +enum qmi_array_type { + NO_ARRAY, + STATIC_ARRAY, + VAR_LEN_ARRAY, +}; + +/** + * struct qmi_elem_info - describes how to encode a single QMI element + * @data_type: Data type of this element. + * @elem_len: Array length of this element, if an array. + * @elem_size: Size of a single instance of this data type. + * @array_type: Array type of this element. + * @tlv_type: QMI message specific type to identify which element + * is present in an incoming message. + * @offset: Specifies the offset of the first instance of this + * element in the data structure. + * @ei_array: Null-terminated array of @qmi_elem_info to describe nested + * structures. 
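/*
 * Illustrative sketch, not part of the diff: a qmi_elem_info array (the
 * structure is defined just below) describing a minimal request carrying a
 * single 32-bit value, terminated by a QMI_EOTI entry. The message layout
 * and TLV type 0x01 are invented for illustration.
 */
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/soc/qcom/qmi.h>

struct example_req {
	u32 value;
};

static struct qmi_elem_info example_req_ei[] = {
	{
		.data_type	= QMI_UNSIGNED_4_BYTE,
		.elem_len	= 1,
		.elem_size	= sizeof(u32),
		.array_type	= NO_ARRAY,
		.tlv_type	= 0x01,
		.offset		= offsetof(struct example_req, value),
	},
	{
		.data_type	= QMI_EOTI,
		.array_type	= NO_ARRAY,
	},
};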
+ */ +struct qmi_elem_info { + enum qmi_elem_type data_type; + u32 elem_len; + u32 elem_size; + enum qmi_array_type array_type; + u8 tlv_type; + u32 offset; + struct qmi_elem_info *ei_array; +}; + +#define QMI_RESULT_SUCCESS_V01 0 +#define QMI_RESULT_FAILURE_V01 1 + +#define QMI_ERR_NONE_V01 0 +#define QMI_ERR_MALFORMED_MSG_V01 1 +#define QMI_ERR_NO_MEMORY_V01 2 +#define QMI_ERR_INTERNAL_V01 3 +#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5 +#define QMI_ERR_INVALID_ID_V01 41 +#define QMI_ERR_ENCODING_V01 58 +#define QMI_ERR_INCOMPATIBLE_STATE_V01 90 +#define QMI_ERR_NOT_SUPPORTED_V01 94 + +/** + * qmi_response_type_v01 - common response header (decoded) + * @result: result of the transaction + * @error: error value, when @result is QMI_RESULT_FAILURE_V01 + */ +struct qmi_response_type_v01 { + u16 result; + u16 error; +}; + +extern struct qmi_elem_info qmi_response_type_v01_ei[]; + +/** + * struct qmi_service - context to track lookup-results + * @service: service type + * @version: version of the @service + * @instance: instance id of the @service + * @node: node of the service + * @port: port of the service + * @priv: handle for client's use + * @list_node: list_head for house keeping + */ +struct qmi_service { + unsigned int service; + unsigned int version; + unsigned int instance; + + unsigned int node; + unsigned int port; + + void *priv; + struct list_head list_node; +}; + +struct qmi_handle; + +/** + * struct qmi_ops - callbacks for qmi_handle + * @new_server: inform client of a new_server lookup-result, returning + * successfully from this call causes the library to call + * @del_server as the service is removed from the + * lookup-result. @priv of the qmi_service can be used by + * the client + * @del_server: inform client of a del_server lookup-result + * @net_reset: inform client that the name service was restarted and + * that and any state needs to be released + * @msg_handler: invoked for incoming messages, allows a client to + * override the usual QMI message handler + * @bye: inform a client that all clients from a node are gone + * @del_client: inform a client that a particular client is gone + */ +struct qmi_ops { + int (*new_server)(struct qmi_handle *qmi, struct qmi_service *svc); + void (*del_server)(struct qmi_handle *qmi, struct qmi_service *svc); + void (*net_reset)(struct qmi_handle *qmi); + void (*msg_handler)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + const void *data, size_t count); + void (*bye)(struct qmi_handle *qmi, unsigned int node); + void (*del_client)(struct qmi_handle *qmi, + unsigned int node, unsigned int port); +}; + +/** + * struct qmi_txn - transaction context + * @qmi: QMI handle this transaction is associated with + * @id: transaction id + * @lock: for synchronization between handler and waiter of messages + * @completion: completion object as the transaction receives a response + * @result: result code for the completed transaction + * @ei: description of the QMI encoded response (optional) + * @dest: destination buffer to decode message into (optional) + */ +struct qmi_txn { + struct qmi_handle *qmi; + + int id; + + struct mutex lock; + struct completion completion; + int result; + + struct qmi_elem_info *ei; + void *dest; +}; + +/** + * struct qmi_msg_handler - description of QMI message handler + * @type: type of message + * @msg_id: message id + * @ei: description of the QMI encoded message + * @decoded_size: size of the decoded object + * @fn: function to invoke as the message is decoded + */ +struct qmi_msg_handler { + unsigned int 
type; + unsigned int msg_id; + + struct qmi_elem_info *ei; + + size_t decoded_size; + void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *decoded); +}; + +/** + * struct qmi_handle - QMI context + * @sock: socket handle + * @sock_lock: synchronization of @sock modifications + * @sq: sockaddr of @sock + * @work: work for handling incoming messages + * @wq: workqueue to post @work on + * @recv_buf: scratch buffer for handling incoming messages + * @recv_buf_size: size of @recv_buf + * @lookups: list of registered lookup requests + * @lookup_results: list of lookup-results advertised to the client + * @services: list of registered services (by this client) + * @ops: reference to callbacks + * @txns: outstanding transactions + * @txn_lock: lock for modifications of @txns + * @handlers: list of handlers for incoming messages + */ +struct qmi_handle { + struct socket *sock; + struct mutex sock_lock; + + struct sockaddr_qrtr sq; + + struct work_struct work; + struct workqueue_struct *wq; + + void *recv_buf; + size_t recv_buf_size; + + struct list_head lookups; + struct list_head lookup_results; + struct list_head services; + + struct qmi_ops ops; + + struct idr txns; + struct mutex txn_lock; + + const struct qmi_msg_handler *handlers; +}; + +int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance); +int qmi_add_server(struct qmi_handle *qmi, unsigned int service, + unsigned int version, unsigned int instance); + +int qmi_handle_init(struct qmi_handle *qmi, size_t max_msg_len, + const struct qmi_ops *ops, + const struct qmi_msg_handler *handlers); +void qmi_handle_release(struct qmi_handle *qmi); + +ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct); +ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, int msg_id, size_t len, + struct qmi_elem_info *ei, const void *c_struct); +ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + int msg_id, size_t len, struct qmi_elem_info *ei, + const void *c_struct); + +void *qmi_encode_message(int type, unsigned int msg_id, size_t *len, + unsigned int txn_id, struct qmi_elem_info *ei, + const void *c_struct); + +int qmi_decode_message(const void *buf, size_t len, + struct qmi_elem_info *ei, void *c_struct); + +int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn, + struct qmi_elem_info *ei, void *c_struct); +int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout); +void qmi_txn_cancel(struct qmi_txn *txn); + +#endif diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index a39186194cd6..3bf273538840 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -107,16 +107,11 @@ do { \ #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) -#ifdef CONFIG_GENERIC_LOCKBREAK -#define raw_spin_is_contended(lock) ((lock)->break_lock) -#else - #ifdef arch_spin_is_contended #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) #else #define raw_spin_is_contended(lock) (((void)(lock), 0)) #endif /*arch_spin_is_contended*/ -#endif /* * This barrier must provide two things: diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 73548eb13a5d..24b4e6f2c1a2 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -19,9 +19,6 @@ typedef struct raw_spinlock 
{ arch_spinlock_t raw_lock; -#ifdef CONFIG_GENERIC_LOCKBREAK - unsigned int break_lock; -#endif #ifdef CONFIG_DEBUG_SPINLOCK unsigned int magic, owner_cpu; void *owner; diff --git a/include/linux/string.h b/include/linux/string.h index 410ecf17de3c..cfd83eb2f926 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) { __kernel_size_t ret; size_t p_size = __builtin_object_size(p, 0); - if (p_size == (size_t)-1) + + /* Work around gcc excess stack consumption issue */ + if (p_size == (size_t)-1 || + (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) return __builtin_strlen(p); ret = strnlen(p, p_size); if (p_size <= ret) diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 230a1ebbf3bc..a2b3dfcee0b5 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -453,4 +453,16 @@ static inline int tee_shm_get_id(struct tee_shm *shm) */ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); +static inline bool tee_param_is_memref(struct tee_param *param) +{ + switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + return true; + default: + return false; + } +} + #endif /*__TEE_DRV_H*/ diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h new file mode 100644 index 000000000000..45bc6b376492 --- /dev/null +++ b/include/linux/ti-emif-sram.h @@ -0,0 +1,69 @@ +/* + * TI AM33XX EMIF Routines + * + * Copyright (C) 2016-2017 Texas Instruments Inc. + * Dave Gerlach + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
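/*
 * Illustrative sketch, not part of the diff: using the tee_param_is_memref()
 * helper added to include/linux/tee_drv.h above to pick out memref
 * parameters in a parameter array. The function below is hypothetical.
 */
#include <linux/tee_drv.h>
#include <linux/types.h>

static size_t example_count_memrefs(struct tee_param *params, size_t num_params)
{
	size_t i, count = 0;

	for (i = 0; i < num_params; i++)
		if (tee_param_is_memref(&params[i]))
			count++;	/* MEMREF_{INPUT,OUTPUT,INOUT} only */

	return count;
}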
+ */ +#ifndef __LINUX_TI_EMIF_H +#define __LINUX_TI_EMIF_H + +#include <linux/kbuild.h> +#include <linux/types.h> +#ifndef __ASSEMBLY__ + +struct emif_regs_amx3 { + u32 emif_sdcfg_val; + u32 emif_timing1_val; + u32 emif_timing2_val; + u32 emif_timing3_val; + u32 emif_ref_ctrl_val; + u32 emif_zqcfg_val; + u32 emif_pmcr_val; + u32 emif_pmcr_shdw_val; + u32 emif_rd_wr_level_ramp_ctrl; + u32 emif_rd_wr_exec_thresh; + u32 emif_cos_config; + u32 emif_priority_to_cos_mapping; + u32 emif_connect_id_serv_1_map; + u32 emif_connect_id_serv_2_map; + u32 emif_ocp_config_val; + u32 emif_lpddr2_nvm_tim; + u32 emif_lpddr2_nvm_tim_shdw; + u32 emif_dll_calib_ctrl_val; + u32 emif_dll_calib_ctrl_val_shdw; + u32 emif_ddr_phy_ctlr_1; + u32 emif_ext_phy_ctrl_vals[120]; +}; + +struct ti_emif_pm_data { + void __iomem *ti_emif_base_addr_virt; + phys_addr_t ti_emif_base_addr_phys; + unsigned long ti_emif_sram_config; + struct emif_regs_amx3 *regs_virt; + phys_addr_t regs_phys; +} __packed __aligned(8); + +struct ti_emif_pm_functions { + u32 save_context; + u32 restore_context; + u32 enter_sr; + u32 exit_sr; + u32 abort_sr; +} __packed __aligned(8); + +struct gen_pool; + +int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst); +int ti_emif_get_mem_type(void); + +#endif +#endif /* __LINUX_TI_EMIF_H */ diff --git a/include/linux/trace.h b/include/linux/trace.h index d24991c1fef3..b95ffb2188ab 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -18,7 +18,7 @@ */ struct trace_export { struct trace_export __rcu *next; - void (*write)(const void *, unsigned int); + void (*write)(struct trace_export *, const void *, unsigned int); }; int register_ftrace_export(struct trace_export *export); diff --git a/include/net/gue.h b/include/net/gue.h index 2fdb29ca74c2..fdad41469b65 100644 --- a/include/net/gue.h +++ b/include/net/gue.h @@ -44,10 +44,10 @@ struct guehdr { #else #error "Please fix <asm/byteorder.h>" #endif - __u8 proto_ctype; - __u16 flags; + __u8 proto_ctype; + __be16 flags; }; - __u32 word; + __be32 word; }; }; @@ -84,11 +84,10 @@ static inline size_t guehdr_priv_flags_len(__be32 flags) * if there is an unknown standard or private flags, or the options length for * the flags exceeds the options length specific in hlen of the GUE header. 
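/*
 * Illustrative sketch, not part of the diff: how a receive path might call
 * validate_gue_flags() after the __be16/__be32 fixes above, deriving the
 * option length from guehdr->hlen (counted in 32-bit words). The wrapper
 * function is hypothetical.
 */
#include <linux/errno.h>
#include <net/gue.h>

static int example_check_guehdr(struct guehdr *guehdr)
{
	size_t optlen = guehdr->hlen << 2;

	if (validate_gue_flags(guehdr, optlen))
		return -EINVAL;	/* unknown flag bits or options overrun hlen */

	return 0;
}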
*/ -static inline int validate_gue_flags(struct guehdr *guehdr, - size_t optlen) +static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen) { + __be16 flags = guehdr->flags; size_t len; - __be32 flags = guehdr->flags; if (flags & ~GUE_FLAGS_ALL) return 1; @@ -101,12 +100,13 @@ static inline int validate_gue_flags(struct guehdr *guehdr, /* Private flags are last four bytes accounted in * guehdr_flags_len */ - flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV); + __be32 pflags = *(__be32 *)((void *)&guehdr[1] + + len - GUE_LEN_PRIV); - if (flags & ~GUE_PFLAGS_ALL) + if (pflags & ~GUE_PFLAGS_ALL) return 1; - len += guehdr_priv_flags_len(flags); + len += guehdr_priv_flags_len(pflags); if (len > optlen) return 1; } diff --git a/include/net/ip.h b/include/net/ip.h index 9896f46cbbf1..af8addbaa3c1 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -34,6 +34,7 @@ #include <net/flow_dissector.h> #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ +#define IPV4_MIN_MTU 68 /* RFC 791 */ struct sock; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 65d0d25f2648..83a3e47d5845 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -71,6 +71,7 @@ struct Qdisc { * qdisc_tree_decrease_qlen() should stop. */ #define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ +#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */ u32 limit; const struct Qdisc_ops *ops; struct qdisc_size_table __rcu *stab; diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h index 44202ff897fd..233bae954970 100644 --- a/include/soc/tegra/mc.h +++ b/include/soc/tegra/mc.h @@ -51,6 +51,12 @@ struct tegra_smmu_swgroup { unsigned int reg; }; +struct tegra_smmu_group_soc { + const char *name; + const unsigned int *swgroups; + unsigned int num_swgroups; +}; + struct tegra_smmu_soc { const struct tegra_mc_client *clients; unsigned int num_clients; @@ -58,6 +64,9 @@ struct tegra_smmu_soc { const struct tegra_smmu_swgroup *swgroups; unsigned int num_swgroups; + const struct tegra_smmu_group_soc *groups; + unsigned int num_groups; + bool supports_round_robin_arbitration; bool supports_request_limit; diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h index f5024c560d8f..9c4eb33c5a1d 100644 --- a/include/trace/events/preemptirq.h +++ b/include/trace/events/preemptirq.h @@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable, #include <trace/define_trace.h> -#else /* !CONFIG_PREEMPTIRQ_EVENTS */ +#endif /* !CONFIG_PREEMPTIRQ_EVENTS */ +#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING) #define trace_irq_enable(...) #define trace_irq_disable(...) -#define trace_preempt_enable(...) -#define trace_preempt_disable(...) #define trace_irq_enable_rcuidle(...) #define trace_irq_disable_rcuidle(...) +#endif + +#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT) +#define trace_preempt_enable(...) +#define trace_preempt_disable(...) #define trace_preempt_enable_rcuidle(...) #define trace_preempt_disable_rcuidle(...) 
- #endif diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index af3cc2f4e1ad..37b5096ae97b 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -256,7 +256,6 @@ struct tc_red_qopt { #define TC_RED_ECN 1 #define TC_RED_HARDDROP 2 #define TC_RED_ADAPTATIVE 4 -#define TC_RED_OFFLOADED 8 }; struct tc_red_xstats { diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index d8b5f80c2ea6..843e29aa3cac 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -557,6 +557,7 @@ enum { TCA_PAD, TCA_DUMP_INVISIBLE, TCA_CHAIN, + TCA_HW_OFFLOAD, __TCA_MAX }; diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h index d41a07afe3fc..4b9eb064d7e7 100644 --- a/include/uapi/linux/tee.h +++ b/include/uapi/linux/tee.h @@ -155,6 +155,13 @@ struct tee_ioctl_buf_data { */ #define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff +/* Meta parameter carrying extra information about the message. */ +#define TEE_IOCTL_PARAM_ATTR_META 0x100 + +/* Mask of all known attr bits */ +#define TEE_IOCTL_PARAM_ATTR_MASK \ + (TEE_IOCTL_PARAM_ATTR_TYPE_MASK | TEE_IOCTL_PARAM_ATTR_META) + /* * Matches TEEC_LOGIN_* in GP TEE Client API * Are only defined for GP compliant TEEs |
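/*
 * Illustrative sketch, not part of the diff: validating user-supplied
 * parameter attributes against the new TEE_IOCTL_PARAM_ATTR_MASK, which now
 * also admits the meta bit alongside the type field. The function below is
 * hypothetical.
 */
#include <linux/errno.h>
#include <linux/tee_drv.h>
#include <linux/types.h>

static int example_check_attr(u64 attr)
{
	/* Reject attribute bits the ioctl interface does not define. */
	if (attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
		return -EINVAL;

	return 0;
}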