Diffstat (limited to 'include')
661 files changed, 20717 insertions(+), 6252 deletions(-)
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h index 3a26aa7ead23..6db9a6d40c85 100644 --- a/include/acpi/acoutput.h +++ b/include/acpi/acoutput.h @@ -73,7 +73,8 @@ #define ACPI_LV_RESOURCES 0x00010000 #define ACPI_LV_USER_REQUESTS 0x00020000 #define ACPI_LV_PACKAGE 0x00040000 -#define ACPI_LV_VERBOSITY1 0x0007FF40 | ACPI_LV_ALL_EXCEPTIONS +#define ACPI_LV_EVALUATION 0x00080000 +#define ACPI_LV_VERBOSITY1 0x000FFF40 | ACPI_LV_ALL_EXCEPTIONS /* Trace verbosity level 2 [Function tracing and memory allocation] */ @@ -141,6 +142,7 @@ #define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS) #define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS) #define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE) +#define ACPI_DB_EVALUATION ACPI_DEBUG_LEVEL (ACPI_LV_EVALUATION) #define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX) #define ACPI_DB_EVENTS ACPI_DEBUG_LEVEL (ACPI_LV_EVENTS) @@ -148,7 +150,7 @@ /* Defaults for debug_level, debug and normal */ -#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR) +#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR) #define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR) #define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index 14499757338f..de1804aeaf69 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -88,7 +88,14 @@ int acpi_pci_link_free_irq(acpi_handle handle); struct pci_bus; +#ifdef CONFIG_PCI struct pci_dev *acpi_get_pci_dev(acpi_handle); +#else +static inline struct pci_dev *acpi_get_pci_dev(acpi_handle handle) +{ + return NULL; +} +#endif /* Arch-defined function to add a bus to the system */ diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 0c19b68bf060..7aa38b648564 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -12,7 +12,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20181003 +#define ACPI_CA_VERSION 0x20181213 #include <acpi/acconfig.h> #include <acpi/actypes.h> diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index 517addd6b11d..0a977eca0a74 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -38,6 +38,7 @@ #define ACPI_SIG_XSDT "XSDT" /* Extended System Description Table */ #define ACPI_SIG_SSDT "SSDT" /* Secondary System Description Table */ #define ACPI_RSDP_NAME "RSDP" /* Short name for RSDP, not signature */ +#define ACPI_OEM_NAME "OEM" /* Short name for OEM, not signature */ /* * All tables and structures must be byte-packed to match the ACPI diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index 501f341d1d92..ea1ca49c9c1b 100644 --- a/include/acpi/actbl3.h +++ b/include/acpi/actbl3.h @@ -365,6 +365,29 @@ struct acpi_table_tcpa_server { * ******************************************************************************/ +/* Revision 3 */ + +struct acpi_table_tpm23 { + struct acpi_table_header header; /* Common ACPI table header */ + u32 reserved; + u64 control_address; + u32 start_method; +}; + +/* Value for start_method above */ + +#define ACPI_TPM23_ACPI_START_METHOD 2 + +/* + * Optional trailer for revision 3. If start method is 2, there is a 4 byte + * reserved area of all zeros. 
+ */ +struct acpi_tmp23_trailer { + u32 reserved; +}; + +/* Revision 4 */ + struct acpi_table_tpm2 { struct acpi_table_header header; /* Common ACPI table header */ u16 platform_class; diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 66ceb12ebc63..2590627dbfcc 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -527,6 +527,10 @@ typedef u64 acpi_integer; #define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8)) #define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8)) +/* Support for OEMx signature (x can be any character) */ +#define ACPI_IS_OEM_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_OEM_NAME, 3) &&\ + strnlen (a, ACPI_NAME_SIZE) == ACPI_NAME_SIZE) + /* * Algorithm to obtain access bit width. * Can be used with access_width of struct acpi_generic_address and access_size of @@ -1273,6 +1277,8 @@ typedef enum { #define ACPI_OSI_WIN_10_RS1 0x0E #define ACPI_OSI_WIN_10_RS2 0x0F #define ACPI_OSI_WIN_10_RS3 0x10 +#define ACPI_OSI_WIN_10_RS4 0x11 +#define ACPI_OSI_WIN_10_RS5 0x12 /* Definitions of getopt */ diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index cf59e6210d27..4f34734e7f36 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -142,5 +142,8 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); extern int acpi_get_psd_map(struct cppc_cpudata **); extern unsigned int cppc_get_transition_latency(int cpu); +extern bool cpc_ffh_supported(void); +extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val); +extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val); #endif /* _CPPC_ACPI_H*/ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 7451b3bca83a..e3d21d014fcc 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -33,6 +33,10 @@ /* Kernel specific ACPICA configuration */ +#ifdef CONFIG_PCI +#define ACPI_PCI_CONFIGURED +#endif + #ifdef CONFIG_ACPI_REDUCED_HARDWARE_ONLY #define ACPI_REDUCED_HARDWARE 1 #endif diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h index 89f3b03b1445..e3667c9a33a5 100644 --- a/include/asm-generic/4level-fixup.h +++ b/include/asm-generic/4level-fixup.h @@ -3,7 +3,7 @@ #define _4LEVEL_FIXUP_H #define __ARCH_HAS_4LEVEL_HACK -#define __PAGETABLE_PUD_FOLDED +#define __PAGETABLE_PUD_FOLDED 1 #define PUD_SHIFT PGDIR_SHIFT #define PUD_SIZE PGDIR_SIZE diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h index 9c2e0708eb82..bb6cb347018c 100644 --- a/include/asm-generic/5level-fixup.h +++ b/include/asm-generic/5level-fixup.h @@ -3,7 +3,7 @@ #define _5LEVEL_FIXUP_H #define __ARCH_HAS_5LEVEL_HACK -#define __PAGETABLE_P4D_FOLDED +#define __PAGETABLE_P4D_FOLDED 1 #define P4D_SHIFT PGDIR_SHIFT #define P4D_SIZE PGDIR_SIZE @@ -26,6 +26,7 @@ #define p4d_clear(p4d) pgd_clear(p4d) #define p4d_val(p4d) pgd_val(p4d) #define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud) +#define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud) #define p4d_page(p4d) pgd_page(p4d) #define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d) diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h index 62daf940989d..c8455cc28841 100644 --- a/include/asm-generic/bitops/builtin-fls.h +++ b/include/asm-generic/bitops/builtin-fls.h @@ -9,7 +9,7 @@ * This is defined the same way as 
ffs. * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static __always_inline int fls(int x) +static __always_inline int fls(unsigned int x) { return x ? sizeof(x) * 8 - __builtin_clz(x) : 0; } diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h index 753aecaab641..b168bb10e1be 100644 --- a/include/asm-generic/bitops/fls.h +++ b/include/asm-generic/bitops/fls.h @@ -10,7 +10,7 @@ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. */ -static __always_inline int fls(int x) +static __always_inline int fls(unsigned int x) { int r = 32; diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index cdafa5edea49..20561a60db9c 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -17,8 +17,10 @@ #ifndef __ASSEMBLY__ #include <linux/kernel.h> -struct bug_entry { +#ifdef CONFIG_BUG + #ifdef CONFIG_GENERIC_BUG +struct bug_entry { #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS unsigned long bug_addr; #else @@ -33,10 +35,8 @@ struct bug_entry { unsigned short line; #endif unsigned short flags; -#endif /* CONFIG_GENERIC_BUG */ }; - -#ifdef CONFIG_BUG +#endif /* CONFIG_GENERIC_BUG */ /* * Don't use BUG() or BUG_ON() unless there's really no way out; one diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h index 880a292d792f..c13f46109e88 100644 --- a/include/asm-generic/dma-mapping.h +++ b/include/asm-generic/dma-mapping.h @@ -4,7 +4,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { - return &dma_direct_ops; + return NULL; } #endif /* _ASM_GENERIC_DMA_MAPPING_H */ diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h index 296c65442f00..95a159a4137f 100644 --- a/include/asm-generic/error-injection.h +++ b/include/asm-generic/error-injection.h @@ -8,6 +8,7 @@ enum { EI_ETYPE_NULL, /* Return NULL if failure */ EI_ETYPE_ERRNO, /* Return -ERRNO if failure */ EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */ + EI_ETYPE_TRUE, /* Return true if failure */ }; struct error_injection_entry { diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 4d73e6e3c66c..294d6ae785d4 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -59,16 +59,19 @@ __kcrctab_\name: .endm #undef __put -#if defined(__KSYM_DEPS__) - -#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym === - -#elif defined(CONFIG_TRIM_UNUSED_KSYMS) +#if defined(CONFIG_TRIM_UNUSED_KSYMS) #include <linux/kconfig.h> #include <generated/autoksyms.h> +.macro __ksym_marker sym + .section ".discard.ksym","a" +__ksym_marker_\sym: + .previous +.endm + #define __EXPORT_SYMBOL(sym, val, sec) \ + __ksym_marker sym; \ __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym)) #define __cond_export_sym(sym, val, sec, conf) \ ___cond_export_sym(sym, val, sec, conf) diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h index 827e4d3bbc7a..8cc7b09c1bc7 100644 --- a/include/asm-generic/fixmap.h +++ b/include/asm-generic/fixmap.h @@ -16,6 +16,7 @@ #define __ASM_GENERIC_FIXMAP_H #include <linux/bug.h> +#include <linux/mm_types.h> #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h index 0c34215263b8..829bdb0d6327 100644 --- a/include/asm-generic/pgtable-nop4d-hack.h +++ b/include/asm-generic/pgtable-nop4d-hack.h @@ -5,7 +5,7 @@ #ifndef __ASSEMBLY__ 
#include <asm-generic/5level-fixup.h> -#define __PAGETABLE_PUD_FOLDED +#define __PAGETABLE_PUD_FOLDED 1 /* * Having the pud type consist of a pgd gets the size right, and allows @@ -31,6 +31,7 @@ static inline void pgd_clear(pgd_t *pgd) { } #define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) #define pgd_populate(mm, pgd, pud) do { } while (0) +#define pgd_populate_safe(mm, pgd, pud) do { } while (0) /* * (puds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h index 1a29b2a0282b..aebab905e6cd 100644 --- a/include/asm-generic/pgtable-nop4d.h +++ b/include/asm-generic/pgtable-nop4d.h @@ -4,7 +4,7 @@ #ifndef __ASSEMBLY__ -#define __PAGETABLE_P4D_FOLDED +#define __PAGETABLE_P4D_FOLDED 1 typedef struct { pgd_t pgd; } p4d_t; @@ -26,6 +26,7 @@ static inline void pgd_clear(pgd_t *pgd) { } #define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd)) #define pgd_populate(mm, pgd, p4d) do { } while (0) +#define pgd_populate_safe(mm, pgd, p4d) do { } while (0) /* * (p4ds are folded into pgds so this doesn't get actually called, * but the define is needed for a generic inline function.) diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index f35f6e8149e4..b85b8271a73d 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h @@ -8,7 +8,7 @@ struct mm_struct; -#define __PAGETABLE_PMD_FOLDED +#define __PAGETABLE_PMD_FOLDED 1 /* * Having the pmd type consist of a pud gets the size right, and allows diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index e950b9c50f34..c77a1d301155 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h @@ -9,7 +9,7 @@ #else #include <asm-generic/pgtable-nop4d.h> -#define __PAGETABLE_PUD_FOLDED +#define __PAGETABLE_PUD_FOLDED 1 /* * Having the pud type consist of a p4d gets the size right, and allows @@ -35,6 +35,7 @@ static inline void p4d_clear(p4d_t *p4d) { } #define pud_ERROR(pud) (p4d_ERROR((pud).p4d)) #define p4d_populate(mm, p4d, pud) do { } while (0) +#define p4d_populate_safe(mm, p4d, pud) do { } while (0) /* * (puds are folded into p4ds so this doesn't get actually called, * but the define is needed for a generic inline function.) 
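The folded-level changes above (defining __PAGETABLE_PMD_FOLDED, __PAGETABLE_PUD_FOLDED and __PAGETABLE_P4D_FOLDED to 1 instead of to nothing) exist so those macros can be tested with __is_defined() by the new mm_pmd_folded()/mm_pud_folded()/mm_p4d_folded() helpers added to asm-generic/pgtable.h further down in this diff. A minimal standalone sketch of that preprocessor trick, with the helper macros reproduced from include/linux/kconfig.h purely for illustration:

#include <stdio.h>

/* Reproduced from include/linux/kconfig.h for illustration only. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define FOLDED_NEW 1	/* behaves like the new "#define __PAGETABLE_PUD_FOLDED 1" */
#define FOLDED_OLD	/* behaves like the old empty "#define __PAGETABLE_PUD_FOLDED" */

int main(void)
{
	/* Only a macro that expands to the literal 1 is reported as defined. */
	printf("%d %d %d\n",
	       __is_defined(FOLDED_NEW),	/* prints 1 */
	       __is_defined(FOLDED_OLD),	/* prints 0 */
	       __is_defined(NEVER_DEFINED));	/* prints 0 */
	return 0;
}

This is why the empty-bodied definitions had to become "#define ... 1": with the old form, mm_pud_folded() and friends would always have evaluated to 0, even on configurations where the level really is folded.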
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 5657a20e0c59..05e61e6c843f 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -375,7 +375,6 @@ static inline int pte_unused(pte_t pte) #endif #ifndef __HAVE_ARCH_PMD_SAME -#ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { return pmd_val(pmd_a) == pmd_val(pmd_b); @@ -385,21 +384,60 @@ static inline int pud_same(pud_t pud_a, pud_t pud_b) { return pud_val(pud_a) == pud_val(pud_b); } -#else /* CONFIG_TRANSPARENT_HUGEPAGE */ -static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +#endif + +#ifndef __HAVE_ARCH_P4D_SAME +static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b) { - BUILD_BUG(); - return 0; + return p4d_val(p4d_a) == p4d_val(p4d_b); } +#endif -static inline int pud_same(pud_t pud_a, pud_t pud_b) +#ifndef __HAVE_ARCH_PGD_SAME +static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) { - BUILD_BUG(); - return 0; + return pgd_val(pgd_a) == pgd_val(pgd_b); } -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif +/* + * Use set_p*_safe(), and elide TLB flushing, when confident that *no* + * TLB flush will be required as a result of the "set". For example, use + * in scenarios where it is known ahead of time that the routine is + * setting non-present entries, or re-setting an existing entry to the + * same value. Otherwise, use the typical "set" helpers and flush the + * TLB. + */ +#define set_pte_safe(ptep, pte) \ +({ \ + WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \ + set_pte(ptep, pte); \ +}) + +#define set_pmd_safe(pmdp, pmd) \ +({ \ + WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ + set_pmd(pmdp, pmd); \ +}) + +#define set_pud_safe(pudp, pud) \ +({ \ + WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \ + set_pud(pudp, pud); \ +}) + +#define set_p4d_safe(p4dp, p4d) \ +({ \ + WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \ + set_p4d(p4dp, p4d); \ +}) + +#define set_pgd_safe(pgdp, pgd) \ +({ \ + WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ + set_pgd(pgdp, pgd); \ +}) + #ifndef __HAVE_ARCH_DO_SWAP_PAGE /* * Some architectures support metadata associated with a page. When a @@ -1019,6 +1057,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); int pud_clear_huge(pud_t *pud); int pmd_clear_huge(pmd_t *pmd); +int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); int pud_free_pmd_page(pud_t *pud, unsigned long addr); int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ @@ -1046,6 +1085,10 @@ static inline int pmd_clear_huge(pmd_t *pmd) { return 0; } +static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) +{ + return 0; +} static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) { return 0; @@ -1127,4 +1170,20 @@ static inline bool arch_has_pfn_modify_check(void) #endif #endif +/* + * On some architectures it depends on the mm if the p4d/pud or pmd + * layer of the page table hierarchy is folded or not. 
+ */ +#ifndef mm_p4d_folded +#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED) +#endif + +#ifndef mm_pud_folded +#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED) +#endif + +#ifndef mm_pmd_folded +#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED) +#endif + #endif /* _ASM_GENERIC_PGTABLE_H */ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 6b2e63df2739..d82c78a79da5 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -35,7 +35,7 @@ static inline void set_fs(mm_segment_t fs) #define segment_eq(a, b) ((a).seg == (b).seg) #endif -#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size)) +#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size)) /* * The architecture should really override this if possible, at least @@ -78,7 +78,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size) ({ \ void __user *__p = (ptr); \ might_fault(); \ - access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ? \ + access_ok(__p, sizeof(*ptr)) ? \ __put_user((x), ((__typeof__(*(ptr)) __user *)__p)) : \ -EFAULT; \ }) @@ -140,7 +140,7 @@ extern int __put_user_bad(void) __attribute__((noreturn)); ({ \ const void __user *__p = (ptr); \ might_fault(); \ - access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \ + access_ok(__p, sizeof(*ptr)) ? \ __get_user((x), (__typeof__(*(ptr)) __user *)__p) :\ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \ }) @@ -175,7 +175,7 @@ __strncpy_from_user(char *dst, const char __user *src, long count) static inline long strncpy_from_user(char *dst, const char __user *src, long count) { - if (!access_ok(VERIFY_READ, src, 1)) + if (!access_ok(src, 1)) return -EFAULT; return __strncpy_from_user(dst, src, count); } @@ -196,7 +196,7 @@ strncpy_from_user(char *dst, const char __user *src, long count) */ static inline long strnlen_user(const char __user *src, long n) { - if (!access_ok(VERIFY_READ, src, 1)) + if (!access_ok(src, 1)) return 0; return __strnlen_user(src, n); } @@ -217,7 +217,7 @@ static inline __must_check unsigned long clear_user(void __user *to, unsigned long n) { might_fault(); - if (!access_ok(VERIFY_WRITE, to, n)) + if (!access_ok(to, n)) return n; return __clear_user(to, n); diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 22e6f412c595..a3e766dff917 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -234,34 +234,6 @@ static inline void acomp_request_set_params(struct acomp_req *req, req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; } -static inline void crypto_stat_compress(struct acomp_req *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->compress_err_cnt); - } else { - atomic_inc(&tfm->base.__crt_alg->compress_cnt); - atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen); - } -#endif -} - -static inline void crypto_stat_decompress(struct acomp_req *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->compress_err_cnt); - } else { - atomic_inc(&tfm->base.__crt_alg->decompress_cnt); - atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen); - } -#endif -} - /** * crypto_acomp_compress() -- Invoke asynchronous compress operation * @@ -274,10 +246,13 @@ static inline void crypto_stat_decompress(struct acomp_req *req, int ret) 
static inline int crypto_acomp_compress(struct acomp_req *req) { struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int slen = req->slen; int ret; + crypto_stats_get(alg); ret = tfm->compress(req); - crypto_stat_compress(req, ret); + crypto_stats_compress(slen, ret, alg); return ret; } @@ -293,10 +268,13 @@ static inline int crypto_acomp_compress(struct acomp_req *req) static inline int crypto_acomp_decompress(struct acomp_req *req) { struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int slen = req->slen; int ret; + crypto_stats_get(alg); ret = tfm->decompress(req); - crypto_stat_decompress(req, ret); + crypto_stats_decompress(slen, ret, alg); return ret; } diff --git a/include/crypto/aead.h b/include/crypto/aead.h index 0d765d7bfb82..9ad595f97c65 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -115,7 +115,6 @@ struct aead_request { * @setkey: see struct skcipher_alg * @encrypt: see struct skcipher_alg * @decrypt: see struct skcipher_alg - * @geniv: see struct skcipher_alg * @ivsize: see struct skcipher_alg * @chunksize: see struct skcipher_alg * @init: Initialize the cryptographic transformation object. This function @@ -142,8 +141,6 @@ struct aead_alg { int (*init)(struct crypto_aead *tfm); void (*exit)(struct crypto_aead *tfm); - const char *geniv; - unsigned int ivsize; unsigned int maxauthsize; unsigned int chunksize; @@ -306,34 +303,6 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) return __crypto_aead_cast(req->base.tfm); } -static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->aead_err_cnt); - } else { - atomic_inc(&tfm->base.__crt_alg->encrypt_cnt); - atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen); - } -#endif -} - -static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->aead_err_cnt); - } else { - atomic_inc(&tfm->base.__crt_alg->decrypt_cnt); - atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen); - } -#endif -} - /** * crypto_aead_encrypt() - encrypt plaintext * @req: reference to the aead_request handle that holds all information @@ -356,13 +325,16 @@ static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret) static inline int crypto_aead_encrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_alg *alg = aead->base.__crt_alg; + unsigned int cryptlen = req->cryptlen; int ret; + crypto_stats_get(alg); if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else ret = crypto_aead_alg(aead)->encrypt(req); - crypto_stat_aead_encrypt(req, ret); + crypto_stats_aead_encrypt(cryptlen, alg, ret); return ret; } @@ -391,15 +363,18 @@ static inline int crypto_aead_encrypt(struct aead_request *req) static inline int crypto_aead_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct crypto_alg *alg = aead->base.__crt_alg; + unsigned int cryptlen = req->cryptlen; int ret; + crypto_stats_get(alg); if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else if 
(req->cryptlen < crypto_aead_authsize(aead)) ret = -EINVAL; else ret = crypto_aead_alg(aead)->decrypt(req); - crypto_stat_aead_decrypt(req, ret); + crypto_stats_aead_decrypt(cryptlen, alg, ret); return ret; } diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index afac71119396..2d690494568c 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -271,62 +271,6 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm) return alg->max_size(tfm); } -static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); - } else { - atomic_inc(&tfm->base.__crt_alg->encrypt_cnt); - atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen); - } -#endif -} - -static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); - } else { - atomic_inc(&tfm->base.__crt_alg->decrypt_cnt); - atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen); - } -#endif -} - -static inline void crypto_stat_akcipher_sign(struct akcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); - else - atomic_inc(&tfm->base.__crt_alg->sign_cnt); -#endif -} - -static inline void crypto_stat_akcipher_verify(struct akcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt); - else - atomic_inc(&tfm->base.__crt_alg->verify_cnt); -#endif -} - /** * crypto_akcipher_encrypt() - Invoke public key encrypt operation * @@ -341,10 +285,13 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + unsigned int src_len = req->src_len; int ret; + crypto_stats_get(calg); ret = alg->encrypt(req); - crypto_stat_akcipher_encrypt(req, ret); + crypto_stats_akcipher_encrypt(src_len, ret, calg); return ret; } @@ -362,10 +309,13 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; + unsigned int src_len = req->src_len; int ret; + crypto_stats_get(calg); ret = alg->decrypt(req); - crypto_stat_akcipher_decrypt(req, ret); + crypto_stats_akcipher_decrypt(src_len, ret, calg); return ret; } @@ -383,10 +333,12 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; int ret; + crypto_stats_get(calg); ret = alg->sign(req); - crypto_stat_akcipher_sign(req, ret); + crypto_stats_akcipher_sign(ret, calg); return ret; } @@ -404,10 +356,12 @@ static inline int 
crypto_akcipher_verify(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; int ret; + crypto_stats_get(calg); ret = alg->verify(req); - crypto_stat_akcipher_verify(req, ret); + crypto_stats_akcipher_verify(ret, calg); return ret; } diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h new file mode 100644 index 000000000000..1fc70a69d550 --- /dev/null +++ b/include/crypto/chacha.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values and helper functions for the ChaCha and XChaCha stream ciphers. + * + * XChaCha extends ChaCha's nonce to 192 bits, while provably retaining ChaCha's + * security. Here they share the same key size, tfm context, and setkey + * function; only their IV size and encrypt/decrypt function differ. + * + * The ChaCha paper specifies 20, 12, and 8-round variants. In general, it is + * recommended to use the 20-round variant ChaCha20. However, the other + * variants can be needed in some performance-sensitive scenarios. The generic + * ChaCha code currently allows only the 20 and 12-round variants. + */ + +#ifndef _CRYPTO_CHACHA_H +#define _CRYPTO_CHACHA_H + +#include <crypto/skcipher.h> +#include <linux/types.h> +#include <linux/crypto.h> + +/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */ +#define CHACHA_IV_SIZE 16 + +#define CHACHA_KEY_SIZE 32 +#define CHACHA_BLOCK_SIZE 64 +#define CHACHAPOLY_IV_SIZE 12 + +/* 192-bit nonce, then 64-bit stream position */ +#define XCHACHA_IV_SIZE 32 + +struct chacha_ctx { + u32 key[8]; + int nrounds; +}; + +void chacha_block(u32 *state, u8 *stream, int nrounds); +static inline void chacha20_block(u32 *state, u8 *stream) +{ + chacha_block(state, stream, 20); +} +void hchacha_block(const u32 *in, u32 *out, int nrounds); + +void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv); + +int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize); +int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keysize); + +int crypto_chacha_crypt(struct skcipher_request *req); +int crypto_xchacha_crypt(struct skcipher_request *req); + +#endif /* _CRYPTO_CHACHA_H */ diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h deleted file mode 100644 index f76302d99e2b..000000000000 --- a/include/crypto/chacha20.h +++ /dev/null @@ -1,27 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Common values for the ChaCha20 algorithm - */ - -#ifndef _CRYPTO_CHACHA20_H -#define _CRYPTO_CHACHA20_H - -#include <crypto/skcipher.h> -#include <linux/types.h> -#include <linux/crypto.h> - -#define CHACHA20_IV_SIZE 16 -#define CHACHA20_KEY_SIZE 32 -#define CHACHA20_BLOCK_SIZE 64 - -struct chacha20_ctx { - u32 key[8]; -}; - -void chacha20_block(u32 *state, u8 *stream); -void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); -int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize); -int crypto_chacha20_crypt(struct skcipher_request *req); - -#endif diff --git a/include/crypto/hash.h b/include/crypto/hash.h index bc7796600338..3b31c1b349ae 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -412,32 +412,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req) int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); -static inline void crypto_stat_ahash_update(struct 
ahash_request *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic_inc(&tfm->base.__crt_alg->hash_err_cnt); - else - atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen); -#endif -} - -static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->hash_err_cnt); - } else { - atomic_inc(&tfm->base.__crt_alg->hash_cnt); - atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen); - } -#endif -} - /** * crypto_ahash_finup() - update and finalize message digest * @req: reference to the ahash_request handle that holds all information @@ -552,10 +526,14 @@ static inline int crypto_ahash_init(struct ahash_request *req) */ static inline int crypto_ahash_update(struct ahash_request *req) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); ret = crypto_ahash_reqtfm(req)->update(req); - crypto_stat_ahash_update(req, ret); + crypto_stats_ahash_update(nbytes, ret, alg); return ret; } diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h index 56f217d41f12..91786b68dbdb 100644 --- a/include/crypto/hash_info.h +++ b/include/crypto/hash_info.h @@ -15,6 +15,7 @@ #include <crypto/sha.h> #include <crypto/md5.h> +#include <crypto/streebog.h> #include <uapi/linux/hash_info.h> diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h index 8db299c25566..40623f4457df 100644 --- a/include/crypto/internal/cryptouser.h +++ b/include/crypto/internal/cryptouser.h @@ -3,6 +3,11 @@ struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact); -int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb); +#ifdef CONFIG_CRYPTO_STATS int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs); -int crypto_dump_reportstat_done(struct netlink_callback *cb); +#else +static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs) +{ + return -ENOTSUPP; +} +#endif diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index e42f7063f245..453e867b4bd9 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -70,8 +70,6 @@ struct skcipher_walk { unsigned int alignmask; }; -extern const struct crypto_type crypto_givcipher_type; - static inline struct crypto_instance *skcipher_crypto_instance( struct skcipher_instance *inst) { diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index f517ba6d3a27..1a97e1601422 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -268,42 +268,6 @@ struct kpp_secret { unsigned short len; }; -static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - if (ret) - atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt); - else - atomic_inc(&tfm->base.__crt_alg->setsecret_cnt); -#endif -} - -static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - - if (ret) - atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt); - else - atomic_inc(&tfm->base.__crt_alg->generate_public_key_cnt); -#endif -} - -static 
inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - - if (ret) - atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt); - else - atomic_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt); -#endif -} - /** * crypto_kpp_set_secret() - Invoke kpp operation * @@ -323,10 +287,12 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, const void *buffer, unsigned int len) { struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; int ret; + crypto_stats_get(calg); ret = alg->set_secret(tfm, buffer, len); - crypto_stat_kpp_set_secret(tfm, ret); + crypto_stats_kpp_set_secret(calg, ret); return ret; } @@ -347,10 +313,12 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; int ret; + crypto_stats_get(calg); ret = alg->generate_public_key(req); - crypto_stat_kpp_generate_public_key(req, ret); + crypto_stats_kpp_generate_public_key(calg, ret); return ret; } @@ -368,10 +336,12 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct kpp_alg *alg = crypto_kpp_alg(tfm); + struct crypto_alg *calg = tfm->base.__crt_alg; int ret; + crypto_stats_get(calg); ret = alg->compute_shared_secret(req); - crypto_stat_kpp_compute_shared_secret(req, ret); + crypto_stats_kpp_compute_shared_secret(calg, ret); return ret; } diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h new file mode 100644 index 000000000000..53c04423c582 --- /dev/null +++ b/include/crypto/nhpoly1305.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common values and helper functions for the NHPoly1305 hash function. + */ + +#ifndef _NHPOLY1305_H +#define _NHPOLY1305_H + +#include <crypto/hash.h> +#include <crypto/poly1305.h> + +/* NH parameterization: */ + +/* Endianness: little */ +/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */ + +/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */ +#define NH_PAIR_STRIDE 2 +#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32)) + +/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */ +#define NH_NUM_PASSES 4 +#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64)) + +/* Max message size: 1024 bytes (32x compression factor) */ +#define NH_NUM_STRIDES 64 +#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES) +#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32)) +#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \ + NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1)) +#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32)) + +#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES) + +struct nhpoly1305_key { + struct poly1305_key poly_key; + u32 nh_key[NH_KEY_WORDS]; +}; + +struct nhpoly1305_state { + + /* Running total of polynomial evaluation */ + struct poly1305_state poly_state; + + /* Partial block buffer */ + u8 buffer[NH_MESSAGE_UNIT]; + unsigned int buflen; + + /* + * Number of bytes remaining until the current NH message reaches + * NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash. 
+ */ + unsigned int nh_remaining; + + __le64 nh_hash[NH_NUM_PASSES]; +}; + +typedef void (*nh_t)(const u32 *key, const u8 *message, size_t message_len, + __le64 hash[NH_NUM_PASSES]); + +int crypto_nhpoly1305_setkey(struct crypto_shash *tfm, + const u8 *key, unsigned int keylen); + +int crypto_nhpoly1305_init(struct shash_desc *desc); +int crypto_nhpoly1305_update(struct shash_desc *desc, + const u8 *src, unsigned int srclen); +int crypto_nhpoly1305_update_helper(struct shash_desc *desc, + const u8 *src, unsigned int srclen, + nh_t nh_fn); +int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst); +int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, + nh_t nh_fn); + +#endif /* _NHPOLY1305_H */ diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index f718a19da82f..34317ed2071e 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h @@ -13,13 +13,21 @@ #define POLY1305_KEY_SIZE 32 #define POLY1305_DIGEST_SIZE 16 +struct poly1305_key { + u32 r[5]; /* key, base 2^26 */ +}; + +struct poly1305_state { + u32 h[5]; /* accumulator, base 2^26 */ +}; + struct poly1305_desc_ctx { /* key */ - u32 r[5]; + struct poly1305_key r; /* finalize key */ u32 s[4]; /* accumulator */ - u32 h[5]; + struct poly1305_state h; /* partial buffer */ u8 buf[POLY1305_BLOCK_SIZE]; /* bytes used in partial buffer */ @@ -30,6 +38,22 @@ struct poly1305_desc_ctx { bool sset; }; +/* + * Poly1305 core functions. These implement the ε-almost-∆-universal hash + * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce + * ("s key") at the end. They also only support block-aligned inputs. + */ +void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key); +static inline void poly1305_core_init(struct poly1305_state *state) +{ + memset(state->h, 0, sizeof(state->h)); +} +void poly1305_core_blocks(struct poly1305_state *state, + const struct poly1305_key *key, + const void *src, unsigned int nblocks); +void poly1305_core_emit(const struct poly1305_state *state, void *dst); + +/* Crypto API helper functions for the Poly1305 MAC */ int crypto_poly1305_init(struct shash_desc *desc); unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, const u8 *src, unsigned int srclen); diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 6d258f5b68f1..022a1b896b47 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -122,29 +122,6 @@ static inline void crypto_free_rng(struct crypto_rng *tfm) crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm)); } -static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - if (ret && ret != -EINPROGRESS && ret != -EBUSY) - atomic_inc(&tfm->base.__crt_alg->rng_err_cnt); - else - atomic_inc(&tfm->base.__crt_alg->seed_cnt); -#endif -} - -static inline void crypto_stat_rng_generate(struct crypto_rng *tfm, - unsigned int dlen, int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&tfm->base.__crt_alg->rng_err_cnt); - } else { - atomic_inc(&tfm->base.__crt_alg->generate_cnt); - atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen); - } -#endif -} - /** * crypto_rng_generate() - get random number * @tfm: cipher handle @@ -163,10 +140,12 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { + struct crypto_alg *alg = tfm->base.__crt_alg; int ret; + crypto_stats_get(alg); ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen); - 
crypto_stat_rng_generate(tfm, dlen, ret); + crypto_stats_rng_generate(alg, dlen, ret); return ret; } diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 925f547cdcfa..e555294ed77f 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -39,19 +39,6 @@ struct skcipher_request { void *__ctx[] CRYPTO_MINALIGN_ATTR; }; -/** - * struct skcipher_givcrypt_request - Crypto request with IV generation - * @seq: Sequence number for IV generation - * @giv: Space for generated IV - * @creq: The crypto request itself - */ -struct skcipher_givcrypt_request { - u64 seq; - u8 *giv; - - struct ablkcipher_request creq; -}; - struct crypto_skcipher { int (*setkey)(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); @@ -486,32 +473,6 @@ static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm( return container_of(tfm, struct crypto_sync_skcipher, base); } -static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req, - int ret, struct crypto_alg *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&alg->cipher_err_cnt); - } else { - atomic_inc(&alg->encrypt_cnt); - atomic64_add(req->cryptlen, &alg->encrypt_tlen); - } -#endif -} - -static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req, - int ret, struct crypto_alg *alg) -{ -#ifdef CONFIG_CRYPTO_STATS - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&alg->cipher_err_cnt); - } else { - atomic_inc(&alg->decrypt_cnt); - atomic64_add(req->cryptlen, &alg->decrypt_tlen); - } -#endif -} - /** * crypto_skcipher_encrypt() - encrypt plaintext * @req: reference to the skcipher_request handle that holds all information @@ -526,13 +487,16 @@ static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req, static inline int crypto_skcipher_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int cryptlen = req->cryptlen; int ret; + crypto_stats_get(alg); if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else ret = tfm->encrypt(req); - crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg); + crypto_stats_skcipher_encrypt(cryptlen, ret, alg); return ret; } @@ -550,13 +514,16 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req) static inline int crypto_skcipher_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_alg *alg = tfm->base.__crt_alg; + unsigned int cryptlen = req->cryptlen; int ret; + crypto_stats_get(alg); if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) ret = -ENOKEY; else ret = tfm->decrypt(req); - crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg); + crypto_stats_skcipher_decrypt(cryptlen, ret, alg); return ret; } diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h new file mode 100644 index 000000000000..4af119f7e07b --- /dev/null +++ b/include/crypto/streebog.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause */ +/* + * Copyright (c) 2013 Alexey Degtyarev <alexey@renatasystems.org> + * Copyright (c) 2018 Vitaly Chikunov <vt@altlinux.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ */ + +#ifndef _CRYPTO_STREEBOG_H_ +#define _CRYPTO_STREEBOG_H_ + +#include <linux/types.h> + +#define STREEBOG256_DIGEST_SIZE 32 +#define STREEBOG512_DIGEST_SIZE 64 +#define STREEBOG_BLOCK_SIZE 64 + +struct streebog_uint512 { + u64 qword[8]; +}; + +struct streebog_state { + u8 buffer[STREEBOG_BLOCK_SIZE]; + struct streebog_uint512 hash; + struct streebog_uint512 h; + struct streebog_uint512 N; + struct streebog_uint512 Sigma; + size_t fillsize; +}; + +#endif /* !_CRYPTO_STREEBOG_H_ */ diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index ccb5aa8468e0..9c56412bb2cf 100644 --- a/include/drm/bridge/dw_hdmi.h +++ b/include/drm/bridge/dw_hdmi.h @@ -133,6 +133,7 @@ struct dw_hdmi_plat_data { const struct dw_hdmi_phy_ops *phy_ops; const char *phy_name; void *phy_data; + unsigned int phy_force_vendor; /* Synopsys PHY support */ const struct dw_hdmi_mpll_config *mpll_cfg; diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h index d9c6d549f971..48a671e782ca 100644 --- a/include/drm/bridge/dw_mipi_dsi.h +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -19,6 +19,13 @@ struct dw_mipi_dsi_phy_ops { unsigned int *lane_mbps); }; +struct dw_mipi_dsi_host_ops { + int (*attach)(void *priv_data, + struct mipi_dsi_device *dsi); + int (*detach)(void *priv_data, + struct mipi_dsi_device *dsi); +}; + struct dw_mipi_dsi_plat_data { void __iomem *base; unsigned int max_data_lanes; @@ -27,6 +34,7 @@ struct dw_mipi_dsi_plat_data { const struct drm_display_mode *mode); const struct dw_mipi_dsi_phy_ops *phy_ops; + const struct dw_mipi_dsi_host_ops *host_ops; void *priv_data; }; @@ -35,10 +43,8 @@ struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev, const struct dw_mipi_dsi_plat_data *plat_data); void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi); -struct dw_mipi_dsi *dw_mipi_dsi_bind(struct platform_device *pdev, - struct drm_encoder *encoder, - const struct dw_mipi_dsi_plat_data - *plat_data); +int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder); void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi); +void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave); #endif /* __DW_MIPI_DSI__ */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 05350424a4d3..bdb0d5548f39 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -68,7 +68,6 @@ #include <drm/drm_agpsupport.h> #include <drm/drm_crtc.h> #include <drm/drm_fourcc.h> -#include <drm/drm_global.h> #include <drm/drm_hashtab.h> #include <drm/drm_mm.h> #include <drm/drm_os_linux.h> @@ -110,4 +109,10 @@ static inline bool drm_can_sleep(void) return true; } +#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE) +#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x) +#else +#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) +#endif + #endif diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 1e810e0b7664..f9b35834c45d 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -265,7 +265,6 @@ struct __drm_private_objs_state { * struct drm_atomic_state - the global state object for atomic updates * @ref: count of all references to this state (will not be freed until zero) * @dev: parent DRM device - * @allow_modeset: allow full modeset * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics * @async_update: hint for asynchronous plane update * @planes: pointer to array of structures with per-plane data @@ -284,6 +283,15 @@ struct drm_atomic_state { struct kref ref; struct drm_device *dev; + + /** + * @allow_modeset: + * + * Allow full 
modeset. This is used by the ATOMIC IOCTL handler to + * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should + * never consult this flag, instead looking at the output of + * drm_atomic_crtc_needs_modeset(). + */ bool allow_modeset : 1; bool legacy_cursor_update : 1; bool async_update : 1; diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 657af7b39379..58214be3bf3d 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -31,6 +31,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_modeset_helper.h> +#include <drm/drm_atomic_state_helper.h> #include <drm/drm_util.h> struct drm_atomic_state; @@ -126,6 +127,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, int drm_atomic_helper_disable_all(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); void drm_atomic_helper_shutdown(struct drm_device *dev); +struct drm_atomic_state * +drm_atomic_helper_duplicate_state(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev); int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, struct drm_modeset_acquire_ctx *ctx); @@ -144,51 +148,10 @@ int drm_atomic_helper_page_flip_target( uint32_t flags, uint32_t target, struct drm_modeset_acquire_ctx *ctx); -struct drm_encoder * -drm_atomic_helper_best_encoder(struct drm_connector *connector); - -/* default implementations for state handling */ -void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); -void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, - struct drm_crtc_state *state); -struct drm_crtc_state * -drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc); -void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state); -void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, - struct drm_crtc_state *state); - -void __drm_atomic_helper_plane_reset(struct drm_plane *plane, - struct drm_plane_state *state); -void drm_atomic_helper_plane_reset(struct drm_plane *plane); -void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, - struct drm_plane_state *state); -struct drm_plane_state * -drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane); -void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state); -void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, - struct drm_plane_state *state); - -void __drm_atomic_helper_connector_reset(struct drm_connector *connector, - struct drm_connector_state *conn_state); -void drm_atomic_helper_connector_reset(struct drm_connector *connector); -void -__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, - struct drm_connector_state *state); -struct drm_connector_state * -drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector); -struct drm_atomic_state * -drm_atomic_helper_duplicate_state(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx); -void -__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); -void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, - struct drm_connector_state *state); int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size, struct drm_modeset_acquire_ctx *ctx); -void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj, - struct drm_private_state *state); /** * 
drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h new file mode 100644 index 000000000000..66c92cbd8e16 --- /dev/null +++ b/include/drm/drm_atomic_state_helper.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2018 Intel Corp. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Daniel Vetter <daniel.vetter@ffwll.ch> + */ + +#include <linux/types.h> + +struct drm_crtc; +struct drm_crtc_state; +struct drm_plane; +struct drm_plane_state; +struct drm_connector; +struct drm_connector_state; +struct drm_private_obj; +struct drm_private_state; +struct drm_modeset_acquire_ctx; +struct drm_device; + +void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); +struct drm_crtc_state * +drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state); +void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); + +void __drm_atomic_helper_plane_reset(struct drm_plane *plane, + struct drm_plane_state *state); +void drm_atomic_helper_plane_reset(struct drm_plane *plane); +void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, + struct drm_plane_state *state); +struct drm_plane_state * +drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane); +void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state); +void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state); + +void __drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state); +void drm_atomic_helper_connector_reset(struct drm_connector *connector); +void +__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, + struct drm_connector_state *state); +struct drm_connector_state * +drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector); +void +__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); +void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state); +void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj, + struct drm_private_state *state); diff --git 
a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 9ccad6b062f2..9be2181b3ed7 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -508,6 +508,18 @@ struct drm_connector_state { * drm_writeback_signal_completion() */ struct drm_writeback_job *writeback_job; + + /** + * @max_requested_bpc: Connector property to limit the maximum bit + * depth of the pixels. + */ + u8 max_requested_bpc; + + /** + * @max_bpc: Connector max_bpc based on the requested max_bpc property + * and the connector bpc limitations obtained from edid. + */ + u8 max_bpc; }; /** @@ -960,6 +972,17 @@ struct drm_connector { struct drm_property *scaling_mode_property; /** + * @vrr_capable_property: Optional property to help userspace + * query hardware support for variable refresh rate on a connector. + * connector. Drivers can add the property to a connector by + * calling drm_connector_attach_vrr_capable_property(). + * + * This should be updated only by calling + * drm_connector_set_vrr_capable_property(). + */ + struct drm_property *vrr_capable_property; + + /** * @content_protection_property: DRM ENUM property for content * protection. See drm_connector_attach_content_protection_property(). */ @@ -973,6 +996,12 @@ struct drm_connector { */ struct drm_property_blob *path_blob_ptr; + /** + * @max_bpc_property: Default connector property for the max bpc to be + * driven out of the connector. + */ + struct drm_property *max_bpc_property; + #define DRM_CONNECTOR_POLL_HPD (1 << 0) #define DRM_CONNECTOR_POLL_CONNECT (1 << 1) #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2) @@ -1133,6 +1162,7 @@ int drm_connector_init(struct drm_device *dev, struct drm_connector *connector, const struct drm_connector_funcs *funcs, int connector_type); +void drm_connector_attach_edid_property(struct drm_connector *connector); int drm_connector_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); int drm_connector_attach_encoder(struct drm_connector *connector, @@ -1192,30 +1222,6 @@ static inline void drm_connector_put(struct drm_connector *connector) } /** - * drm_connector_reference - acquire a connector reference - * @connector: DRM connector - * - * This is a compatibility alias for drm_connector_get() and should not be - * used by new code. - */ -static inline void drm_connector_reference(struct drm_connector *connector) -{ - drm_connector_get(connector); -} - -/** - * drm_connector_unreference - release a connector reference - * @connector: DRM connector - * - * This is a compatibility alias for drm_connector_put() and should not be - * used by new code. - */ -static inline void drm_connector_unreference(struct drm_connector *connector) -{ - drm_connector_put(connector); -} - -/** * drm_connector_is_unregistered - has the connector been unregistered from * userspace? 
* @connector: DRM connector @@ -1250,6 +1256,8 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev); int drm_connector_attach_content_type_property(struct drm_connector *dev); int drm_connector_attach_scaling_mode_property(struct drm_connector *connector, u32 scaling_mode_mask); +int drm_connector_attach_vrr_capable_property( + struct drm_connector *connector); int drm_connector_attach_content_protection_property( struct drm_connector *connector); int drm_mode_create_aspect_ratio_property(struct drm_device *dev); @@ -1266,8 +1274,12 @@ int drm_connector_update_edid_property(struct drm_connector *connector, const struct edid *edid); void drm_connector_set_link_status_property(struct drm_connector *connector, uint64_t link_status); +void drm_connector_set_vrr_capable_property( + struct drm_connector *connector, bool capable); int drm_connector_init_panel_orientation_property( struct drm_connector *connector, int width, int height); +int drm_connector_attach_max_bpc_property(struct drm_connector *connector, + int min, int max); /** * struct drm_tile_group - Tile group metadata diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index b21437bc95bf..39c3900aab3c 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -291,6 +291,15 @@ struct drm_crtc_state { u32 pageflip_flags; /** + * @vrr_enabled: + * + * Indicates if variable refresh rate should be enabled for the CRTC. + * Support for the requested vrr state will depend on driver and + * hardware capability - lacking support is not treated as failure. + */ + bool vrr_enabled; + + /** * @event: * * Optional pointer to a DRM event to signal upon completion of the diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 6914633037a5..d65f034843ce 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -57,12 +57,6 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode); void drm_helper_resume_force_mode(struct drm_device *dev); -int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb); -int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb); - /* drm_probe_helper.c */ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h new file mode 100644 index 000000000000..4487660b26b8 --- /dev/null +++ b/include/drm/drm_damage_helper.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/************************************************************************** + * + * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Deepak Rawat <drawat@vmware.com> + * + **************************************************************************/ + +#ifndef DRM_DAMAGE_HELPER_H_ +#define DRM_DAMAGE_HELPER_H_ + +#include <drm/drm_atomic_helper.h> + +/** + * drm_atomic_for_each_plane_damage - Iterator macro for plane damage. + * @iter: The iterator to advance. + * @rect: Return a rectangle in fb coordinates clipped to plane src. + * + * Note that if the first call to the iterator macro returns false, no plane + * update is needed. The iterator returns the full plane src when damage is not + * passed by user-space. + */ +#define drm_atomic_for_each_plane_damage(iter, rect) \ + while (drm_atomic_helper_damage_iter_next(iter, rect)) + +/** + * struct drm_atomic_helper_damage_iter - Closure structure for damage iterator. + * + * This structure tracks state needed to walk the list of plane damage clips. + */ +struct drm_atomic_helper_damage_iter { + /* private: Plane src in whole number. */ + struct drm_rect plane_src; + /* private: Rectangles in plane damage blob. */ + const struct drm_rect *clips; + /* private: Number of rectangles in plane damage blob. */ + uint32_t num_clips; + /* private: Current clip iterator is advancing on. */ + uint32_t curr_clip; + /* private: Whether a full plane update is needed. */ + bool full_update; +}; + +void drm_plane_enable_fb_damage_clips(struct drm_plane *plane); +void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state, + struct drm_plane_state *plane_state); +int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb, + struct drm_file *file_priv, unsigned int flags, + unsigned int color, struct drm_clip_rect *clips, + unsigned int num_clips); +void +drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter, + const struct drm_plane_state *old_state, + const struct drm_plane_state *new_state); +bool +drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter, + struct drm_rect *rect); + +/** + * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect. + * @state: Plane state. + * + * Returns plane damage rectangles in internal &drm_rect. Currently &drm_rect + * can be obtained by simply typecasting &drm_mode_rect. This is because both + * use signed 32-bit coordinates, and during drm_atomic_check_only() it is + * verified that damage clips are inside the fb. + * + * Return: Clips in plane fb_damage_clips blob property.
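For illustration only, a minimal sketch of how a driver's plane update hook might walk the damage clips with the iterator declared above; foo_plane_atomic_update() and foo_flush_rect() are hypothetical driver functions, not part of this header, and plane->state is assumed to be the new state at update time.

#include <drm/drm_damage_helper.h>

/* Hypothetical driver hook: flush only the rectangles that actually changed. */
static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, plane->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		foo_flush_rect(plane->state->fb, &clip); /* driver-specific upload */
}

If user-space supplied no damage blob, the iterator hands back the full plane src once, so the same loop also covers full updates.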
+ */ +static inline struct drm_rect * +drm_helper_get_plane_damage_clips(const struct drm_plane_state *state) +{ + return (struct drm_rect *)drm_plane_get_damage_clips(state); +} + +#endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 2a3843f248cf..5736c942c85b 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -231,6 +231,8 @@ #define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */ #define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */ +# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0) +# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8 #define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069 # define DP_DSC_RGB (1 << 0) @@ -279,6 +281,8 @@ # define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4) #define DP_DSC_MAX_SLICE_WIDTH 0x06C +#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560 +#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320 #define DP_DSC_SLICE_CAP_2 0x06D # define DP_DSC_16_PER_DP_DSC_SINK (1 << 0) @@ -477,6 +481,7 @@ # define DP_AUX_FRAME_SYNC_VALID (1 << 0) #define DP_DSC_ENABLE 0x160 /* DP 1.4 */ +# define DP_DECOMPRESSION_EN (1 << 0) #define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ # define DP_PSR_ENABLE (1 << 0) @@ -685,6 +690,8 @@ # define DP_EDP_12 0x01 # define DP_EDP_13 0x02 # define DP_EDP_14 0x03 +# define DP_EDP_14a 0x04 /* eDP 1.4a */ +# define DP_EDP_14b 0x05 /* eDP 1.4b */ #define DP_EDP_GENERAL_CAP_1 0x701 # define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0) @@ -905,6 +912,57 @@ #define DP_AUX_HDCP_KSV_FIFO 0x6802C #define DP_AUX_HDCP_AINFO 0x6803B +/* DP HDCP2.2 parameter offsets in DPCD address space */ +#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000 +#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008 +#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B +#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215 +#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D +#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220 +#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0 +#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0 +#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0 +#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0 +#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0 +#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8 +#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318 +#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328 +#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330 +#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332 +#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335 +#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345 +#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0 +#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0 +#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3 +#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5 +#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473 +#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493 +#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494 +#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518 + +/* DP HDCP message start offsets in DPCD address space */ +#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET +#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET +#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET +#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET +#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET +#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \ + DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET +#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET +#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET +#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET +#define 
DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET +#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET +#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET +#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET DP_HDCP_2_2_REG_MPRIME_OFFSET + +#define HDCP_2_2_DP_RXSTATUS_LEN 1 +#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0)) +#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1)) +#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2)) +#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) +#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4)) + /* DP 1.2 Sideband message defines */ /* peer device type - DP 1.2a Table 2-92 */ #define DP_PEER_DEVICE_NONE 0x0 @@ -963,6 +1021,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI #define DP_BRANCH_OUI_HEADER_SIZE 0xc #define DP_RECEIVER_CAP_SIZE 0xf +#define DP_DSC_RECEIVER_CAP_SIZE 0xf #define EDP_PSR_RECEIVER_CAP_SIZE 2 #define EDP_DISPLAY_CTL_CAP_SIZE 3 @@ -993,6 +1052,7 @@ struct dp_sdp_header { #define EDP_SDP_HEADER_REVISION_MASK 0x1F #define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F +#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F struct edp_vsc_psr { struct dp_sdp_header sdp_header; @@ -1059,6 +1119,44 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT; } +/* DP/eDP DSC support */ +u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], + bool is_edp); +u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); +int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE], + u8 dsc_bpc[3]); + +static inline bool +drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] & + DP_DSC_DECOMPRESSION_IS_SUPPORTED; +} + +static inline u16 +drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] | + (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] & + DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK << + DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT); +} + +static inline u32 +drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) +{ + /* Max Slicewidth = Number of Pixels * 320 */ + return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] * + DP_DSC_SLICE_WIDTH_MULTIPLIER; +} + +/* Forward Error Correction Support on DP 1.4 */ +static inline bool +drm_dp_sink_supports_fec(const u8 fec_capable) +{ + return fec_capable & DP_FEC_CAPABLE; +} + /* * DisplayPort AUX channel */ diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 7f78d26a0766..59f005b419cf 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -409,7 +409,6 @@ struct drm_dp_payload { struct drm_dp_mst_topology_state { struct drm_private_state base; int avail_slots; - struct drm_atomic_state *state; struct drm_dp_mst_topology_mgr *mgr; }; @@ -498,11 +497,6 @@ struct drm_dp_mst_topology_mgr { int pbn_div; /** - * @state: State information for topology manager - */ - struct drm_dp_mst_topology_state *state; - - /** * @funcs: Atomic helper callbacks */ const struct drm_private_state_funcs *funcs; diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 3199ef70c007..35af23f5fa0d 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -471,6 +471,8 @@ struct drm_driver { * @gem_prime_export: * * export GEM -> dmabuf + * + * 
This defaults to drm_gem_prime_export() if not set. */ struct dma_buf * (*gem_prime_export)(struct drm_device *dev, struct drm_gem_object *obj, int flags); @@ -478,6 +480,8 @@ struct drm_driver { * @gem_prime_import: * * import dmabuf -> GEM + * + * This defaults to drm_gem_prime_import() if not set. */ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, struct dma_buf *dma_buf); @@ -523,8 +527,10 @@ struct drm_driver { * @dumb_map_offset: * * Allocate an offset in the drm device node's address space to be able to - * memory map a dumb buffer. GEM-based drivers must use - * drm_gem_create_mmap_offset() to implement this. + * memory map a dumb buffer. + * + * The default implementation is drm_gem_create_mmap_offset(). GEM based + * drivers must not overwrite this. * * Called by the user via ioctl. * @@ -544,6 +550,9 @@ struct drm_driver { * * Called by the user via ioctl. * + * The default implementation is drm_gem_dumb_destroy(). GEM based drivers + * must not overwrite this. + * * Returns: * * Zero on success, negative errno on failure. @@ -621,7 +630,6 @@ void drm_dev_unregister(struct drm_device *dev); void drm_dev_get(struct drm_device *dev); void drm_dev_put(struct drm_device *dev); -void drm_dev_unref(struct drm_device *dev); void drm_put_dev(struct drm_device *dev); bool drm_dev_enter(struct drm_device *dev, int *idx); void drm_dev_exit(int idx); diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h new file mode 100644 index 000000000000..d03f1b83421a --- /dev/null +++ b/include/drm/drm_dsc.h @@ -0,0 +1,485 @@ +/* SPDX-License-Identifier: MIT + * Copyright (C) 2018 Intel Corp. + * + * Authors: + * Manasi Navare <manasi.d.navare@intel.com> + */ + +#ifndef DRM_DSC_H_ +#define DRM_DSC_H_ + +#include <drm/drm_dp_helper.h> + +/* VESA Display Stream Compression DSC 1.2 constants */ +#define DSC_NUM_BUF_RANGES 15 +#define DSC_MUX_WORD_SIZE_8_10_BPC 48 +#define DSC_MUX_WORD_SIZE_12_BPC 64 +#define DSC_RC_PIXELS_PER_GROUP 3 +#define DSC_SCALE_DECREMENT_INTERVAL_MAX 4095 +#define DSC_RANGE_BPG_OFFSET_MASK 0x3f + +/* DSC Rate Control Constants */ +#define DSC_RC_MODEL_SIZE_CONST 8192 +#define DSC_RC_EDGE_FACTOR_CONST 6 +#define DSC_RC_TGT_OFFSET_HI_CONST 3 +#define DSC_RC_TGT_OFFSET_LO_CONST 3 + +/* DSC PPS constants and macros */ +#define DSC_PPS_VERSION_MAJOR_SHIFT 4 +#define DSC_PPS_BPC_SHIFT 4 +#define DSC_PPS_MSB_SHIFT 8 +#define DSC_PPS_LSB_MASK (0xFF << 0) +#define DSC_PPS_BPP_HIGH_MASK (0x3 << 8) +#define DSC_PPS_VBR_EN_SHIFT 2 +#define DSC_PPS_SIMPLE422_SHIFT 3 +#define DSC_PPS_CONVERT_RGB_SHIFT 4 +#define DSC_PPS_BLOCK_PRED_EN_SHIFT 5 +#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK (0x3 << 8) +#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK (0xF << 8) +#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT 4 +#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11 +#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6 +#define DSC_PPS_NATIVE_420_SHIFT 1 +#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16 +#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0 +#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13 + +/* Configuration for a single Rate Control model range */ +struct drm_dsc_rc_range_parameters { + /* Min Quantization Parameters allowed for this range */ + u8 range_min_qp; + /* Max Quantization Parameters allowed for this range */ + u8 range_max_qp; + /* Bits/group offset to apply to target for this group */ + u8 range_bpg_offset; +}; + +struct drm_dsc_config { + /* Bits / component for previous reconstructed line buffer */ + u8 line_buf_depth; + /* Bits per component to code (must be 8, 10, or 12) */ + u8 bits_per_component; + /* + * Flag 
indicating to do RGB - YCoCg conversion + * and back (should be 1 for RGB input) + */ + bool convert_rgb; + u8 slice_count; + /* Slice Width */ + u16 slice_width; + /* Slice Height */ + u16 slice_height; + /* + * 4:2:2 enable mode (from PPS, 4:2:2 conversion happens + * outside of DSC encode/decode algorithm) + */ + bool enable422; + /* Picture Width */ + u16 pic_width; + /* Picture Height */ + u16 pic_height; + /* Offset to bits/group used by RC to determine QP adjustment */ + u8 rc_tgt_offset_high; + /* Offset to bits/group used by RC to determine QP adjustment */ + u8 rc_tgt_offset_low; + /* Bits/pixel target << 4 (ie., 4 fractional bits) */ + u16 bits_per_pixel; + /* + * Factor to determine if an edge is present based + * on the bits produced + */ + u8 rc_edge_factor; + /* Slow down incrementing once the range reaches this value */ + u8 rc_quant_incr_limit1; + /* Slow down incrementing once the range reaches this value */ + u8 rc_quant_incr_limit0; + /* Number of pixels to delay the initial transmission */ + u16 initial_xmit_delay; + /* Number of pixels to delay the VLD on the decoder,not including SSM */ + u16 initial_dec_delay; + /* Block prediction enable */ + bool block_pred_enable; + /* Bits/group offset to use for first line of the slice */ + u8 first_line_bpg_offset; + /* Value to use for RC model offset at slice start */ + u16 initial_offset; + /* Thresholds defining each of the buffer ranges */ + u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1]; + /* Parameters for each of the RC ranges */ + struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES]; + /* Total size of RC model */ + u16 rc_model_size; + /* Minimum QP where flatness information is sent */ + u8 flatness_min_qp; + /* Maximum QP where flatness information is sent */ + u8 flatness_max_qp; + /* Initial value for scale factor */ + u8 initial_scale_value; + /* Decrement scale factor every scale_decrement_interval groups */ + u16 scale_decrement_interval; + /* Increment scale factor every scale_increment_interval groups */ + u16 scale_increment_interval; + /* Non-first line BPG offset to use */ + u16 nfl_bpg_offset; + /* BPG offset used to enforce slice bit */ + u16 slice_bpg_offset; + /* Final RC linear transformation offset value */ + u16 final_offset; + /* Enable on-off VBR (ie., disable stuffing bits) */ + bool vbr_enable; + /* Mux word size (in bits) for SSM mode */ + u8 mux_word_size; + /* + * The (max) size in bytes of the "chunks" that are + * used in slice multiplexing + */ + u16 slice_chunk_size; + /* Rate Control buffer siz in bits */ + u16 rc_bits; + /* DSC Minor Version */ + u8 dsc_version_minor; + /* DSC Major version */ + u8 dsc_version_major; + /* Native 4:2:2 support */ + bool native_422; + /* Native 4:2:0 support */ + bool native_420; + /* Additional bits/grp for seconnd line of slice for native 4:2:0 */ + u8 second_line_bpg_offset; + /* Num of bits deallocated for each grp that is not in second line of slice */ + u16 nsl_bpg_offset; + /* Offset adj fr second line in Native 4:2:0 mode */ + u16 second_line_offset_adj; +}; + +/** + * struct picture_parameter_set - Represents 128 bytes of Picture Parameter Set + * + * The VESA DSC standard defines picture parameter set (PPS) which display + * stream compression encoders must communicate to decoders. + * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in + * this structure are as per Table 4.1 in Vesa DSC specification v1.1/v1.2. + * The PPS fields that span over more than a byte should be stored in Big Endian + * format. 
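Purely as a sketch of the big-endian rule just stated (the structure itself follows below): a driver, or the drm_dsc_pps_infoframe_pack() helper declared later in this header, would convert the host-order &drm_dsc_config values with cpu_to_be16() when filling multi-byte PPS fields. The function name here is made up for illustration.

#include <drm/drm_dsc.h>

static void foo_dsc_pps_fill_geometry(struct drm_dsc_picture_parameter_set *pps,
				      const struct drm_dsc_config *cfg)
{
	/* Multi-byte PPS fields are transmitted big endian. */
	pps->pic_width = cpu_to_be16(cfg->pic_width);
	pps->pic_height = cpu_to_be16(cfg->pic_height);
	pps->slice_width = cpu_to_be16(cfg->slice_width);
	pps->slice_height = cpu_to_be16(cfg->slice_height);
}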
+ */ +struct drm_dsc_picture_parameter_set { + /** + * @dsc_version: + * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC + * PPS0[7:4] - dsc_version_major: Contains major version of DSC + */ + u8 dsc_version; + /** + * @pps_identifier: + * PPS1[7:0] - Application specific identifier that can be + * used to differentiate between different PPS tables. + */ + u8 pps_identifier; + /** + * @pps_reserved: + * PPS2[7:0]- RESERVED Byte + */ + u8 pps_reserved; + /** + * @pps_3: + * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to + * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits, + * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits, + * 0xE - 14 bits for DSC1.2, 0xF - 14 bits for DSC 1.2. + * PPS3[7:4] - bits_per_component: Bits per component for the original + * pixels of the encoded picture. + * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2) + * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also + * allowed only when dsc_minor_version = 0x2) + */ + u8 pps_3; + /** + * @pps_4: + * PPS4[1:0] -These are the most significant 2 bits of + * compressed BPP bits_per_pixel[9:0] syntax element. + * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled + * PPS4[3] - simple_422: Indicates if decoder drops samples to + * reconstruct the 4:2:2 picture. + * PPS4[4] - Convert_rgb: Indicates if DSC color space conversion is + * active. + * PPS4[5] - blobk_pred_enable: Indicates if BP is used to code any + * groups in picture + * PPS4[7:6] - Reseved bits + */ + u8 pps_4; + /** + * @bits_per_pixel_low: + * PPS5[7:0] - This indicates the lower significant 8 bits of + * the compressed BPP bits_per_pixel[9:0] element. + */ + u8 bits_per_pixel_low; + /** + * @pic_height: + * PPS6[7:0], PPS7[7:0] -pic_height: Specifies the number of pixel rows + * within the raster. + */ + __be16 pic_height; + /** + * @pic_width: + * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within + * the raster. + */ + __be16 pic_width; + /** + * @slice_height: + * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels. + */ + __be16 slice_height; + /** + * @slice_width: + * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels. + */ + __be16 slice_width; + /** + * @chunk_size: + * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks + * that are used for slice multiplexing. + */ + __be16 chunk_size; + /** + * @initial_xmit_delay_high: + * PPS16[1:0] - Most Significant two bits of initial transmission delay. + * It specifies the number of pixel times that the encoder waits before + * transmitting data from its rate buffer. + * PPS16[7:2] - Reserved + */ + u8 initial_xmit_delay_high; + /** + * @initial_xmit_delay_low: + * PPS17[7:0] - Least significant 8 bits of initial transmission delay. + */ + u8 initial_xmit_delay_low; + /** + * @initial_dec_delay: + * + * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number + * of pixel times that the decoder accumulates data in its rate buffer + * before starting to decode and output pixels. + */ + __be16 initial_dec_delay; + /** + * @pps20_reserved: + * + * PPS20[7:0] - Reserved + */ + u8 pps20_reserved; + /** + * @initial_scale_value: + * PPS21[5:0] - Initial rcXformScale factor used at beginning + * of a slice. + * PPS21[7:6] - Reserved + */ + u8 initial_scale_value; + /** + * @scale_increment_interval: + * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing + * the rcXformScale factor at end of a slice. 
+ */ + __be16 scale_increment_interval; + /** + * @scale_decrement_interval_high: + * PPS24[3:0] - Higher 4 bits indicating number of group times between + * decrementing the rcXformScale factor at beginning of a slice. + * PPS24[7:4] - Reserved + */ + u8 scale_decrement_interval_high; + /** + * @scale_decrement_interval_low: + * PPS25[7:0] - Lower 8 bits of scale decrement interval + */ + u8 scale_decrement_interval_low; + /** + * @pps26_reserved: + * PPS26[7:0] + */ + u8 pps26_reserved; + /** + * @first_line_bpg_offset: + * PPS27[4:0] - Number of additional bits that are allocated + * for each group on first line of a slice. + * PPS27[7:5] - Reserved + */ + u8 first_line_bpg_offset; + /** + * @nfl_bpg_offset: + * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits + * deallocated for each group for groups after the first line of slice. + */ + __be16 nfl_bpg_offset; + /** + * @slice_bpg_offset: + * PPS30, PPS31[7:0] - Number of bits that are deallocated for each + * group to enforce the slice constraint. + */ + __be16 slice_bpg_offset; + /** + * @initial_offset: + * PPS32,33[7:0] - Initial value for rcXformOffset + */ + __be16 initial_offset; + /** + * @final_offset: + * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset + */ + __be16 final_offset; + /** + * @flatness_min_qp: + * PPS36[4:0] - Minimum QP at which flatness is signaled and + * flatness QP adjustment is made. + * PPS36[7:5] - Reserved + */ + u8 flatness_min_qp; + /** + * @flatness_max_qp: + * PPS37[4:0] - Max QP at which flatness is signalled and + * the flatness adjustment is made. + * PPS37[7:5] - Reserved + */ + u8 flatness_max_qp; + /** + * @rc_model_size: + * PPS38,39[7:0] - Number of bits within RC Model. + */ + __be16 rc_model_size; + /** + * @rc_edge_factor: + * PPS40[3:0] - Ratio of current activity vs, previous + * activity to determine presence of edge. + * PPS40[7:4] - Reserved + */ + u8 rc_edge_factor; + /** + * @rc_quant_incr_limit0: + * PPS41[4:0] - QP threshold used in short term RC + * PPS41[7:5] - Reserved + */ + u8 rc_quant_incr_limit0; + /** + * @rc_quant_incr_limit1: + * PPS42[4:0] - QP threshold used in short term RC + * PPS42[7:5] - Reserved + */ + u8 rc_quant_incr_limit1; + /** + * @rc_tgt_offset: + * PPS43[3:0] - Lower end of the variability range around the target + * bits per group that is allowed by short term RC. + * PPS43[7:4]- Upper end of the variability range around the target + * bits per group that i allowed by short term rc. + */ + u8 rc_tgt_offset; + /** + * @rc_buf_thresh: + * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for + * the 15 ranges defined by 14 thresholds. + */ + u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1]; + /** + * @rc_range_parameters: + * PPS58[7:0] - PPS87[7:0] + * Parameters that correspond to each of the 15 ranges. + */ + __be16 rc_range_parameters[DSC_NUM_BUF_RANGES]; + /** + * @native_422_420: + * PPS88[0] - 0 = Native 4:2:2 not used + * 1 = Native 4:2:2 used + * PPS88[1] - 0 = Native 4:2:0 not use + * 1 = Native 4:2:0 used + * PPS88[7:2] - Reserved 6 bits + */ + u8 native_422_420; + /** + * @second_line_bpg_offset: + * PPS89[4:0] - Additional bits/group budget for the + * second line of a slice in Native 4:2:0 mode. + * Set to 0 if DSC minor version is 1 or native420 is 0. + * PPS89[7:5] - Reserved + */ + u8 second_line_bpg_offset; + /** + * @nsl_bpg_offset: + * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated + * for each group that is not in the second line of a slice. 
+ */ + __be16 nsl_bpg_offset; + /** + * @second_line_offset_adj: + * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second + * line in Native 4:2:0 mode. + */ + __be16 second_line_offset_adj; + /** + * @pps_long_94_reserved: + * PPS 94, 95, 96, 97 - Reserved + */ + u32 pps_long_94_reserved; + /** + * @pps_long_98_reserved: + * PPS 98, 99, 100, 101 - Reserved + */ + u32 pps_long_98_reserved; + /** + * @pps_long_102_reserved: + * PPS 102, 103, 104, 105 - Reserved + */ + u32 pps_long_102_reserved; + /** + * @pps_long_106_reserved: + * PPS 106, 107, 108, 109 - reserved + */ + u32 pps_long_106_reserved; + /** + * @pps_long_110_reserved: + * PPS 110, 111, 112, 113 - reserved + */ + u32 pps_long_110_reserved; + /** + * @pps_long_114_reserved: + * PPS 114 - 117 - reserved + */ + u32 pps_long_114_reserved; + /** + * @pps_long_118_reserved: + * PPS 118 - 121 - reserved + */ + u32 pps_long_118_reserved; + /** + * @pps_long_122_reserved: + * PPS 122- 125 - reserved + */ + u32 pps_long_122_reserved; + /** + * @pps_short_126_reserved: + * PPS 126, 127 - reserved + */ + __be16 pps_short_126_reserved; +} __packed; + +/** + * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter + * Set Metadata + * + * This structure represents the DSC PPS infoframe required to send the Picture + * Parameter Set metadata required before enabling VESA Display Stream + * Compression. This is based on the DP Secondary Data Packet structure and + * comprises of SDP Header as defined in drm_dp_helper.h and PPS payload. + * + * @pps_header: Header for PPS as per DP SDP header format + * @pps_payload: PPS payload fields as per DSC specification Table 4-1 + */ +struct drm_dsc_pps_infoframe { + struct dp_sdp_header pps_header; + struct drm_dsc_picture_parameter_set pps_payload; +} __packed; + +void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp); +void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp, + const struct drm_dsc_config *dsc_cfg); + +#endif /* _DRM_DSC_H_ */ diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h index 4a65f0d155b0..8dbbe1eece1b 100644 --- a/include/drm/drm_fb_cma_helper.h +++ b/include/drm/drm_fb_cma_helper.h @@ -26,8 +26,6 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma); void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma); -void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma, - bool state); struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, unsigned int plane); diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 26485acc51d7..84ac79219e4c 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -164,14 +164,14 @@ struct drm_file { * See also the :ref:`section on primary nodes and authentication * <drm_primary_node>`. */ - unsigned authenticated :1; + bool authenticated; /** * @stereo_allowed: * * True when the client has asked us to expose stereo 3D mode flags. */ - unsigned stereo_allowed :1; + bool stereo_allowed; /** * @universal_planes: @@ -179,10 +179,10 @@ struct drm_file { * True if client understands CRTC primary planes and cursor planes * in the plane list. Automatically set when @atomic is set. */ - unsigned universal_planes:1; + bool universal_planes; /** @atomic: True if client understands atomic properties. 
*/ - unsigned atomic:1; + bool atomic; /** * @aspect_ratio_allowed: @@ -190,14 +190,14 @@ struct drm_file { * True, if client can handle picture aspect ratios, and has requested * to pass this information along with the mode. */ - unsigned aspect_ratio_allowed:1; + bool aspect_ratio_allowed; /** * @writeback_connectors: * * True if client understands writeback connectors */ - unsigned writeback_connectors:1; + bool writeback_connectors; /** * @is_master: @@ -208,7 +208,7 @@ struct drm_file { * See also the :ref:`section on primary nodes and authentication * <drm_primary_node>`. */ - unsigned is_master:1; + bool is_master; /** * @master: diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index 865ef60c17af..bcb389f04618 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -52,25 +52,86 @@ struct drm_mode_fb_cmd2; /** * struct drm_format_info - information about a DRM format - * @format: 4CC format identifier (DRM_FORMAT_*) - * @depth: Color depth (number of bits per pixel excluding padding bits), - * valid for a subset of RGB formats only. This is a legacy field, do not - * use in new code and set to 0 for new formats. - * @num_planes: Number of color planes (1 to 3) - * @cpp: Number of bytes per pixel (per plane) - * @hsub: Horizontal chroma subsampling factor - * @vsub: Vertical chroma subsampling factor - * @has_alpha: Does the format embeds an alpha component? - * @is_yuv: Is it a YUV format? */ struct drm_format_info { + /** @format: 4CC format identifier (DRM_FORMAT_*) */ u32 format; + + /** + * @depth: + * + * Color depth (number of bits per pixel excluding padding bits), + * valid for a subset of RGB formats only. This is a legacy field, do + * not use in new code and set to 0 for new formats. + */ u8 depth; + + /** @num_planes: Number of color planes (1 to 3) */ u8 num_planes; - u8 cpp[3]; + + union { + /** + * @cpp: + * + * Number of bytes per pixel (per plane), this is aliased with + * @char_per_block. It is deprecated in favour of using the + * triplet @char_per_block, @block_w, @block_h for better + * describing the pixel format. + */ + u8 cpp[3]; + + /** + * @char_per_block: + * + * Number of bytes per block (per plane), where blocks are + * defined as a rectangle of pixels which are stored next to + * each other in a byte aligned memory region. Together with + * @block_w and @block_h this is used to properly describe tiles + * in tiled formats or to describe groups of pixels in packed + * formats for which the memory needed for a single pixel is not + * byte aligned. + * + * @cpp has been kept for historical reasons because there are + * a lot of places in drivers where it's used. In drm core for + * generic code paths the preferred way is to use + * @char_per_block, drm_format_info_block_width() and + * drm_format_info_block_height() which allows handling both + * block and non-block formats in the same way. + * + * For formats that are intended to be used only with non-linear + * modifiers both @cpp and @char_per_block must be 0 in the + * generic format table. Drivers could supply accurate + * information from their drm_mode_config.get_format_info hook + * if they want the core to be validating the pitch. 
+ */ + u8 char_per_block[3]; + }; + + /** + * @block_w: + * + * Block width in pixels, this is intended to be accessed through + * drm_format_info_block_width() + */ + u8 block_w[3]; + + /** + * @block_h: + * + * Block height in pixels, this is intended to be accessed through + * drm_format_info_block_height() + */ + u8 block_h[3]; + + /** @hsub: Horizontal chroma subsampling factor */ u8 hsub; + /** @vsub: Vertical chroma subsampling factor */ u8 vsub; + + /** @has_alpha: Does the format embeds an alpha component? */ bool has_alpha; + + /** @is_yuv: Is it a YUV format? */ bool is_yuv; }; @@ -96,6 +157,12 @@ int drm_format_horz_chroma_subsampling(uint32_t format); int drm_format_vert_chroma_subsampling(uint32_t format); int drm_format_plane_width(int width, uint32_t format, int plane); int drm_format_plane_height(int height, uint32_t format, int plane); +unsigned int drm_format_info_block_width(const struct drm_format_info *info, + int plane); +unsigned int drm_format_info_block_height(const struct drm_format_info *info, + int plane); +uint64_t drm_format_info_min_pitch(const struct drm_format_info *info, + int plane, unsigned int buffer_width); const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf); #endif /* __DRM_FOURCC_H__ */ diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index c50502c656e5..c94acedfb08e 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -241,30 +241,6 @@ static inline void drm_framebuffer_put(struct drm_framebuffer *fb) } /** - * drm_framebuffer_reference - acquire a framebuffer reference - * @fb: DRM framebuffer - * - * This is a compatibility alias for drm_framebuffer_get() and should not be - * used by new code. - */ -static inline void drm_framebuffer_reference(struct drm_framebuffer *fb) -{ - drm_framebuffer_get(fb); -} - -/** - * drm_framebuffer_unreference - release a framebuffer reference - * @fb: DRM framebuffer - * - * This is a compatibility alias for drm_framebuffer_put() and should not be - * used by new code. - */ -static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb) -{ - drm_framebuffer_put(fb); -} - -/** * drm_framebuffer_read_refcount - read the framebuffer reference count. * @fb: framebuffer * diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 3583b98a1718..c95727425284 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -38,6 +38,121 @@ #include <drm/drm_vma_manager.h> +struct drm_gem_object; + +/** + * struct drm_gem_object_funcs - GEM object functions + */ +struct drm_gem_object_funcs { + /** + * @free: + * + * Deconstructor for drm_gem_objects. + * + * This callback is mandatory. + */ + void (*free)(struct drm_gem_object *obj); + + /** + * @open: + * + * Called upon GEM handle creation. + * + * This callback is optional. + */ + int (*open)(struct drm_gem_object *obj, struct drm_file *file); + + /** + * @close: + * + * Called upon GEM handle release. + * + * This callback is optional. + */ + void (*close)(struct drm_gem_object *obj, struct drm_file *file); + + /** + * @print_info: + * + * If driver subclasses struct &drm_gem_object, it can implement this + * optional hook for printing additional driver specific info. + * + * drm_printf_indent() should be used in the callback passing it the + * indent argument. + * + * This callback is called from drm_gem_print_info(). + * + * This callback is optional. 
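As a hypothetical sketch of how the new &drm_gem_object_funcs table is meant to be consumed: a driver defines one constant table and points each object's @funcs at it at creation time, instead of filling the per-driver callbacks in &drm_driver. Every foo_* identifier below is an assumption, not a DRM API.

#include <linux/scatterlist.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>

/* Hypothetical driver callbacks, matching the prototypes in the table above. */
static void foo_gem_free_object(struct drm_gem_object *obj);
static void foo_gem_print_info(struct drm_printer *p, unsigned int indent,
			       const struct drm_gem_object *obj);
static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj);
static void *foo_gem_vmap(struct drm_gem_object *obj);
static void foo_gem_vunmap(struct drm_gem_object *obj, void *vaddr);

static const struct drm_gem_object_funcs foo_gem_object_funcs = {
	.free = foo_gem_free_object,		/* mandatory */
	.print_info = foo_gem_print_info,	/* optional */
	.get_sg_table = foo_gem_get_sg_table,	/* needed for PRIME export */
	.vmap = foo_gem_vmap,
	.vunmap = foo_gem_vunmap,
};

/* At object creation the driver would then set obj->funcs = &foo_gem_object_funcs. */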
+ */ + void (*print_info)(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *obj); + + /** + * @export: + * + * Export backing buffer as a &dma_buf. + * If this is not set drm_gem_prime_export() is used. + * + * This callback is optional. + */ + struct dma_buf *(*export)(struct drm_gem_object *obj, int flags); + + /** + * @pin: + * + * Pin backing buffer in memory. + * + * This callback is optional. + */ + int (*pin)(struct drm_gem_object *obj); + + /** + * @unpin: + * + * Unpin backing buffer. + * + * This callback is optional. + */ + void (*unpin)(struct drm_gem_object *obj); + + /** + * @get_sg_table: + * + * Returns a Scatter-Gather table representation of the buffer. + * Used when exporting a buffer. + * + * This callback is mandatory if buffer export is supported. + */ + struct sg_table *(*get_sg_table)(struct drm_gem_object *obj); + + /** + * @vmap: + * + * Returns a virtual address for the buffer. + * + * This callback is optional. + */ + void *(*vmap)(struct drm_gem_object *obj); + + /** + * @vunmap: + * + * Releases the the address previously returned by @vmap. + * + * This callback is optional. + */ + void (*vunmap)(struct drm_gem_object *obj, void *vaddr); + + /** + * @vm_ops: + * + * Virtual memory operations used with mmap. + * + * This is optional but necessary for mmap support. + */ + const struct vm_operations_struct *vm_ops; +}; + /** * struct drm_gem_object - GEM buffer object * @@ -146,6 +261,17 @@ struct drm_gem_object { * simply leave it as NULL. */ struct dma_buf_attachment *import_attach; + + /** + * @funcs: + * + * Optional GEM object functions. If this is set, it will be used instead of the + * corresponding &drm_driver GEM callbacks. + * + * New drivers should use this. + * + */ + const struct drm_gem_object_funcs *funcs; }; /** @@ -222,56 +348,6 @@ __drm_gem_object_put(struct drm_gem_object *obj) void drm_gem_object_put_unlocked(struct drm_gem_object *obj); void drm_gem_object_put(struct drm_gem_object *obj); -/** - * drm_gem_object_reference - acquire a GEM buffer object reference - * @obj: GEM buffer object - * - * This is a compatibility alias for drm_gem_object_get() and should not be - * used by new code. - */ -static inline void drm_gem_object_reference(struct drm_gem_object *obj) -{ - drm_gem_object_get(obj); -} - -/** - * __drm_gem_object_unreference - raw function to release a GEM buffer object - * reference - * @obj: GEM buffer object - * - * This is a compatibility alias for __drm_gem_object_put() and should not be - * used by new code. - */ -static inline void __drm_gem_object_unreference(struct drm_gem_object *obj) -{ - __drm_gem_object_put(obj); -} - -/** - * drm_gem_object_unreference_unlocked - release a GEM buffer object reference - * @obj: GEM buffer object - * - * This is a compatibility alias for drm_gem_object_put_unlocked() and should - * not be used by new code. - */ -static inline void -drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) -{ - drm_gem_object_put_unlocked(obj); -} - -/** - * drm_gem_object_unreference - release a GEM buffer object reference - * @obj: GEM buffer object - * - * This is a compatibility alias for drm_gem_object_put() and should not be - * used by new code. 
- */ -static inline void drm_gem_object_unreference(struct drm_gem_object *obj) -{ - drm_gem_object_put(obj); -} - int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); @@ -293,4 +369,9 @@ int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, uint32_t handle); +int drm_gem_pin(struct drm_gem_object *obj); +void drm_gem_unpin(struct drm_gem_object *obj); +void *drm_gem_vmap(struct drm_gem_object *obj); +void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr); + #endif /* __DRM_GEM_H__ */ diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index 19777145cf8e..07c504940ba1 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -103,4 +103,28 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj); void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +struct drm_gem_object * +drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size); + +/** + * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual + * address on the buffer + * + * This macro provides a shortcut for setting the default GEM operations in the + * &drm_driver structure for drivers that need the virtual address also on + * imported buffers. + */ +#define DRM_GEM_CMA_VMAP_DRIVER_OPS \ + .gem_create_object = drm_cma_gem_create_object_default_funcs, \ + .dumb_create = drm_gem_cma_dumb_create, \ + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \ + .gem_prime_mmap = drm_gem_prime_mmap + +struct drm_gem_object * +drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + #endif /* __DRM_GEM_CMA_HELPER_H__ */ diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h deleted file mode 100644 index 3a830602a2e4..000000000000 --- a/include/drm/drm_global.h +++ /dev/null @@ -1,53 +0,0 @@ -/************************************************************************** - * - * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - **************************************************************************/ -/* - * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> - */ - -#ifndef _DRM_GLOBAL_H_ -#define _DRM_GLOBAL_H_ -enum drm_global_types { - DRM_GLOBAL_TTM_MEM = 0, - DRM_GLOBAL_TTM_BO, - DRM_GLOBAL_TTM_OBJECT, - DRM_GLOBAL_NUM -}; - -struct drm_global_reference { - enum drm_global_types global_type; - size_t size; - void *object; - int (*init) (struct drm_global_reference *); - void (*release) (struct drm_global_reference *); -}; - -void drm_global_init(void); -void drm_global_release(void); -int drm_global_item_ref(struct drm_global_reference *ref); -void drm_global_item_unref(struct drm_global_reference *ref); - -#endif diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h index 98e63d870139..a6de09c5e47f 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/drm_hdcp.h @@ -38,4 +38,216 @@ #define DRM_HDCP_DDC_BSTATUS 0x41 #define DRM_HDCP_DDC_KSV_FIFO 0x43 +#define DRM_HDCP_1_4_SRM_ID 0x8 +#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3 +#define DRM_HDCP_1_4_DCP_SIG_SIZE 40 + +/* Protocol message definition for HDCP2.2 specification */ +/* + * Protected content streams are classified into 2 types: + * - Type0: Can be transmitted with HDCP 1.4+ + * - Type1: Can be transmitted with HDCP 2.2+ + */ +#define HDCP_STREAM_TYPE0 0x00 +#define HDCP_STREAM_TYPE1 0x01 + +/* HDCP2.2 Msg IDs */ +#define HDCP_2_2_NULL_MSG 1 +#define HDCP_2_2_AKE_INIT 2 +#define HDCP_2_2_AKE_SEND_CERT 3 +#define HDCP_2_2_AKE_NO_STORED_KM 4 +#define HDCP_2_2_AKE_STORED_KM 5 +#define HDCP_2_2_AKE_SEND_HPRIME 7 +#define HDCP_2_2_AKE_SEND_PAIRING_INFO 8 +#define HDCP_2_2_LC_INIT 9 +#define HDCP_2_2_LC_SEND_LPRIME 10 +#define HDCP_2_2_SKE_SEND_EKS 11 +#define HDCP_2_2_REP_SEND_RECVID_LIST 12 +#define HDCP_2_2_REP_SEND_ACK 15 +#define HDCP_2_2_REP_STREAM_MANAGE 16 +#define HDCP_2_2_REP_STREAM_READY 17 +#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 + +#define HDCP_2_2_RTX_LEN 8 +#define HDCP_2_2_RRX_LEN 8 + +#define HDCP_2_2_K_PUB_RX_MOD_N_LEN 128 +#define HDCP_2_2_K_PUB_RX_EXP_E_LEN 3 +#define HDCP_2_2_K_PUB_RX_LEN (HDCP_2_2_K_PUB_RX_MOD_N_LEN + \ + HDCP_2_2_K_PUB_RX_EXP_E_LEN) + +#define HDCP_2_2_DCP_LLC_SIG_LEN 384 + +#define HDCP_2_2_E_KPUB_KM_LEN 128 +#define HDCP_2_2_E_KH_KM_M_LEN (16 + 16) +#define HDCP_2_2_H_PRIME_LEN 32 +#define HDCP_2_2_E_KH_KM_LEN 16 +#define HDCP_2_2_RN_LEN 8 +#define HDCP_2_2_L_PRIME_LEN 32 +#define HDCP_2_2_E_DKEY_KS_LEN 16 +#define HDCP_2_2_RIV_LEN 8 +#define HDCP_2_2_SEQ_NUM_LEN 3 +#define HDCP_2_2_V_PRIME_HALF_LEN (HDCP_2_2_L_PRIME_LEN / 2) +#define HDCP_2_2_RECEIVER_ID_LEN DRM_HDCP_KSV_LEN +#define HDCP_2_2_MAX_DEVICE_COUNT 31 +#define HDCP_2_2_RECEIVER_IDS_MAX_LEN (HDCP_2_2_RECEIVER_ID_LEN * \ + HDCP_2_2_MAX_DEVICE_COUNT) +#define HDCP_2_2_MPRIME_LEN 32 + +/* Following Macros take a byte at a time for bit(s) masking */ +/* + * TODO: This has to be changed for DP MST, as multiple stream on + * same port is possible. + * For HDCP2.2 on HDMI and DP SST this value is always 1. 
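As a sketch of how a transmitter might consume the RxInfo masks defined just below, this assembles the downstream device count from the two RxInfo bytes carried in REP_SEND_RECVID_LIST; the function name is made up, and the byte ordering follows the layout these macros assume.

#include <linux/types.h>
#include <drm/drm_hdcp.h>

static u32 foo_hdcp2_device_count(const u8 rx_info[HDCP_2_2_RXINFO_LEN])
{
	/* The 5-bit device count is split across the two RxInfo bytes. */
	return (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4) |
	       HDCP_2_2_DEV_COUNT_LO(rx_info[1]);
}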
+ */ +#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 1 +#define HDCP_2_2_TXCAP_MASK_LEN 2 +#define HDCP_2_2_RXCAPS_LEN 3 +#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0)) +#define HDCP_2_2_DP_HDCP_CAPABLE(x) ((x) & BIT(1)) +#define HDCP_2_2_RXINFO_LEN 2 + +/* HDCP1.x compliant device in downstream */ +#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x) ((x) & BIT(0)) + +/* HDCP2.0 Compliant repeater in downstream */ +#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x) ((x) & BIT(1)) +#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x) ((x) & BIT(2)) +#define HDCP_2_2_MAX_DEVS_EXCEEDED(x) ((x) & BIT(3)) +#define HDCP_2_2_DEV_COUNT_LO(x) (((x) & (0xF << 4)) >> 4) +#define HDCP_2_2_DEV_COUNT_HI(x) ((x) & BIT(0)) +#define HDCP_2_2_DEPTH(x) (((x) & (0x7 << 1)) >> 1) + +struct hdcp2_cert_rx { + u8 receiver_id[HDCP_2_2_RECEIVER_ID_LEN]; + u8 kpub_rx[HDCP_2_2_K_PUB_RX_LEN]; + u8 reserved[2]; + u8 dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN]; +} __packed; + +struct hdcp2_streamid_type { + u8 stream_id; + u8 stream_type; +} __packed; + +/* + * The TxCaps field specified in the HDCP HDMI, DP specs + * This field is big endian as specified in the errata. + */ +struct hdcp2_tx_caps { + /* Transmitter must set this to 0x2 */ + u8 version; + + /* Reserved for HDCP and DP Spec. Read as Zero */ + u8 tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN]; +} __packed; + +/* Main structures for HDCP2.2 protocol communication */ +struct hdcp2_ake_init { + u8 msg_id; + u8 r_tx[HDCP_2_2_RTX_LEN]; + struct hdcp2_tx_caps tx_caps; +} __packed; + +struct hdcp2_ake_send_cert { + u8 msg_id; + struct hdcp2_cert_rx cert_rx; + u8 r_rx[HDCP_2_2_RRX_LEN]; + u8 rx_caps[HDCP_2_2_RXCAPS_LEN]; +} __packed; + +struct hdcp2_ake_no_stored_km { + u8 msg_id; + u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN]; +} __packed; + +struct hdcp2_ake_stored_km { + u8 msg_id; + u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN]; +} __packed; + +struct hdcp2_ake_send_hprime { + u8 msg_id; + u8 h_prime[HDCP_2_2_H_PRIME_LEN]; +} __packed; + +struct hdcp2_ake_send_pairing_info { + u8 msg_id; + u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN]; +} __packed; + +struct hdcp2_lc_init { + u8 msg_id; + u8 r_n[HDCP_2_2_RN_LEN]; +} __packed; + +struct hdcp2_lc_send_lprime { + u8 msg_id; + u8 l_prime[HDCP_2_2_L_PRIME_LEN]; +} __packed; + +struct hdcp2_ske_send_eks { + u8 msg_id; + u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN]; + u8 riv[HDCP_2_2_RIV_LEN]; +} __packed; + +struct hdcp2_rep_send_receiverid_list { + u8 msg_id; + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN]; + u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN]; + u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN]; +} __packed; + +struct hdcp2_rep_send_ack { + u8 msg_id; + u8 v[HDCP_2_2_V_PRIME_HALF_LEN]; +} __packed; + +struct hdcp2_rep_stream_manage { + u8 msg_id; + u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN]; + __be16 k; + struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT]; +} __packed; + +struct hdcp2_rep_stream_ready { + u8 msg_id; + u8 m_prime[HDCP_2_2_MPRIME_LEN]; +} __packed; + +struct hdcp2_dp_errata_stream_type { + u8 msg_id; + u8 stream_type; +} __packed; + +/* HDCP2.2 TIMEOUTs in mSec */ +#define HDCP_2_2_CERT_TIMEOUT_MS 100 +#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000 +#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200 +#define HDCP_2_2_PAIRING_TIMEOUT_MS 200 +#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20 +#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 7 +#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000 +#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100 + +/* HDMI HDCP2.2 Register Offsets */ +#define HDCP_2_2_HDMI_REG_VER_OFFSET 0x50 +#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET 0x60 +#define 
HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET 0x70 +#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET 0x80 +#define HDCP_2_2_HDMI_REG_DBG_OFFSET 0xC0 + +#define HDCP_2_2_HDMI_SUPPORT_MASK BIT(2) +#define HDCP_2_2_RX_CAPS_VERSION_VAL 0x02 +#define HDCP_2_2_SEQ_NUM_MAX 0xFFFFFF +#define HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN 200 + +/* Below macros take a byte at a time and mask the bit(s) */ +#define HDCP_2_2_HDMI_RXSTATUS_LEN 2 +#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3) +#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2)) +#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) + #endif diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 4fef19064b0f..491528f48cfb 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -168,6 +168,12 @@ struct mipi_dsi_device_info { * @format: pixel format for video mode * @lanes: number of active data lanes * @mode_flags: DSI operation mode related flags + * @hs_rate: maximum lane frequency for high speed mode in hertz, this should + * be set to the real limits of the hardware, zero is only accepted for + * legacy drivers + * @lp_rate: maximum lane frequency for low power mode in hertz, this should + * be set to the real limits of the hardware, zero is only accepted for + * legacy drivers */ struct mipi_dsi_device { struct mipi_dsi_host *host; @@ -178,6 +184,8 @@ struct mipi_dsi_device { unsigned int lanes; enum mipi_dsi_pixel_format format; unsigned long mode_flags; + unsigned long hs_rate; + unsigned long lp_rate; }; #define MIPI_DSI_MODULE_PREFIX "mipi-dsi:" diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 928e4172a0bb..572274ccbec7 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -52,6 +52,12 @@ struct drm_mode_config_funcs { * requested metadata, but most of that is left to the driver. See * &struct drm_mode_fb_cmd2 for details. * + * To validate the pixel format and modifier drivers can use + * drm_any_plane_has_format() to make sure at least one plane supports + * the requested values. Note that the driver must first determine the + * actual modifier used if the request doesn't have it specified, + * ie. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0. + * * If the parameters are deemed valid and the backing storage objects in * the underlying memory manager all exist, then the driver allocates * a new &drm_framebuffer structure, subclassed to contain @@ -628,6 +634,15 @@ struct drm_mode_config { */ struct drm_property *prop_crtc_id; /** + * @prop_fb_damage_clips: Optional plane property to mark damaged + * regions on the plane in framebuffer coordinates of the framebuffer + * attached to the plane. + * + * The layout of blob data is simply an array of &drm_mode_rect. Unlike + * plane src coordinates, damage clips are not in 16.16 fixed point. + */ + struct drm_property *prop_fb_damage_clips; + /** * @prop_active: Default atomic CRTC property to control the active * state, which is the simplified implementation for DPMS in atomic * drivers. @@ -639,6 +654,11 @@ struct drm_mode_config { * connectors must be of and active must be set to disabled, too. */ struct drm_property *prop_mode_id; + /** + * @prop_vrr_enabled: Default atomic CRTC property to indicate + * whether variable refresh rate should be enabled on the CRTC. 
+ */ + struct drm_property *prop_vrr_enabled; /** * @dvi_i_subconnector_property: Optional DVI-I property to @@ -809,6 +829,13 @@ struct drm_mode_config { /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; + + /** + * @quirk_addfb_prefer_xbgr_30bpp: + * + * Special hack for legacy ADDFB to keep nouveau userspace happy. Should + * only ever be set by the nouveau kernel driver. + */ bool quirk_addfb_prefer_xbgr_30bpp; /** diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index a685d1bb21f2..a308f2d6496f 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -130,4 +130,63 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); int drm_modeset_lock_all_ctx(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); +/** + * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks + * @dev: drm device + * @ctx: local modeset acquire context, will be dereferenced + * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init() + * @ret: local ret/err/etc variable to track error status + * + * Use these macros to simplify grabbing all modeset locks using a local + * context. This has the advantage of reducing boilerplate, but also properly + * checking return values where appropriate. + * + * Any code run between BEGIN and END will be holding the modeset locks. + * + * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and + * forth between the labels on deadlock and error conditions. + * + * Drivers can acquire additional modeset locks. If any lock acquisition + * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with + * the @ret parameter containing the return value of drm_modeset_lock(). + * + * Returns: + * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN() + * is 0, so no error checking is necessary + */ +#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \ + drm_modeset_acquire_init(&ctx, flags); \ +modeset_lock_retry: \ + ret = drm_modeset_lock_all_ctx(dev, &ctx); \ + if (ret) \ + goto modeset_lock_fail; + +/** + * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks + * @ctx: local modeset acquire context, will be dereferenced + * @ret: local ret/err/etc variable to track error status + * + * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN + * if ret is -EDEADLK. + * + * It's important that you use the same ret variable for begin and end so + * deadlock conditions are properly handled. + * + * Returns: + * ret will be untouched unless it is -EDEADLK on entry. That means that if you + * successfully acquire the locks, ret will be whatever your code sets it to. If + * there is a deadlock or other failure with acquire or backoff, ret will be set + * to that failure. In both of these cases the code between BEGIN/END will not + * be run, so the failure will reflect the inability to grab the locks. 
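A minimal usage sketch of the BEGIN/END pair documented here, assuming nothing beyond a &drm_device pointer; the work done while the locks are held is driver specific and only hinted at in the comment.

#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>

static int foo_with_all_modeset_locks(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
	/* All modeset locks are held here; -EDEADLK backoff and retry are
	 * handled by the macros themselves. */
	DRM_MODESET_LOCK_ALL_END(ctx, ret);

	return ret;
}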
+ */ +#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \ +modeset_lock_fail: \ + if (ret == -EDEADLK) { \ + ret = drm_modeset_backoff(&ctx); \ + if (!ret) \ + goto modeset_lock_retry; \ + } \ + drm_modeset_drop_locks(&ctx); \ + drm_modeset_acquire_fini(&ctx); + #endif /* DRM_MODESET_LOCK_H_ */ diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 0a0834bef8bd..6078c700d9ba 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -173,6 +173,16 @@ struct drm_plane_state { */ enum drm_color_range color_range; + /** + * @fb_damage_clips: + * + * Blob representing damage (area in plane framebuffer that changed + * since last plane update) as an array of &drm_mode_rect in framebuffer + * coodinates of the attached framebuffer. Note that unlike plane src, + * damage clips are not in 16.16 fixed point. + */ + struct drm_property_blob *fb_damage_clips; + /** @src: clipped source coordinates of the plane (in 16.16) */ /** @dst: clipped destination coordinates of the plane */ struct drm_rect src, dst; @@ -798,5 +808,39 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, #define drm_for_each_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) +bool drm_any_plane_has_format(struct drm_device *dev, + u32 format, u64 modifier); +/** + * drm_plane_get_damage_clips_count - Returns damage clips count. + * @state: Plane state. + * + * Simple helper to get the number of &drm_mode_rect clips set by user-space + * during plane update. + * + * Return: Number of clips in plane fb_damage_clips blob property. + */ +static inline unsigned int +drm_plane_get_damage_clips_count(const struct drm_plane_state *state) +{ + return (state && state->fb_damage_clips) ? + state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0; +} + +/** + * drm_plane_get_damage_clips - Returns damage clips. + * @state: Plane state. + * + * Note that this function returns uapi type &drm_mode_rect. Drivers might + * instead be interested in internal &drm_rect which can be obtained by calling + * drm_helper_get_plane_damage_clips(). + * + * Return: Damage clips in plane fb_damage_clips blob property. + */ +static inline struct drm_mode_rect * +drm_plane_get_damage_clips(const struct drm_plane_state *state) +{ + return (struct drm_mode_rect *)((state && state->fb_damage_clips) ? 
+ state->fb_damage_clips->data : NULL); +} #endif diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h index 26cee2934781..331ebd60b3a3 100644 --- a/include/drm/drm_plane_helper.h +++ b/include/drm/drm_plane_helper.h @@ -38,42 +38,7 @@ */ #define DRM_PLANE_HELPER_NO_SCALING (1<<16) -int drm_plane_helper_check_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_rect *src, - struct drm_rect *dest, - unsigned int rotation, - int min_scale, - int max_scale, - bool can_position, - bool can_update_disabled, - bool *visible); -int drm_primary_helper_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, - struct drm_modeset_acquire_ctx *ctx); -int drm_primary_helper_disable(struct drm_plane *plane, - struct drm_modeset_acquire_ctx *ctx); void drm_primary_helper_destroy(struct drm_plane *plane); extern const struct drm_plane_funcs drm_primary_helper_funcs; -int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h, - struct drm_modeset_acquire_ctx *ctx); -int drm_plane_helper_disable(struct drm_plane *plane, - struct drm_modeset_acquire_ctx *ctx); - -/* For use by drm_crtc_helper.c */ -int drm_plane_helper_commit(struct drm_plane *plane, - struct drm_plane_state *plane_state, - struct drm_framebuffer *old_fb); #endif diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index d716d653b096..b03731a3f079 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -70,6 +70,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); +int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); @@ -93,9 +94,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir); void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf); void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); -void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num); -void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, - void *addr); int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h index 5b9efff35d6d..4a0a80d658c7 100644 --- a/include/drm/drm_property.h +++ b/include/drm/drm_property.h @@ -153,7 +153,8 @@ struct drm_property { * userspace. The kernel is allowed to update the value of these * properties. This is generally used to expose probe state to * userspace, e.g. the EDID, or the connector path property on DP - * MST sinks. + * MST sinks. Kernel can update the value of an immutable property + * by calling drm_object_property_set_value(). 
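A sketch of how a plane update hook might consume the damage blob with the two helpers above; plane_state and the flush routines are placeholders, and a real driver may instead convert to internal &drm_rect via drm_helper_get_plane_damage_clips() as the kernel-doc notes:

	struct drm_mode_rect *clips = drm_plane_get_damage_clips(plane_state);
	unsigned int i, num = drm_plane_get_damage_clips_count(plane_state);

	if (!num) {
		/* No damage blob attached: do a full-plane update. */
		example_flush_full(plane_state);		/* hypothetical */
	} else {
		for (i = 0; i < num; i++)
			/* clips are plain framebuffer pixels, not 16.16 */
			example_flush_rect(plane_state, &clips[i]);	/* hypothetical */
	}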
*/ uint32_t flags; diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h index 425432b85a87..b1fe921f8e8f 100644 --- a/include/drm/drm_syncobj.h +++ b/include/drm/drm_syncobj.h @@ -131,10 +131,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj) struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, u32 handle); -void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point, +void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, struct dma_fence *fence); int drm_syncobj_find_fence(struct drm_file *file_private, - u32 handle, u64 point, + u32 handle, u64 point, u64 flags, struct dma_fence **fence); void drm_syncobj_free(struct kref *kref); int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags, diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h index d25a9603ab57..6ad9630d4f48 100644 --- a/include/drm/drm_vblank.h +++ b/include/drm/drm_vblank.h @@ -95,7 +95,7 @@ struct drm_vblank_crtc { /** * @queue: Wait queue for vblank waiters. */ - wait_queue_head_t queue; /**< VBLANK wait queue */ + wait_queue_head_t queue; /** * @disable_timer: Disable timer for the delayed vblank disabling * hysteresis logic. Vblank disabling is controlled through the @@ -107,7 +107,7 @@ struct drm_vblank_crtc { /** * @seqlock: Protect vblank count and time. */ - seqlock_t seqlock; /* protects vblank count and time */ + seqlock_t seqlock; /** * @count: Current software vblank counter. @@ -123,7 +123,7 @@ struct drm_vblank_crtc { * this refcount reaches 0 can the hardware interrupt be disabled using * @disable_timer. */ - atomic_t refcount; /* number of users of vblank interruptsper crtc */ + atomic_t refcount; /** * @last: Protected by &drm_device.vbl_lock, used for wraparound handling. */ @@ -136,7 +136,7 @@ struct drm_vblank_crtc { * call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly * save and restore the vblank count. */ - unsigned int inmodeset; /* Display driver is setting mode */ + unsigned int inmodeset; /** * @pipe: drm_crtc_index() of the &drm_crtc corresponding to this * structure. diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index d87b268f1781..47e19796c450 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -264,6 +264,7 @@ struct drm_sched_backend_ops { * @hang_limit: once the hangs by a job crosses this limit then it is marked * guilty and it will be considered for scheduling further. * @num_jobs: the number of jobs in queue in the scheduler + * @ready: marks if the underlying HW is ready to work * * One scheduler is implemented for each hardware ring. 
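Back in the drm_syncobj.h hunk above, drm_syncobj_replace_fence() loses its u64 point argument while drm_syncobj_find_fence() gains a flags argument. A hedged caller-side sketch; file_priv, handle, syncobj and new_fence are placeholders from a hypothetical ioctl path:

	struct dma_fence *fence;
	int ret;

	/* point and flags both 0 for a plain binary syncobj lookup */
	ret = drm_syncobj_find_fence(file_priv, handle, 0, 0, &fence);
	if (ret)
		return ret;
	/* ... wait on or otherwise consume the fence ... */
	dma_fence_put(fence);

	/* replacement no longer takes a timeline point */
	drm_syncobj_replace_fence(syncobj, new_fence);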
*/ @@ -283,22 +284,26 @@ struct drm_gpu_scheduler { spinlock_t job_list_lock; int hang_limit; atomic_t num_jobs; + bool ready; }; int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name); + void drm_sched_fini(struct drm_gpu_scheduler *sched); int drm_sched_job_init(struct drm_sched_job *job, struct drm_sched_entity *entity, void *owner); +void drm_sched_job_cleanup(struct drm_sched_job *job); void drm_sched_wakeup(struct drm_gpu_scheduler *sched); void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *job); void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); bool drm_sched_dependency_optimized(struct dma_fence* fence, struct drm_sched_entity *entity); +void drm_sched_fault(struct drm_gpu_scheduler *sched); void drm_sched_job_kickout(struct drm_sched_job *s_job); void drm_sched_rq_add_entity(struct drm_sched_rq *rq, @@ -326,4 +331,8 @@ struct drm_sched_fence *drm_sched_fence_create( void drm_sched_fence_scheduled(struct drm_sched_fence *fence); void drm_sched_fence_finished(struct drm_sched_fence *fence); +unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched); +void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, + unsigned long remaining); + #endif diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index fd965ffbb92e..192667144693 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -365,16 +365,20 @@ INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */ /* AML/KBL Y GT2 */ -#define INTEL_AML_GT2_IDS(info) \ +#define INTEL_AML_KBL_GT2_IDS(info) \ INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \ INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */ +/* AML/CFL Y GT2 */ +#define INTEL_AML_CFL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x87CA, info) + #define INTEL_KBL_IDS(info) \ INTEL_KBL_GT1_IDS(info), \ INTEL_KBL_GT2_IDS(info), \ INTEL_KBL_GT3_IDS(info), \ INTEL_KBL_GT4_IDS(info), \ - INTEL_AML_GT2_IDS(info) + INTEL_AML_KBL_GT2_IDS(info) /* CFL S */ #define INTEL_CFL_S_GT1_IDS(info) \ @@ -407,17 +411,17 @@ /* WHL/CFL U GT1 */ #define INTEL_WHL_U_GT1_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA1, info) + INTEL_VGA_DEVICE(0x3EA1, info), \ + INTEL_VGA_DEVICE(0x3EA4, info) /* WHL/CFL U GT2 */ #define INTEL_WHL_U_GT2_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA0, info) + INTEL_VGA_DEVICE(0x3EA0, info), \ + INTEL_VGA_DEVICE(0x3EA3, info) /* WHL/CFL U GT3 */ #define INTEL_WHL_U_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x3EA2, info), \ - INTEL_VGA_DEVICE(0x3EA3, info), \ - INTEL_VGA_DEVICE(0x3EA4, info) + INTEL_VGA_DEVICE(0x3EA2, info) #define INTEL_CFL_IDS(info) \ INTEL_CFL_S_GT1_IDS(info), \ @@ -427,7 +431,8 @@ INTEL_CFL_U_GT3_IDS(info), \ INTEL_WHL_U_GT1_IDS(info), \ INTEL_WHL_U_GT2_IDS(info), \ - INTEL_WHL_U_GT3_IDS(info) + INTEL_WHL_U_GT3_IDS(info), \ + INTEL_AML_CFL_GT2_IDS(info) /* CNL */ #define INTEL_CNL_IDS(info) \ diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h index fe9827d0ca8a..448aa5ea4722 100644 --- a/include/drm/tinydrm/tinydrm.h +++ b/include/drm/tinydrm/tinydrm.h @@ -10,10 +10,15 @@ #ifndef __LINUX_TINYDRM_H #define __LINUX_TINYDRM_H -#include <drm/drm_gem_cma_helper.h> -#include <drm/drm_fb_cma_helper.h> +#include <linux/mutex.h> #include <drm/drm_simple_kms_helper.h> +struct drm_clip_rect; +struct drm_driver; +struct drm_file; +struct drm_framebuffer; +struct drm_framebuffer_funcs; + /** * struct tinydrm_device - tinydrm device */ @@ -54,27 +59,6 @@ pipe_to_tinydrm(struct 
drm_simple_display_pipe *pipe) } /** - * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations - * - * This macro provides a shortcut for setting the tinydrm GEM operations in - * the &drm_driver structure. - */ -#define TINYDRM_GEM_DRIVER_OPS \ - .gem_free_object_unlocked = tinydrm_gem_cma_free_object, \ - .gem_print_info = drm_gem_cma_print_info, \ - .gem_vm_ops = &drm_gem_cma_vm_ops, \ - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \ - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \ - .gem_prime_import = drm_gem_prime_import, \ - .gem_prime_export = drm_gem_prime_export, \ - .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, \ - .gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \ - .gem_prime_vmap = drm_gem_cma_prime_vmap, \ - .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \ - .gem_prime_mmap = drm_gem_cma_prime_mmap, \ - .dumb_create = drm_gem_cma_dumb_create - -/** * TINYDRM_MODE - tinydrm display mode * @hd: Horizontal resolution, width * @vd: Vertical resolution, height @@ -97,11 +81,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe) .type = DRM_MODE_TYPE_DRIVER, \ .clock = 1 /* pass validation */ -void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj); -struct drm_gem_object * -tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm, - struct dma_buf_attachment *attach, - struct sg_table *sgt); int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev, const struct drm_framebuffer_funcs *fb_funcs, struct drm_driver *driver); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e4fee8e02559..1021106438b2 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -31,7 +31,6 @@ #define _TTM_BO_DRIVER_H_ #include <drm/drm_mm.h> -#include <drm/drm_global.h> #include <drm/drm_vma_manager.h> #include <linux/workqueue.h> #include <linux/fs.h> @@ -385,15 +384,6 @@ struct ttm_bo_driver { }; /** - * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global. - */ - -struct ttm_bo_global_ref { - struct drm_global_reference ref; - struct ttm_mem_global *mem_glob; -}; - -/** * struct ttm_bo_global - Buffer object driver global data. * * @mem_glob: Pointer to a struct ttm_mem_global object for accounting. @@ -407,7 +397,7 @@ struct ttm_bo_global_ref { * @swap_lru: Lru list of buffer objects used for swapping. */ -struct ttm_bo_global { +extern struct ttm_bo_global { /** * Constant after init. @@ -416,12 +406,12 @@ struct ttm_bo_global { struct kobject kobj; struct ttm_mem_global *mem_glob; struct page *dummy_read_page; - struct mutex device_list_mutex; spinlock_t lru_lock; /** - * Protected by device_list_mutex. + * Protected by ttm_global_mutex. */ + unsigned int use_count; struct list_head device_list; /** @@ -433,7 +423,7 @@ struct ttm_bo_global { * Internal protection. */ atomic_t bo_count; -}; +} ttm_bo_glob; #define TTM_NUM_MEM_TYPES 8 @@ -578,9 +568,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); -void ttm_bo_global_release(struct drm_global_reference *ref); -int ttm_bo_global_init(struct drm_global_reference *ref); - int ttm_bo_device_release(struct ttm_bo_device *bdev); /** @@ -598,7 +585,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev); * Returns: * !0: Failure. 
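With ttm_bo_glob exported as a single shared instance above, and struct ttm_bo_global_ref plus ttm_bo_global_init()/ttm_bo_global_release() removed, the per-driver global bookkeeping goes away. A rough driver-side sketch with made-up names; the new ttm_bo_device_init() prototype is the one shown immediately below:

	/* Reference the one shared BO global directly where still needed. */
	struct ttm_bo_global *glob = &ttm_bo_glob;

	/* Device init no longer takes a struct ttm_bo_global pointer. */
	ret = ttm_bo_device_init(&mydev->bdev, &mydev_bo_driver,
				 mapping, file_page_offset, need_dma32);

Previously drivers carried a ttm_bo_global_ref and registered it through the drm_global machinery that this header no longer includes; that boilerplate can simply be deleted.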
*/ -int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob, +int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_driver *driver, struct address_space *mapping, uint64_t file_page_offset, bool need_dma32); diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index b0fdd1980034..621615fa7728 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -40,13 +40,13 @@ * * @head: list head for thread-private list. * @bo: refcounted buffer object pointer. - * @shared: should the fence be added shared? + * @num_shared: How many shared fences we want to add. */ struct ttm_validate_buffer { struct list_head head; struct ttm_buffer_object *bo; - bool shared; + unsigned int num_shared; }; /** diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index 737b5fed8003..3ff48a0a2d7b 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h @@ -63,7 +63,7 @@ #define TTM_MEM_MAX_ZONES 2 struct ttm_mem_zone; -struct ttm_mem_global { +extern struct ttm_mem_global { struct kobject kobj; struct ttm_bo_global *bo_glob; struct workqueue_struct *swap_queue; @@ -78,7 +78,7 @@ struct ttm_mem_global { #else struct ttm_mem_zone *zone_dma32; #endif -}; +} ttm_mem_glob; extern int ttm_mem_global_init(struct ttm_mem_global *glob); extern void ttm_mem_global_release(struct ttm_mem_global *glob); diff --git a/include/dt-bindings/clock/bcm2835-aux.h b/include/dt-bindings/clock/bcm2835-aux.h index d91156e2658d..bb79de383a3b 100644 --- a/include/dt-bindings/clock/bcm2835-aux.h +++ b/include/dt-bindings/clock/bcm2835-aux.h @@ -1,14 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2015 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #define BCM2835_AUX_CLOCK_UART 0 diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h index a0c812b0fa39..2cec01f96897 100644 --- a/include/dt-bindings/clock/bcm2835.h +++ b/include/dt-bindings/clock/bcm2835.h @@ -1,14 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2015 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
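Stepping back to the ttm_execbuf_util.h hunk above: the shared bool in struct ttm_validate_buffer becomes num_shared, a count of shared fence slots to reserve. A minimal sketch of a caller filling the validation list with the new field; bo and validate_list are placeholders:

	struct ttm_validate_buffer tv;

	tv.bo = bo;
	/* was "tv.shared = true"; now state how many shared fence slots
	 * the reservation should make room for */
	tv.num_shared = 1;
	list_add_tail(&tv.head, &validate_list);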
*/ #define BCM2835_PLLA 0 diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h index 3979d48c025f..db0763e96173 100644 --- a/include/dt-bindings/clock/gxbb-clkc.h +++ b/include/dt-bindings/clock/gxbb-clkc.h @@ -128,5 +128,23 @@ #define CLKID_VDEC_1 153 #define CLKID_VDEC_HEVC 156 #define CLKID_GEN_CLK 159 +#define CLKID_VID_PLL 166 +#define CLKID_VCLK 175 +#define CLKID_VCLK2 176 +#define CLKID_VCLK_DIV1 185 +#define CLKID_VCLK_DIV2 186 +#define CLKID_VCLK_DIV4 187 +#define CLKID_VCLK_DIV6 188 +#define CLKID_VCLK_DIV12 189 +#define CLKID_VCLK2_DIV1 190 +#define CLKID_VCLK2_DIV2 191 +#define CLKID_VCLK2_DIV4 192 +#define CLKID_VCLK2_DIV6 193 +#define CLKID_VCLK2_DIV12 194 +#define CLKID_CTS_ENCI 199 +#define CLKID_CTS_ENCP 200 +#define CLKID_CTS_VDAC 201 +#define CLKID_HDMI_TX 202 +#define CLKID_HDMI 205 #endif /* __GXBB_CLKC_H */ diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h index 87b068f4a998..b3cef297d5df 100644 --- a/include/dt-bindings/clock/imx6qdl-clock.h +++ b/include/dt-bindings/clock/imx6qdl-clock.h @@ -274,6 +274,8 @@ #define IMX6QDL_CLK_EPIT1 261 #define IMX6QDL_CLK_EPIT2 262 #define IMX6QDL_CLK_MMDC_P0_IPG 263 -#define IMX6QDL_CLK_END 264 +#define IMX6QDL_CLK_DCIC1 264 +#define IMX6QDL_CLK_DCIC2 265 +#define IMX6QDL_CLK_END 266 #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h new file mode 100644 index 000000000000..21d872e69cb1 --- /dev/null +++ b/include/dt-bindings/clock/imx7ulp-clock.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017~2018 NXP + * + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX7ULP_H +#define __DT_BINDINGS_CLOCK_IMX7ULP_H + +/* SCG1 */ + +#define IMX7ULP_CLK_DUMMY 0 +#define IMX7ULP_CLK_ROSC 1 +#define IMX7ULP_CLK_SOSC 2 +#define IMX7ULP_CLK_FIRC 3 +#define IMX7ULP_CLK_SPLL_PRE_SEL 4 +#define IMX7ULP_CLK_SPLL_PRE_DIV 5 +#define IMX7ULP_CLK_SPLL 6 +#define IMX7ULP_CLK_SPLL_POST_DIV1 7 +#define IMX7ULP_CLK_SPLL_POST_DIV2 8 +#define IMX7ULP_CLK_SPLL_PFD0 9 +#define IMX7ULP_CLK_SPLL_PFD1 10 +#define IMX7ULP_CLK_SPLL_PFD2 11 +#define IMX7ULP_CLK_SPLL_PFD3 12 +#define IMX7ULP_CLK_SPLL_PFD_SEL 13 +#define IMX7ULP_CLK_SPLL_SEL 14 +#define IMX7ULP_CLK_APLL_PRE_SEL 15 +#define IMX7ULP_CLK_APLL_PRE_DIV 16 +#define IMX7ULP_CLK_APLL 17 +#define IMX7ULP_CLK_APLL_POST_DIV1 18 +#define IMX7ULP_CLK_APLL_POST_DIV2 19 +#define IMX7ULP_CLK_APLL_PFD0 20 +#define IMX7ULP_CLK_APLL_PFD1 21 +#define IMX7ULP_CLK_APLL_PFD2 22 +#define IMX7ULP_CLK_APLL_PFD3 23 +#define IMX7ULP_CLK_APLL_PFD_SEL 24 +#define IMX7ULP_CLK_APLL_SEL 25 +#define IMX7ULP_CLK_UPLL 26 +#define IMX7ULP_CLK_SYS_SEL 27 +#define IMX7ULP_CLK_CORE_DIV 28 +#define IMX7ULP_CLK_BUS_DIV 29 +#define IMX7ULP_CLK_PLAT_DIV 30 +#define IMX7ULP_CLK_DDR_SEL 31 +#define IMX7ULP_CLK_DDR_DIV 32 +#define IMX7ULP_CLK_NIC_SEL 33 +#define IMX7ULP_CLK_NIC0_DIV 34 +#define IMX7ULP_CLK_GPU_DIV 35 +#define IMX7ULP_CLK_NIC1_DIV 36 +#define IMX7ULP_CLK_NIC1_BUS_DIV 37 +#define IMX7ULP_CLK_NIC1_EXT_DIV 38 +#define IMX7ULP_CLK_MIPI_PLL 39 +#define IMX7ULP_CLK_SIRC 40 +#define IMX7ULP_CLK_SOSC_BUS_CLK 41 +#define IMX7ULP_CLK_FIRC_BUS_CLK 42 +#define IMX7ULP_CLK_SPLL_BUS_CLK 43 +#define IMX7ULP_CLK_HSRUN_SYS_SEL 44 +#define IMX7ULP_CLK_HSRUN_CORE_DIV 45 + +#define IMX7ULP_CLK_SCG1_END 46 + +/* PCC2 */ +#define IMX7ULP_CLK_DMA1 0 +#define IMX7ULP_CLK_RGPIO2P1 1 +#define IMX7ULP_CLK_FLEXBUS 
2 +#define IMX7ULP_CLK_SEMA42_1 3 +#define IMX7ULP_CLK_DMA_MUX1 4 +#define IMX7ULP_CLK_SNVS 5 +#define IMX7ULP_CLK_CAAM 6 +#define IMX7ULP_CLK_LPTPM4 7 +#define IMX7ULP_CLK_LPTPM5 8 +#define IMX7ULP_CLK_LPIT1 9 +#define IMX7ULP_CLK_LPSPI2 10 +#define IMX7ULP_CLK_LPSPI3 11 +#define IMX7ULP_CLK_LPI2C4 12 +#define IMX7ULP_CLK_LPI2C5 13 +#define IMX7ULP_CLK_LPUART4 14 +#define IMX7ULP_CLK_LPUART5 15 +#define IMX7ULP_CLK_FLEXIO1 16 +#define IMX7ULP_CLK_USB0 17 +#define IMX7ULP_CLK_USB1 18 +#define IMX7ULP_CLK_USB_PHY 19 +#define IMX7ULP_CLK_USB_PL301 20 +#define IMX7ULP_CLK_USDHC0 21 +#define IMX7ULP_CLK_USDHC1 22 +#define IMX7ULP_CLK_WDG1 23 +#define IMX7ULP_CLK_WDG2 24 + +#define IMX7ULP_CLK_PCC2_END 25 + +/* PCC3 */ +#define IMX7ULP_CLK_LPTPM6 0 +#define IMX7ULP_CLK_LPTPM7 1 +#define IMX7ULP_CLK_LPI2C6 2 +#define IMX7ULP_CLK_LPI2C7 3 +#define IMX7ULP_CLK_LPUART6 4 +#define IMX7ULP_CLK_LPUART7 5 +#define IMX7ULP_CLK_VIU 6 +#define IMX7ULP_CLK_DSI 7 +#define IMX7ULP_CLK_LCDIF 8 +#define IMX7ULP_CLK_MMDC 9 +#define IMX7ULP_CLK_PCTLC 10 +#define IMX7ULP_CLK_PCTLD 11 +#define IMX7ULP_CLK_PCTLE 12 +#define IMX7ULP_CLK_PCTLF 13 +#define IMX7ULP_CLK_GPU3D 14 +#define IMX7ULP_CLK_GPU2D 15 + +#define IMX7ULP_CLK_PCC3_END 16 + +/* SMC1 */ +#define IMX7ULP_CLK_ARM 0 + +#define IMX7ULP_CLK_SMC1_END 1 + +#endif /* __DT_BINDINGS_CLOCK_IMX7ULP_H */ diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h new file mode 100644 index 000000000000..4236818e3be5 --- /dev/null +++ b/include/dt-bindings/clock/imx8-clock.h @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2018 NXP + * Dong Aisheng <aisheng.dong@nxp.com> + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX_H +#define __DT_BINDINGS_CLOCK_IMX_H + +/* SCU Clocks */ + +#define IMX_CLK_DUMMY 0 + +/* CPU */ +#define IMX_A35_CLK 1 + +/* LSIO SS */ +#define IMX_LSIO_MEM_CLK 2 +#define IMX_LSIO_BUS_CLK 3 +#define IMX_LSIO_PWM0_CLK 10 +#define IMX_LSIO_PWM1_CLK 11 +#define IMX_LSIO_PWM2_CLK 12 +#define IMX_LSIO_PWM3_CLK 13 +#define IMX_LSIO_PWM4_CLK 14 +#define IMX_LSIO_PWM5_CLK 15 +#define IMX_LSIO_PWM6_CLK 16 +#define IMX_LSIO_PWM7_CLK 17 +#define IMX_LSIO_GPT0_CLK 18 +#define IMX_LSIO_GPT1_CLK 19 +#define IMX_LSIO_GPT2_CLK 20 +#define IMX_LSIO_GPT3_CLK 21 +#define IMX_LSIO_GPT4_CLK 22 +#define IMX_LSIO_FSPI0_CLK 23 +#define IMX_LSIO_FSPI1_CLK 24 + +/* Connectivity SS */ +#define IMX_CONN_AXI_CLK_ROOT 30 +#define IMX_CONN_AHB_CLK_ROOT 31 +#define IMX_CONN_IPG_CLK_ROOT 32 +#define IMX_CONN_SDHC0_CLK 40 +#define IMX_CONN_SDHC1_CLK 41 +#define IMX_CONN_SDHC2_CLK 42 +#define IMX_CONN_ENET0_ROOT_CLK 43 +#define IMX_CONN_ENET0_BYPASS_CLK 44 +#define IMX_CONN_ENET0_RGMII_CLK 45 +#define IMX_CONN_ENET1_ROOT_CLK 46 +#define IMX_CONN_ENET1_BYPASS_CLK 47 +#define IMX_CONN_ENET1_RGMII_CLK 48 +#define IMX_CONN_GPMI_BCH_IO_CLK 49 +#define IMX_CONN_GPMI_BCH_CLK 50 +#define IMX_CONN_USB2_ACLK 51 +#define IMX_CONN_USB2_BUS_CLK 52 +#define IMX_CONN_USB2_LPM_CLK 53 + +/* HSIO SS */ +#define IMX_HSIO_AXI_CLK 60 +#define IMX_HSIO_PER_CLK 61 + +/* Display controller SS */ +#define IMX_DC_AXI_EXT_CLK 70 +#define IMX_DC_AXI_INT_CLK 71 +#define IMX_DC_CFG_CLK 72 +#define IMX_DC0_PLL0_CLK 80 +#define IMX_DC0_PLL1_CLK 81 +#define IMX_DC0_DISP0_CLK 82 +#define IMX_DC0_DISP1_CLK 83 + +/* MIPI-LVDS SS */ +#define IMX_MIPI_IPG_CLK 90 +#define IMX_MIPI0_PIXEL_CLK 100 +#define IMX_MIPI0_BYPASS_CLK 101 +#define IMX_MIPI0_LVDS_PIXEL_CLK 102 +#define IMX_MIPI0_LVDS_BYPASS_CLK 103 +#define IMX_MIPI0_LVDS_PHY_CLK 104 +#define IMX_MIPI0_I2C0_CLK 
105 +#define IMX_MIPI0_I2C1_CLK 106 +#define IMX_MIPI0_PWM0_CLK 107 +#define IMX_MIPI1_PIXEL_CLK 108 +#define IMX_MIPI1_BYPASS_CLK 109 +#define IMX_MIPI1_LVDS_PIXEL_CLK 110 +#define IMX_MIPI1_LVDS_BYPASS_CLK 111 +#define IMX_MIPI1_LVDS_PHY_CLK 112 +#define IMX_MIPI1_I2C0_CLK 113 +#define IMX_MIPI1_I2C1_CLK 114 +#define IMX_MIPI1_PWM0_CLK 115 + +/* IMG SS */ +#define IMX_IMG_AXI_CLK 120 +#define IMX_IMG_IPG_CLK 121 +#define IMX_IMG_PXL_CLK 122 + +/* MIPI-CSI SS */ +#define IMX_CSI0_CORE_CLK 130 +#define IMX_CSI0_ESC_CLK 131 +#define IMX_CSI0_PWM0_CLK 132 +#define IMX_CSI0_I2C0_CLK 133 + +/* PARALLER CSI SS */ +#define IMX_PARALLEL_CSI_DPLL_CLK 140 +#define IMX_PARALLEL_CSI_PIXEL_CLK 141 +#define IMX_PARALLEL_CSI_MCLK_CLK 142 + +/* VPU SS */ +#define IMX_VPU_ENC_CLK 150 +#define IMX_VPU_DEC_CLK 151 + +/* GPU SS */ +#define IMX_GPU0_CORE_CLK 160 +#define IMX_GPU0_SHADER_CLK 161 + +/* ADMA SS */ +#define IMX_ADMA_IPG_CLK_ROOT 165 +#define IMX_ADMA_UART0_CLK 170 +#define IMX_ADMA_UART1_CLK 171 +#define IMX_ADMA_UART2_CLK 172 +#define IMX_ADMA_UART3_CLK 173 +#define IMX_ADMA_SPI0_CLK 174 +#define IMX_ADMA_SPI1_CLK 175 +#define IMX_ADMA_SPI2_CLK 176 +#define IMX_ADMA_SPI3_CLK 177 +#define IMX_ADMA_CAN0_CLK 178 +#define IMX_ADMA_CAN1_CLK 179 +#define IMX_ADMA_CAN2_CLK 180 +#define IMX_ADMA_I2C0_CLK 181 +#define IMX_ADMA_I2C1_CLK 182 +#define IMX_ADMA_I2C2_CLK 183 +#define IMX_ADMA_I2C3_CLK 184 +#define IMX_ADMA_FTM0_CLK 185 +#define IMX_ADMA_FTM1_CLK 186 +#define IMX_ADMA_ADC0_CLK 187 +#define IMX_ADMA_PWM_CLK 188 +#define IMX_ADMA_LCD_CLK 189 + +#define IMX_SCU_CLK_END 190 + +/* LPCG clocks */ + +/* LSIO SS LPCG */ +#define IMX_LSIO_LPCG_PWM0_IPG_CLK 0 +#define IMX_LSIO_LPCG_PWM0_IPG_S_CLK 1 +#define IMX_LSIO_LPCG_PWM0_IPG_HF_CLK 2 +#define IMX_LSIO_LPCG_PWM0_IPG_SLV_CLK 3 +#define IMX_LSIO_LPCG_PWM0_IPG_MSTR_CLK 4 +#define IMX_LSIO_LPCG_PWM1_IPG_CLK 5 +#define IMX_LSIO_LPCG_PWM1_IPG_S_CLK 6 +#define IMX_LSIO_LPCG_PWM1_IPG_HF_CLK 7 +#define IMX_LSIO_LPCG_PWM1_IPG_SLV_CLK 8 +#define IMX_LSIO_LPCG_PWM1_IPG_MSTR_CLK 9 +#define IMX_LSIO_LPCG_PWM2_IPG_CLK 10 +#define IMX_LSIO_LPCG_PWM2_IPG_S_CLK 11 +#define IMX_LSIO_LPCG_PWM2_IPG_HF_CLK 12 +#define IMX_LSIO_LPCG_PWM2_IPG_SLV_CLK 13 +#define IMX_LSIO_LPCG_PWM2_IPG_MSTR_CLK 14 +#define IMX_LSIO_LPCG_PWM3_IPG_CLK 15 +#define IMX_LSIO_LPCG_PWM3_IPG_S_CLK 16 +#define IMX_LSIO_LPCG_PWM3_IPG_HF_CLK 17 +#define IMX_LSIO_LPCG_PWM3_IPG_SLV_CLK 18 +#define IMX_LSIO_LPCG_PWM3_IPG_MSTR_CLK 19 +#define IMX_LSIO_LPCG_PWM4_IPG_CLK 20 +#define IMX_LSIO_LPCG_PWM4_IPG_S_CLK 21 +#define IMX_LSIO_LPCG_PWM4_IPG_HF_CLK 22 +#define IMX_LSIO_LPCG_PWM4_IPG_SLV_CLK 23 +#define IMX_LSIO_LPCG_PWM4_IPG_MSTR_CLK 24 +#define IMX_LSIO_LPCG_PWM5_IPG_CLK 25 +#define IMX_LSIO_LPCG_PWM5_IPG_S_CLK 26 +#define IMX_LSIO_LPCG_PWM5_IPG_HF_CLK 27 +#define IMX_LSIO_LPCG_PWM5_IPG_SLV_CLK 28 +#define IMX_LSIO_LPCG_PWM5_IPG_MSTR_CLK 29 +#define IMX_LSIO_LPCG_PWM6_IPG_CLK 30 +#define IMX_LSIO_LPCG_PWM6_IPG_S_CLK 31 +#define IMX_LSIO_LPCG_PWM6_IPG_HF_CLK 32 +#define IMX_LSIO_LPCG_PWM6_IPG_SLV_CLK 33 +#define IMX_LSIO_LPCG_PWM6_IPG_MSTR_CLK 34 +#define IMX_LSIO_LPCG_PWM7_IPG_CLK 35 +#define IMX_LSIO_LPCG_PWM7_IPG_S_CLK 36 +#define IMX_LSIO_LPCG_PWM7_IPG_HF_CLK 37 +#define IMX_LSIO_LPCG_PWM7_IPG_SLV_CLK 38 +#define IMX_LSIO_LPCG_PWM7_IPG_MSTR_CLK 39 +#define IMX_LSIO_LPCG_GPT0_IPG_CLK 40 +#define IMX_LSIO_LPCG_GPT0_IPG_S_CLK 41 +#define IMX_LSIO_LPCG_GPT0_IPG_HF_CLK 42 +#define IMX_LSIO_LPCG_GPT0_IPG_SLV_CLK 43 +#define IMX_LSIO_LPCG_GPT0_IPG_MSTR_CLK 44 +#define IMX_LSIO_LPCG_GPT1_IPG_CLK 45 +#define 
IMX_LSIO_LPCG_GPT1_IPG_S_CLK 46 +#define IMX_LSIO_LPCG_GPT1_IPG_HF_CLK 47 +#define IMX_LSIO_LPCG_GPT1_IPG_SLV_CLK 48 +#define IMX_LSIO_LPCG_GPT1_IPG_MSTR_CLK 49 +#define IMX_LSIO_LPCG_GPT2_IPG_CLK 50 +#define IMX_LSIO_LPCG_GPT2_IPG_S_CLK 51 +#define IMX_LSIO_LPCG_GPT2_IPG_HF_CLK 52 +#define IMX_LSIO_LPCG_GPT2_IPG_SLV_CLK 53 +#define IMX_LSIO_LPCG_GPT2_IPG_MSTR_CLK 54 +#define IMX_LSIO_LPCG_GPT3_IPG_CLK 55 +#define IMX_LSIO_LPCG_GPT3_IPG_S_CLK 56 +#define IMX_LSIO_LPCG_GPT3_IPG_HF_CLK 57 +#define IMX_LSIO_LPCG_GPT3_IPG_SLV_CLK 58 +#define IMX_LSIO_LPCG_GPT3_IPG_MSTR_CLK 59 +#define IMX_LSIO_LPCG_GPT4_IPG_CLK 60 +#define IMX_LSIO_LPCG_GPT4_IPG_S_CLK 61 +#define IMX_LSIO_LPCG_GPT4_IPG_HF_CLK 62 +#define IMX_LSIO_LPCG_GPT4_IPG_SLV_CLK 63 +#define IMX_LSIO_LPCG_GPT4_IPG_MSTR_CLK 64 +#define IMX_LSIO_LPCG_FSPI0_HCLK 65 +#define IMX_LSIO_LPCG_FSPI0_IPG_CLK 66 +#define IMX_LSIO_LPCG_FSPI0_IPG_S_CLK 67 +#define IMX_LSIO_LPCG_FSPI0_IPG_SFCK 68 +#define IMX_LSIO_LPCG_FSPI1_HCLK 69 +#define IMX_LSIO_LPCG_FSPI1_IPG_CLK 70 +#define IMX_LSIO_LPCG_FSPI1_IPG_S_CLK 71 +#define IMX_LSIO_LPCG_FSPI1_IPG_SFCK 72 + +#define IMX_LSIO_LPCG_CLK_END 73 + +/* Connectivity SS LPCG */ +#define IMX_CONN_LPCG_SDHC0_IPG_CLK 0 +#define IMX_CONN_LPCG_SDHC0_PER_CLK 1 +#define IMX_CONN_LPCG_SDHC0_HCLK 2 +#define IMX_CONN_LPCG_SDHC1_IPG_CLK 3 +#define IMX_CONN_LPCG_SDHC1_PER_CLK 4 +#define IMX_CONN_LPCG_SDHC1_HCLK 5 +#define IMX_CONN_LPCG_SDHC2_IPG_CLK 6 +#define IMX_CONN_LPCG_SDHC2_PER_CLK 7 +#define IMX_CONN_LPCG_SDHC2_HCLK 8 +#define IMX_CONN_LPCG_GPMI_APB_CLK 9 +#define IMX_CONN_LPCG_GPMI_BCH_APB_CLK 10 +#define IMX_CONN_LPCG_GPMI_BCH_IO_CLK 11 +#define IMX_CONN_LPCG_GPMI_BCH_CLK 12 +#define IMX_CONN_LPCG_APBHDMA_CLK 13 +#define IMX_CONN_LPCG_ENET0_ROOT_CLK 14 +#define IMX_CONN_LPCG_ENET0_TX_CLK 15 +#define IMX_CONN_LPCG_ENET0_AHB_CLK 16 +#define IMX_CONN_LPCG_ENET0_IPG_S_CLK 17 +#define IMX_CONN_LPCG_ENET0_IPG_CLK 18 + +#define IMX_CONN_LPCG_ENET1_ROOT_CLK 19 +#define IMX_CONN_LPCG_ENET1_TX_CLK 20 +#define IMX_CONN_LPCG_ENET1_AHB_CLK 21 +#define IMX_CONN_LPCG_ENET1_IPG_S_CLK 22 +#define IMX_CONN_LPCG_ENET1_IPG_CLK 23 + +#define IMX_CONN_LPCG_CLK_END 24 + +/* ADMA SS LPCG */ +#define IMX_ADMA_LPCG_UART0_IPG_CLK 0 +#define IMX_ADMA_LPCG_UART0_BAUD_CLK 1 +#define IMX_ADMA_LPCG_UART1_IPG_CLK 2 +#define IMX_ADMA_LPCG_UART1_BAUD_CLK 3 +#define IMX_ADMA_LPCG_UART2_IPG_CLK 4 +#define IMX_ADMA_LPCG_UART2_BAUD_CLK 5 +#define IMX_ADMA_LPCG_UART3_IPG_CLK 6 +#define IMX_ADMA_LPCG_UART3_BAUD_CLK 7 +#define IMX_ADMA_LPCG_SPI0_IPG_CLK 8 +#define IMX_ADMA_LPCG_SPI1_IPG_CLK 9 +#define IMX_ADMA_LPCG_SPI2_IPG_CLK 10 +#define IMX_ADMA_LPCG_SPI3_IPG_CLK 11 +#define IMX_ADMA_LPCG_SPI0_CLK 12 +#define IMX_ADMA_LPCG_SPI1_CLK 13 +#define IMX_ADMA_LPCG_SPI2_CLK 14 +#define IMX_ADMA_LPCG_SPI3_CLK 15 +#define IMX_ADMA_LPCG_CAN0_IPG_CLK 16 +#define IMX_ADMA_LPCG_CAN0_IPG_PE_CLK 17 +#define IMX_ADMA_LPCG_CAN0_IPG_CHI_CLK 18 +#define IMX_ADMA_LPCG_CAN1_IPG_CLK 19 +#define IMX_ADMA_LPCG_CAN1_IPG_PE_CLK 20 +#define IMX_ADMA_LPCG_CAN1_IPG_CHI_CLK 21 +#define IMX_ADMA_LPCG_CAN2_IPG_CLK 22 +#define IMX_ADMA_LPCG_CAN2_IPG_PE_CLK 23 +#define IMX_ADMA_LPCG_CAN2_IPG_CHI_CLK 24 +#define IMX_ADMA_LPCG_I2C0_CLK 25 +#define IMX_ADMA_LPCG_I2C1_CLK 26 +#define IMX_ADMA_LPCG_I2C2_CLK 27 +#define IMX_ADMA_LPCG_I2C3_CLK 28 +#define IMX_ADMA_LPCG_I2C0_IPG_CLK 29 +#define IMX_ADMA_LPCG_I2C1_IPG_CLK 30 +#define IMX_ADMA_LPCG_I2C2_IPG_CLK 31 +#define IMX_ADMA_LPCG_I2C3_IPG_CLK 32 +#define IMX_ADMA_LPCG_FTM0_CLK 33 +#define IMX_ADMA_LPCG_FTM1_CLK 34 +#define 
IMX_ADMA_LPCG_FTM0_IPG_CLK 35 +#define IMX_ADMA_LPCG_FTM1_IPG_CLK 36 +#define IMX_ADMA_LPCG_PWM_HI_CLK 37 +#define IMX_ADMA_LPCG_PWM_IPG_CLK 38 +#define IMX_ADMA_LPCG_LCD_PIX_CLK 39 +#define IMX_ADMA_LPCG_LCD_APB_CLK 40 + +#define IMX_ADMA_LPCG_CLK_END 41 + +#endif /* __DT_BINDINGS_CLOCK_IMX_H */ diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h new file mode 100644 index 000000000000..b53be41929be --- /dev/null +++ b/include/dt-bindings/clock/imx8mq-clock.h @@ -0,0 +1,395 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2016 Freescale Semiconductor, Inc. + * Copyright 2017 NXP + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX8MQ_H +#define __DT_BINDINGS_CLOCK_IMX8MQ_H + +#define IMX8MQ_CLK_DUMMY 0 +#define IMX8MQ_CLK_32K 1 +#define IMX8MQ_CLK_25M 2 +#define IMX8MQ_CLK_27M 3 +#define IMX8MQ_CLK_EXT1 4 +#define IMX8MQ_CLK_EXT2 5 +#define IMX8MQ_CLK_EXT3 6 +#define IMX8MQ_CLK_EXT4 7 + +/* ANAMIX PLL clocks */ +/* FRAC PLLs */ +/* ARM PLL */ +#define IMX8MQ_ARM_PLL_REF_SEL 8 +#define IMX8MQ_ARM_PLL_REF_DIV 9 +#define IMX8MQ_ARM_PLL 10 +#define IMX8MQ_ARM_PLL_BYPASS 11 +#define IMX8MQ_ARM_PLL_OUT 12 + +/* GPU PLL */ +#define IMX8MQ_GPU_PLL_REF_SEL 13 +#define IMX8MQ_GPU_PLL_REF_DIV 14 +#define IMX8MQ_GPU_PLL 15 +#define IMX8MQ_GPU_PLL_BYPASS 16 +#define IMX8MQ_GPU_PLL_OUT 17 + +/* VPU PLL */ +#define IMX8MQ_VPU_PLL_REF_SEL 18 +#define IMX8MQ_VPU_PLL_REF_DIV 19 +#define IMX8MQ_VPU_PLL 20 +#define IMX8MQ_VPU_PLL_BYPASS 21 +#define IMX8MQ_VPU_PLL_OUT 22 + +/* AUDIO PLL1 */ +#define IMX8MQ_AUDIO_PLL1_REF_SEL 23 +#define IMX8MQ_AUDIO_PLL1_REF_DIV 24 +#define IMX8MQ_AUDIO_PLL1 25 +#define IMX8MQ_AUDIO_PLL1_BYPASS 26 +#define IMX8MQ_AUDIO_PLL1_OUT 27 + +/* AUDIO PLL2 */ +#define IMX8MQ_AUDIO_PLL2_REF_SEL 28 +#define IMX8MQ_AUDIO_PLL2_REF_DIV 29 +#define IMX8MQ_AUDIO_PLL2 30 +#define IMX8MQ_AUDIO_PLL2_BYPASS 31 +#define IMX8MQ_AUDIO_PLL2_OUT 32 + +/* VIDEO PLL1 */ +#define IMX8MQ_VIDEO_PLL1_REF_SEL 33 +#define IMX8MQ_VIDEO_PLL1_REF_DIV 34 +#define IMX8MQ_VIDEO_PLL1 35 +#define IMX8MQ_VIDEO_PLL1_BYPASS 36 +#define IMX8MQ_VIDEO_PLL1_OUT 37 + +/* SYS1 PLL */ +#define IMX8MQ_SYS1_PLL1_REF_SEL 38 +#define IMX8MQ_SYS1_PLL1_REF_DIV 39 +#define IMX8MQ_SYS1_PLL1 40 +#define IMX8MQ_SYS1_PLL1_OUT 41 +#define IMX8MQ_SYS1_PLL1_OUT_DIV 42 +#define IMX8MQ_SYS1_PLL2 43 +#define IMX8MQ_SYS1_PLL2_DIV 44 +#define IMX8MQ_SYS1_PLL2_OUT 45 + +/* SYS2 PLL */ +#define IMX8MQ_SYS2_PLL1_REF_SEL 46 +#define IMX8MQ_SYS2_PLL1_REF_DIV 47 +#define IMX8MQ_SYS2_PLL1 48 +#define IMX8MQ_SYS2_PLL1_OUT 49 +#define IMX8MQ_SYS2_PLL1_OUT_DIV 50 +#define IMX8MQ_SYS2_PLL2 51 +#define IMX8MQ_SYS2_PLL2_DIV 52 +#define IMX8MQ_SYS2_PLL2_OUT 53 + +/* SYS3 PLL */ +#define IMX8MQ_SYS3_PLL1_REF_SEL 54 +#define IMX8MQ_SYS3_PLL1_REF_DIV 55 +#define IMX8MQ_SYS3_PLL1 56 +#define IMX8MQ_SYS3_PLL1_OUT 57 +#define IMX8MQ_SYS3_PLL1_OUT_DIV 58 +#define IMX8MQ_SYS3_PLL2 59 +#define IMX8MQ_SYS3_PLL2_DIV 60 +#define IMX8MQ_SYS3_PLL2_OUT 61 + +/* DRAM PLL */ +#define IMX8MQ_DRAM_PLL1_REF_SEL 62 +#define IMX8MQ_DRAM_PLL1_REF_DIV 63 +#define IMX8MQ_DRAM_PLL1 64 +#define IMX8MQ_DRAM_PLL1_OUT 65 +#define IMX8MQ_DRAM_PLL1_OUT_DIV 66 +#define IMX8MQ_DRAM_PLL2 67 +#define IMX8MQ_DRAM_PLL2_DIV 68 +#define IMX8MQ_DRAM_PLL2_OUT 69 + +/* SYS PLL DIV */ +#define IMX8MQ_SYS1_PLL_40M 70 +#define IMX8MQ_SYS1_PLL_80M 71 +#define IMX8MQ_SYS1_PLL_100M 72 +#define IMX8MQ_SYS1_PLL_133M 73 +#define IMX8MQ_SYS1_PLL_160M 74 +#define IMX8MQ_SYS1_PLL_200M 75 +#define IMX8MQ_SYS1_PLL_266M 76 +#define IMX8MQ_SYS1_PLL_400M 77 +#define 
IMX8MQ_SYS1_PLL_800M 78 + +#define IMX8MQ_SYS2_PLL_50M 79 +#define IMX8MQ_SYS2_PLL_100M 80 +#define IMX8MQ_SYS2_PLL_125M 81 +#define IMX8MQ_SYS2_PLL_166M 82 +#define IMX8MQ_SYS2_PLL_200M 83 +#define IMX8MQ_SYS2_PLL_250M 84 +#define IMX8MQ_SYS2_PLL_333M 85 +#define IMX8MQ_SYS2_PLL_500M 86 +#define IMX8MQ_SYS2_PLL_1000M 87 + +/* CCM ROOT clocks */ +/* A53 */ +#define IMX8MQ_CLK_A53_SRC 88 +#define IMX8MQ_CLK_A53_CG 89 +#define IMX8MQ_CLK_A53_DIV 90 +/* M4 */ +#define IMX8MQ_CLK_M4_SRC 91 +#define IMX8MQ_CLK_M4_CG 92 +#define IMX8MQ_CLK_M4_DIV 93 +/* VPU */ +#define IMX8MQ_CLK_VPU_SRC 94 +#define IMX8MQ_CLK_VPU_CG 95 +#define IMX8MQ_CLK_VPU_DIV 96 +/* GPU CORE */ +#define IMX8MQ_CLK_GPU_CORE_SRC 97 +#define IMX8MQ_CLK_GPU_CORE_CG 98 +#define IMX8MQ_CLK_GPU_CORE_DIV 99 +/* GPU SHADER */ +#define IMX8MQ_CLK_GPU_SHADER_SRC 100 +#define IMX8MQ_CLK_GPU_SHADER_CG 101 +#define IMX8MQ_CLK_GPU_SHADER_DIV 102 + +/* BUS TYPE */ +/* MAIN AXI */ +#define IMX8MQ_CLK_MAIN_AXI 103 +/* ENET AXI */ +#define IMX8MQ_CLK_ENET_AXI 104 +/* NAND_USDHC_BUS */ +#define IMX8MQ_CLK_NAND_USDHC_BUS 105 +/* VPU BUS */ +#define IMX8MQ_CLK_VPU_BUS 106 +/* DISP_AXI */ +#define IMX8MQ_CLK_DISP_AXI 107 +/* DISP APB */ +#define IMX8MQ_CLK_DISP_APB 108 +/* DISP RTRM */ +#define IMX8MQ_CLK_DISP_RTRM 109 +/* USB_BUS */ +#define IMX8MQ_CLK_USB_BUS 110 +/* GPU_AXI */ +#define IMX8MQ_CLK_GPU_AXI 111 +/* GPU_AHB */ +#define IMX8MQ_CLK_GPU_AHB 112 +/* NOC */ +#define IMX8MQ_CLK_NOC 113 +/* NOC_APB */ +#define IMX8MQ_CLK_NOC_APB 115 + +/* AHB */ +#define IMX8MQ_CLK_AHB 116 +/* AUDIO AHB */ +#define IMX8MQ_CLK_AUDIO_AHB 117 + +/* DRAM_ALT */ +#define IMX8MQ_CLK_DRAM_ALT 118 +/* DRAM APB */ +#define IMX8MQ_CLK_DRAM_APB 119 +/* VPU_G1 */ +#define IMX8MQ_CLK_VPU_G1 120 +/* VPU_G2 */ +#define IMX8MQ_CLK_VPU_G2 121 +/* DISP_DTRC */ +#define IMX8MQ_CLK_DISP_DTRC 122 +/* DISP_DC8000 */ +#define IMX8MQ_CLK_DISP_DC8000 123 +/* PCIE_CTRL */ +#define IMX8MQ_CLK_PCIE1_CTRL 124 +/* PCIE_PHY */ +#define IMX8MQ_CLK_PCIE1_PHY 125 +/* PCIE_AUX */ +#define IMX8MQ_CLK_PCIE1_AUX 126 +/* DC_PIXEL */ +#define IMX8MQ_CLK_DC_PIXEL 127 +/* LCDIF_PIXEL */ +#define IMX8MQ_CLK_LCDIF_PIXEL 128 +/* SAI1~6 */ +#define IMX8MQ_CLK_SAI1 129 + +#define IMX8MQ_CLK_SAI2 130 + +#define IMX8MQ_CLK_SAI3 131 + +#define IMX8MQ_CLK_SAI4 132 + +#define IMX8MQ_CLK_SAI5 133 + +#define IMX8MQ_CLK_SAI6 134 +/* SPDIF1 */ +#define IMX8MQ_CLK_SPDIF1 135 +/* SPDIF2 */ +#define IMX8MQ_CLK_SPDIF2 136 +/* ENET_REF */ +#define IMX8MQ_CLK_ENET_REF 137 +/* ENET_TIMER */ +#define IMX8MQ_CLK_ENET_TIMER 138 +/* ENET_PHY */ +#define IMX8MQ_CLK_ENET_PHY_REF 139 +/* NAND */ +#define IMX8MQ_CLK_NAND 140 +/* QSPI */ +#define IMX8MQ_CLK_QSPI 141 +/* USDHC1 */ +#define IMX8MQ_CLK_USDHC1 142 +/* USDHC2 */ +#define IMX8MQ_CLK_USDHC2 143 +/* I2C1 */ +#define IMX8MQ_CLK_I2C1 144 +/* I2C2 */ +#define IMX8MQ_CLK_I2C2 145 +/* I2C3 */ +#define IMX8MQ_CLK_I2C3 146 +/* I2C4 */ +#define IMX8MQ_CLK_I2C4 147 +/* UART1 */ +#define IMX8MQ_CLK_UART1 148 +/* UART2 */ +#define IMX8MQ_CLK_UART2 149 +/* UART3 */ +#define IMX8MQ_CLK_UART3 150 +/* UART4 */ +#define IMX8MQ_CLK_UART4 151 +/* USB_CORE_REF */ +#define IMX8MQ_CLK_USB_CORE_REF 152 +/* USB_PHY_REF */ +#define IMX8MQ_CLK_USB_PHY_REF 163 +/* ECSPI1 */ +#define IMX8MQ_CLK_ECSPI1 164 +/* ECSPI2 */ +#define IMX8MQ_CLK_ECSPI2 165 +/* PWM1 */ +#define IMX8MQ_CLK_PWM1 166 +/* PWM2 */ +#define IMX8MQ_CLK_PWM2 167 +/* PWM3 */ +#define IMX8MQ_CLK_PWM3 168 +/* PWM4 */ +#define IMX8MQ_CLK_PWM4 169 +/* GPT1 */ +#define IMX8MQ_CLK_GPT1 170 +/* WDOG */ +#define IMX8MQ_CLK_WDOG 171 
+/* WRCLK */ +#define IMX8MQ_CLK_WRCLK 172 +/* DSI_CORE */ +#define IMX8MQ_CLK_DSI_CORE 173 +/* DSI_PHY */ +#define IMX8MQ_CLK_DSI_PHY_REF 174 +/* DSI_DBI */ +#define IMX8MQ_CLK_DSI_DBI 175 +/*DSI_ESC */ +#define IMX8MQ_CLK_DSI_ESC 176 +/* CSI1_CORE */ +#define IMX8MQ_CLK_CSI1_CORE 177 +/* CSI1_PHY */ +#define IMX8MQ_CLK_CSI1_PHY_REF 178 +/* CSI_ESC */ +#define IMX8MQ_CLK_CSI1_ESC 179 +/* CSI2_CORE */ +#define IMX8MQ_CLK_CSI2_CORE 170 +/* CSI2_PHY */ +#define IMX8MQ_CLK_CSI2_PHY_REF 181 +/* CSI2_ESC */ +#define IMX8MQ_CLK_CSI2_ESC 182 +/* PCIE2_CTRL */ +#define IMX8MQ_CLK_PCIE2_CTRL 183 +/* PCIE2_PHY */ +#define IMX8MQ_CLK_PCIE2_PHY 184 +/* PCIE2_AUX */ +#define IMX8MQ_CLK_PCIE2_AUX 185 +/* ECSPI3 */ +#define IMX8MQ_CLK_ECSPI3 186 + +/* CCGR clocks */ +#define IMX8MQ_CLK_A53_ROOT 187 +#define IMX8MQ_CLK_DRAM_ROOT 188 +#define IMX8MQ_CLK_ECSPI1_ROOT 189 +#define IMX8MQ_CLK_ECSPI2_ROOT 180 +#define IMX8MQ_CLK_ECSPI3_ROOT 181 +#define IMX8MQ_CLK_ENET1_ROOT 182 +#define IMX8MQ_CLK_GPT1_ROOT 193 +#define IMX8MQ_CLK_I2C1_ROOT 194 +#define IMX8MQ_CLK_I2C2_ROOT 195 +#define IMX8MQ_CLK_I2C3_ROOT 196 +#define IMX8MQ_CLK_I2C4_ROOT 197 +#define IMX8MQ_CLK_M4_ROOT 198 +#define IMX8MQ_CLK_PCIE1_ROOT 199 +#define IMX8MQ_CLK_PCIE2_ROOT 200 +#define IMX8MQ_CLK_PWM1_ROOT 201 +#define IMX8MQ_CLK_PWM2_ROOT 202 +#define IMX8MQ_CLK_PWM3_ROOT 203 +#define IMX8MQ_CLK_PWM4_ROOT 204 +#define IMX8MQ_CLK_QSPI_ROOT 205 +#define IMX8MQ_CLK_SAI1_ROOT 206 +#define IMX8MQ_CLK_SAI2_ROOT 207 +#define IMX8MQ_CLK_SAI3_ROOT 208 +#define IMX8MQ_CLK_SAI4_ROOT 209 +#define IMX8MQ_CLK_SAI5_ROOT 210 +#define IMX8MQ_CLK_SAI6_ROOT 212 +#define IMX8MQ_CLK_UART1_ROOT 213 +#define IMX8MQ_CLK_UART2_ROOT 214 +#define IMX8MQ_CLK_UART3_ROOT 215 +#define IMX8MQ_CLK_UART4_ROOT 216 +#define IMX8MQ_CLK_USB1_CTRL_ROOT 217 +#define IMX8MQ_CLK_USB2_CTRL_ROOT 218 +#define IMX8MQ_CLK_USB1_PHY_ROOT 219 +#define IMX8MQ_CLK_USB2_PHY_ROOT 220 +#define IMX8MQ_CLK_USDHC1_ROOT 221 +#define IMX8MQ_CLK_USDHC2_ROOT 222 +#define IMX8MQ_CLK_WDOG1_ROOT 223 +#define IMX8MQ_CLK_WDOG2_ROOT 224 +#define IMX8MQ_CLK_WDOG3_ROOT 225 +#define IMX8MQ_CLK_GPU_ROOT 226 +#define IMX8MQ_CLK_HEVC_ROOT 227 +#define IMX8MQ_CLK_AVC_ROOT 228 +#define IMX8MQ_CLK_VP9_ROOT 229 +#define IMX8MQ_CLK_HEVC_INTER_ROOT 230 +#define IMX8MQ_CLK_DISP_ROOT 231 +#define IMX8MQ_CLK_HDMI_ROOT 232 +#define IMX8MQ_CLK_HDMI_PHY_ROOT 233 +#define IMX8MQ_CLK_VPU_DEC_ROOT 234 +#define IMX8MQ_CLK_CSI1_ROOT 235 +#define IMX8MQ_CLK_CSI2_ROOT 236 +#define IMX8MQ_CLK_RAWNAND_ROOT 237 +#define IMX8MQ_CLK_SDMA1_ROOT 238 +#define IMX8MQ_CLK_SDMA2_ROOT 239 +#define IMX8MQ_CLK_VPU_G1_ROOT 240 +#define IMX8MQ_CLK_VPU_G2_ROOT 241 + +/* SCCG PLL GATE */ +#define IMX8MQ_SYS1_PLL_OUT 232 +#define IMX8MQ_SYS2_PLL_OUT 243 +#define IMX8MQ_SYS3_PLL_OUT 244 +#define IMX8MQ_DRAM_PLL_OUT 245 + +#define IMX8MQ_GPT_3M_CLK 246 + +#define IMX8MQ_CLK_IPG_ROOT 247 +#define IMX8MQ_CLK_IPG_AUDIO_ROOT 248 +#define IMX8MQ_CLK_SAI1_IPG 249 +#define IMX8MQ_CLK_SAI2_IPG 250 +#define IMX8MQ_CLK_SAI3_IPG 251 +#define IMX8MQ_CLK_SAI4_IPG 252 +#define IMX8MQ_CLK_SAI5_IPG 253 +#define IMX8MQ_CLK_SAI6_IPG 254 + +/* DSI AHB/IPG clocks */ +/* rxesc clock */ +#define IMX8MQ_CLK_DSI_AHB 255 +/* txesc clock */ +#define IMX8MQ_CLK_DSI_IPG_DIV 256 + +#define IMX8MQ_CLK_TMU_ROOT 265 + +/* Display root clocks */ +#define IMX8MQ_CLK_DISP_AXI_ROOT 266 +#define IMX8MQ_CLK_DISP_APB_ROOT 267 +#define IMX8MQ_CLK_DISP_RTRM_ROOT 268 + +#define IMX8MQ_CLK_OCOTP_ROOT 269 + +#define IMX8MQ_CLK_DRAM_ALT_ROOT 270 +#define IMX8MQ_CLK_DRAM_CORE 271 + +#define 
IMX8MQ_CLK_MU_ROOT 272 +#define IMX8MQ_VIDEO2_PLL_OUT 273 + +#define IMX8MQ_CLK_CLKO2 274 + +#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 275 + +#define IMX8MQ_CLK_END 276 +#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h index 228a5e234af0..7b24fc791146 100644 --- a/include/dt-bindings/clock/marvell,mmp2.h +++ b/include/dt-bindings/clock/marvell,mmp2.h @@ -71,6 +71,7 @@ #define MMP2_CLK_CCIC1_MIX 117 #define MMP2_CLK_CCIC1_PHY 118 #define MMP2_CLK_CCIC1_SPHY 119 +#define MMP2_CLK_SP 120 #define MMP2_NR_CLKS 200 #endif diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h index a60f47b49231..5fe2923382d0 100644 --- a/include/dt-bindings/clock/meson8b-clkc.h +++ b/include/dt-bindings/clock/meson8b-clkc.h @@ -103,5 +103,9 @@ #define CLKID_MPLL1 94 #define CLKID_MPLL2 95 #define CLKID_NAND_CLK 112 +#define CLKID_ABP 124 +#define CLKID_PERIPH 126 +#define CLKID_AXI 128 +#define CLKID_L2_DRAM 130 #endif /* __MESON8B_CLKC_H */ diff --git a/include/dt-bindings/clock/mt7629-clk.h b/include/dt-bindings/clock/mt7629-clk.h new file mode 100644 index 000000000000..ad8e6d7f0154 --- /dev/null +++ b/include/dt-bindings/clock/mt7629-clk.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 MediaTek Inc. + */ + +#ifndef _DT_BINDINGS_CLK_MT7629_H +#define _DT_BINDINGS_CLK_MT7629_H + +/* TOPCKGEN */ +#define CLK_TOP_TO_U2_PHY 0 +#define CLK_TOP_TO_U2_PHY_1P 1 +#define CLK_TOP_PCIE0_PIPE_EN 2 +#define CLK_TOP_PCIE1_PIPE_EN 3 +#define CLK_TOP_SSUSB_TX250M 4 +#define CLK_TOP_SSUSB_EQ_RX250M 5 +#define CLK_TOP_SSUSB_CDR_REF 6 +#define CLK_TOP_SSUSB_CDR_FB 7 +#define CLK_TOP_SATA_ASIC 8 +#define CLK_TOP_SATA_RBC 9 +#define CLK_TOP_TO_USB3_SYS 10 +#define CLK_TOP_P1_1MHZ 11 +#define CLK_TOP_4MHZ 12 +#define CLK_TOP_P0_1MHZ 13 +#define CLK_TOP_ETH_500M 14 +#define CLK_TOP_TXCLK_SRC_PRE 15 +#define CLK_TOP_RTC 16 +#define CLK_TOP_PWM_QTR_26M 17 +#define CLK_TOP_CPUM_TCK_IN 18 +#define CLK_TOP_TO_USB3_DA_TOP 19 +#define CLK_TOP_MEMPLL 20 +#define CLK_TOP_DMPLL 21 +#define CLK_TOP_DMPLL_D4 22 +#define CLK_TOP_DMPLL_D8 23 +#define CLK_TOP_SYSPLL_D2 24 +#define CLK_TOP_SYSPLL1_D2 25 +#define CLK_TOP_SYSPLL1_D4 26 +#define CLK_TOP_SYSPLL1_D8 27 +#define CLK_TOP_SYSPLL1_D16 28 +#define CLK_TOP_SYSPLL2_D2 29 +#define CLK_TOP_SYSPLL2_D4 30 +#define CLK_TOP_SYSPLL2_D8 31 +#define CLK_TOP_SYSPLL_D5 32 +#define CLK_TOP_SYSPLL3_D2 33 +#define CLK_TOP_SYSPLL3_D4 34 +#define CLK_TOP_SYSPLL_D7 35 +#define CLK_TOP_SYSPLL4_D2 36 +#define CLK_TOP_SYSPLL4_D4 37 +#define CLK_TOP_SYSPLL4_D16 38 +#define CLK_TOP_UNIVPLL 39 +#define CLK_TOP_UNIVPLL1_D2 40 +#define CLK_TOP_UNIVPLL1_D4 41 +#define CLK_TOP_UNIVPLL1_D8 42 +#define CLK_TOP_UNIVPLL_D3 43 +#define CLK_TOP_UNIVPLL2_D2 44 +#define CLK_TOP_UNIVPLL2_D4 45 +#define CLK_TOP_UNIVPLL2_D8 46 +#define CLK_TOP_UNIVPLL2_D16 47 +#define CLK_TOP_UNIVPLL_D5 48 +#define CLK_TOP_UNIVPLL3_D2 49 +#define CLK_TOP_UNIVPLL3_D4 50 +#define CLK_TOP_UNIVPLL3_D16 51 +#define CLK_TOP_UNIVPLL_D7 52 +#define CLK_TOP_UNIVPLL_D80_D4 53 +#define CLK_TOP_UNIV48M 54 +#define CLK_TOP_SGMIIPLL_D2 55 +#define CLK_TOP_CLKXTAL_D4 56 +#define CLK_TOP_HD_FAXI 57 +#define CLK_TOP_FAXI 58 +#define CLK_TOP_F_FAUD_INTBUS 59 +#define CLK_TOP_AP2WBHIF_HCLK 60 +#define CLK_TOP_10M_INFRAO 61 +#define CLK_TOP_MSDC30_1 62 +#define CLK_TOP_SPI 63 +#define CLK_TOP_SF 64 +#define CLK_TOP_FLASH 65 +#define CLK_TOP_TO_USB3_REF 66 +#define CLK_TOP_TO_USB3_MCU 67 +#define 
CLK_TOP_TO_USB3_DMA 68 +#define CLK_TOP_FROM_TOP_AHB 69 +#define CLK_TOP_FROM_TOP_AXI 70 +#define CLK_TOP_PCIE1_MAC_EN 71 +#define CLK_TOP_PCIE0_MAC_EN 72 +#define CLK_TOP_AXI_SEL 73 +#define CLK_TOP_MEM_SEL 74 +#define CLK_TOP_DDRPHYCFG_SEL 75 +#define CLK_TOP_ETH_SEL 76 +#define CLK_TOP_PWM_SEL 77 +#define CLK_TOP_F10M_REF_SEL 78 +#define CLK_TOP_NFI_INFRA_SEL 79 +#define CLK_TOP_FLASH_SEL 80 +#define CLK_TOP_UART_SEL 81 +#define CLK_TOP_SPI0_SEL 82 +#define CLK_TOP_SPI1_SEL 83 +#define CLK_TOP_MSDC50_0_SEL 84 +#define CLK_TOP_MSDC30_0_SEL 85 +#define CLK_TOP_MSDC30_1_SEL 86 +#define CLK_TOP_AP2WBMCU_SEL 87 +#define CLK_TOP_AP2WBHIF_SEL 88 +#define CLK_TOP_AUDIO_SEL 89 +#define CLK_TOP_AUD_INTBUS_SEL 90 +#define CLK_TOP_PMICSPI_SEL 91 +#define CLK_TOP_SCP_SEL 92 +#define CLK_TOP_ATB_SEL 93 +#define CLK_TOP_HIF_SEL 94 +#define CLK_TOP_SATA_SEL 95 +#define CLK_TOP_U2_SEL 96 +#define CLK_TOP_AUD1_SEL 97 +#define CLK_TOP_AUD2_SEL 98 +#define CLK_TOP_IRRX_SEL 99 +#define CLK_TOP_IRTX_SEL 100 +#define CLK_TOP_SATA_MCU_SEL 101 +#define CLK_TOP_PCIE0_MCU_SEL 102 +#define CLK_TOP_PCIE1_MCU_SEL 103 +#define CLK_TOP_SSUSB_MCU_SEL 104 +#define CLK_TOP_CRYPTO_SEL 105 +#define CLK_TOP_SGMII_REF_1_SEL 106 +#define CLK_TOP_10M_SEL 107 +#define CLK_TOP_NR_CLK 108 + +/* INFRACFG */ +#define CLK_INFRA_MUX1_SEL 0 +#define CLK_INFRA_DBGCLK_PD 1 +#define CLK_INFRA_TRNG_PD 2 +#define CLK_INFRA_DEVAPC_PD 3 +#define CLK_INFRA_APXGPT_PD 4 +#define CLK_INFRA_SEJ_PD 5 +#define CLK_INFRA_NR_CLK 6 + +/* PERICFG */ +#define CLK_PERIBUS_SEL 0 +#define CLK_PERI_PWM1_PD 1 +#define CLK_PERI_PWM2_PD 2 +#define CLK_PERI_PWM3_PD 3 +#define CLK_PERI_PWM4_PD 4 +#define CLK_PERI_PWM5_PD 5 +#define CLK_PERI_PWM6_PD 6 +#define CLK_PERI_PWM7_PD 7 +#define CLK_PERI_PWM_PD 8 +#define CLK_PERI_AP_DMA_PD 9 +#define CLK_PERI_MSDC30_1_PD 10 +#define CLK_PERI_UART0_PD 11 +#define CLK_PERI_UART1_PD 12 +#define CLK_PERI_UART2_PD 13 +#define CLK_PERI_UART3_PD 14 +#define CLK_PERI_BTIF_PD 15 +#define CLK_PERI_I2C0_PD 16 +#define CLK_PERI_SPI0_PD 17 +#define CLK_PERI_SNFI_PD 18 +#define CLK_PERI_NFI_PD 19 +#define CLK_PERI_NFIECC_PD 20 +#define CLK_PERI_FLASH_PD 21 +#define CLK_PERI_NR_CLK 22 + +/* APMIXEDSYS */ +#define CLK_APMIXED_ARMPLL 0 +#define CLK_APMIXED_MAINPLL 1 +#define CLK_APMIXED_UNIV2PLL 2 +#define CLK_APMIXED_ETH1PLL 3 +#define CLK_APMIXED_ETH2PLL 4 +#define CLK_APMIXED_SGMIPLL 5 +#define CLK_APMIXED_MAIN_CORE_EN 6 +#define CLK_APMIXED_NR_CLK 7 + +/* SSUSBSYS */ +#define CLK_SSUSB_U2_PHY_1P_EN 0 +#define CLK_SSUSB_U2_PHY_EN 1 +#define CLK_SSUSB_REF_EN 2 +#define CLK_SSUSB_SYS_EN 3 +#define CLK_SSUSB_MCU_EN 4 +#define CLK_SSUSB_DMA_EN 5 +#define CLK_SSUSB_NR_CLK 6 + +/* PCIESYS */ +#define CLK_PCIE_P1_AUX_EN 0 +#define CLK_PCIE_P1_OBFF_EN 1 +#define CLK_PCIE_P1_AHB_EN 2 +#define CLK_PCIE_P1_AXI_EN 3 +#define CLK_PCIE_P1_MAC_EN 4 +#define CLK_PCIE_P1_PIPE_EN 5 +#define CLK_PCIE_P0_AUX_EN 6 +#define CLK_PCIE_P0_OBFF_EN 7 +#define CLK_PCIE_P0_AHB_EN 8 +#define CLK_PCIE_P0_AXI_EN 9 +#define CLK_PCIE_P0_MAC_EN 10 +#define CLK_PCIE_P0_PIPE_EN 11 +#define CLK_PCIE_NR_CLK 12 + +/* ETHSYS */ +#define CLK_ETH_FE_EN 0 +#define CLK_ETH_GP2_EN 1 +#define CLK_ETH_GP1_EN 2 +#define CLK_ETH_GP0_EN 3 +#define CLK_ETH_ESW_EN 4 +#define CLK_ETH_NR_CLK 5 + +/* SGMIISYS */ +#define CLK_SGMII_TX_EN 0 +#define CLK_SGMII_RX_EN 1 +#define CLK_SGMII_CDR_REF 2 +#define CLK_SGMII_CDR_FB 3 +#define CLK_SGMII_NR_CLK 4 + +#endif /* _DT_BINDINGS_CLK_MT7629_H */ diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h 
b/include/dt-bindings/clock/qcom,gcc-msm8998.h index 58a242e656b1..ba84bbab5c83 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8998.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h @@ -180,6 +180,11 @@ #define USB30_MASTER_CLK_SRC 163 #define USB30_MOCK_UTMI_CLK_SRC 164 #define USB3_PHY_AUX_CLK_SRC 165 +#define GCC_USB3_CLKREF_CLK 166 +#define GCC_HDMI_CLKREF_CLK 167 +#define GCC_UFS_CLKREF_CLK 168 +#define GCC_PCIE_CLKREF_CLK 169 +#define GCC_RX1_USB2_CLKREF_CLK 170 #define PCIE_0_GDSC 0 #define UFS_GDSC 1 @@ -204,5 +209,94 @@ #define GCC_TSIF_BCR 16 #define GCC_UFS_BCR 17 #define GCC_USB_30_BCR 18 +#define GCC_SYSTEM_NOC_BCR 19 +#define GCC_CONFIG_NOC_BCR 20 +#define GCC_AHB2PHY_EAST_BCR 21 +#define GCC_IMEM_BCR 22 +#define GCC_PIMEM_BCR 23 +#define GCC_MMSS_BCR 24 +#define GCC_QDSS_BCR 25 +#define GCC_WCSS_BCR 26 +#define GCC_BLSP1_BCR 27 +#define GCC_BLSP1_UART1_BCR 28 +#define GCC_BLSP1_UART2_BCR 29 +#define GCC_BLSP1_UART3_BCR 30 +#define GCC_CM_PHY_REFGEN1_BCR 31 +#define GCC_CM_PHY_REFGEN2_BCR 32 +#define GCC_BLSP2_BCR 33 +#define GCC_BLSP2_UART1_BCR 34 +#define GCC_BLSP2_UART2_BCR 35 +#define GCC_BLSP2_UART3_BCR 36 +#define GCC_SRAM_SENSOR_BCR 37 +#define GCC_PRNG_BCR 38 +#define GCC_TSIF_0_RESET 39 +#define GCC_TSIF_1_RESET 40 +#define GCC_TCSR_BCR 41 +#define GCC_BOOT_ROM_BCR 42 +#define GCC_MSG_RAM_BCR 43 +#define GCC_TLMM_BCR 44 +#define GCC_MPM_BCR 45 +#define GCC_SEC_CTRL_BCR 46 +#define GCC_SPMI_BCR 47 +#define GCC_SPDM_BCR 48 +#define GCC_CE1_BCR 49 +#define GCC_BIMC_BCR 50 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 51 +#define GCC_SNOC_BUS_TIMEOUT1_BCR 52 +#define GCC_SNOC_BUS_TIMEOUT3_BCR 53 +#define GCC_SNOC_BUS_TIMEOUT_EXTREF_BCR 54 +#define GCC_PNOC_BUS_TIMEOUT0_BCR 55 +#define GCC_CNOC_PERIPH_BUS_TIMEOUT1_BCR 56 +#define GCC_CNOC_PERIPH_BUS_TIMEOUT2_BCR 57 +#define GCC_CNOC_BUS_TIMEOUT0_BCR 58 +#define GCC_CNOC_BUS_TIMEOUT1_BCR 59 +#define GCC_CNOC_BUS_TIMEOUT2_BCR 60 +#define GCC_CNOC_BUS_TIMEOUT3_BCR 61 +#define GCC_CNOC_BUS_TIMEOUT4_BCR 62 +#define GCC_CNOC_BUS_TIMEOUT5_BCR 63 +#define GCC_CNOC_BUS_TIMEOUT6_BCR 64 +#define GCC_CNOC_BUS_TIMEOUT7_BCR 65 +#define GCC_APB2JTAG_BCR 66 +#define GCC_RBCPR_CX_BCR 67 +#define GCC_RBCPR_MX_BCR 68 +#define GCC_USB3_PHY_BCR 69 +#define GCC_USB3PHY_PHY_BCR 70 +#define GCC_USB3_DP_PHY_BCR 71 +#define GCC_SSC_BCR 72 +#define GCC_SSC_RESET 73 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 74 +#define GCC_PCIE_0_LINK_DOWN_BCR 75 +#define GCC_PCIE_0_PHY_BCR 76 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 77 +#define GCC_PCIE_PHY_BCR 78 +#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 79 +#define GCC_PCIE_PHY_CFG_AHB_BCR 80 +#define GCC_PCIE_PHY_COM_BCR 81 +#define GCC_GPU_BCR 82 +#define GCC_SPSS_BCR 83 +#define GCC_OBT_ODT_BCR 84 +#define GCC_VS_BCR 85 +#define GCC_MSS_VS_RESET 86 +#define GCC_GPU_VS_RESET 87 +#define GCC_APC0_VS_RESET 88 +#define GCC_APC1_VS_RESET 89 +#define GCC_CNOC_BUS_TIMEOUT8_BCR 90 +#define GCC_CNOC_BUS_TIMEOUT9_BCR 91 +#define GCC_CNOC_BUS_TIMEOUT10_BCR 92 +#define GCC_CNOC_BUS_TIMEOUT11_BCR 93 +#define GCC_CNOC_BUS_TIMEOUT12_BCR 94 +#define GCC_CNOC_BUS_TIMEOUT13_BCR 95 +#define GCC_CNOC_BUS_TIMEOUT14_BCR 96 +#define GCC_CNOC_BUS_TIMEOUT_EXTREF_BCR 97 +#define GCC_AGGRE1_NOC_BCR 98 +#define GCC_AGGRE2_NOC_BCR 99 +#define GCC_DCC_BCR 100 +#define GCC_QREFS_VBG_CAL_BCR 101 +#define GCC_IPA_BCR 102 +#define GCC_GLM_BCR 103 +#define GCC_SKL_BCR 104 +#define GCC_MSMPU_BCR 105 +#define GCC_QUSB2PHY_PRIM_BCR 106 +#define GCC_QUSB2PHY_SEC_BCR 107 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h 
b/include/dt-bindings/clock/qcom,gcc-sdm845.h index b8eae5a76503..968fa65b9c42 100644 --- a/include/dt-bindings/clock/qcom,gcc-sdm845.h +++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h @@ -197,6 +197,8 @@ #define GCC_QSPI_CORE_CLK_SRC 187 #define GCC_QSPI_CORE_CLK 188 #define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189 +#define GCC_LPASS_Q6_AXI_CLK 190 +#define GCC_LPASS_SWAY_CLK 191 /* GCC Resets */ #define GCC_MMSS_BCR 0 diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h new file mode 100644 index 000000000000..9690d901b50a --- /dev/null +++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_SDM_GPU_CC_SDM845_H +#define _DT_BINDINGS_CLK_SDM_GPU_CC_SDM845_H + +/* GPU_CC clock registers */ +#define GPU_CC_CX_GMU_CLK 0 +#define GPU_CC_CXO_CLK 1 +#define GPU_CC_GMU_CLK_SRC 2 +#define GPU_CC_PLL1 3 + +/* GPU_CC Resets */ +#define GPUCC_GPU_CC_CX_BCR 0 +#define GPUCC_GPU_CC_GMU_BCR 1 +#define GPUCC_GPU_CC_XO_BCR 2 + +/* GPU_CC GDSCRs */ +#define GPU_CX_GDSC 0 +#define GPU_GX_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,lpass-sdm845.h b/include/dt-bindings/clock/qcom,lpass-sdm845.h new file mode 100644 index 000000000000..659050846f61 --- /dev/null +++ b/include/dt-bindings/clock/qcom,lpass-sdm845.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_SDM_LPASS_SDM845_H +#define _DT_BINDINGS_CLK_SDM_LPASS_SDM845_H + +#define LPASS_Q6SS_AHBM_AON_CLK 0 +#define LPASS_Q6SS_AHBS_AON_CLK 1 +#define LPASS_QDSP6SS_XO_CLK 2 +#define LPASS_QDSP6SS_SLEEP_CLK 3 +#define LPASS_QDSP6SS_CORE_CLK 4 + +#endif diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index c585b82b9c05..3658b0c14966 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -123,5 +123,9 @@ #define RPM_SMD_DIV_A_CLK3 73 #define RPM_SMD_LN_BB_CLK 74 #define RPM_SMD_LN_BB_A_CLK 75 +#define RPM_SMD_BIMC_GPU_CLK 76 +#define RPM_SMD_BIMC_GPU_A_CLK 77 +#define RPM_SMD_QPIC_CLK 78 +#define RPM_SMD_QPIC_CLK_A 79 #endif diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h index 948389641565..92b3e2a95179 100644 --- a/include/dt-bindings/clock/r8a7795-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h @@ -50,7 +50,7 @@ #define R8A7795_CLK_CANFD 39 #define R8A7795_CLK_HDMI 40 #define R8A7795_CLK_CSI0 41 -#define R8A7795_CLK_CSIREF 42 +/* CLK_CSIREF was removed */ #define R8A7795_CLK_CP 43 #define R8A7795_CLK_CPEX 44 #define R8A7795_CLK_R 45 diff --git a/include/dt-bindings/clock/r8a7796-cpg-mssr.h b/include/dt-bindings/clock/r8a7796-cpg-mssr.h index e6087f2f7e3a..c0957cf45840 100644 --- a/include/dt-bindings/clock/r8a7796-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a7796-cpg-mssr.h @@ -56,7 +56,7 @@ #define R8A7796_CLK_CANFD 45 #define R8A7796_CLK_HDMI 46 #define R8A7796_CLK_CSI0 47 -#define R8A7796_CLK_CSIREF 48 +/* CLK_CSIREF was removed */ #define R8A7796_CLK_CP 49 #define R8A7796_CLK_CPEX 50 #define R8A7796_CLK_R 51 diff --git a/include/dt-bindings/clock/r8a77995-cpg-mssr.h b/include/dt-bindings/clock/r8a77995-cpg-mssr.h index 1eb11acfa563..fd701c4e87cf 100644 --- a/include/dt-bindings/clock/r8a77995-cpg-mssr.h +++ b/include/dt-bindings/clock/r8a77995-cpg-mssr.h @@ -35,8 
+35,8 @@ #define R8A77995_CLK_CRD2 24 #define R8A77995_CLK_SD0H 25 #define R8A77995_CLK_SD0 26 -#define R8A77995_CLK_SSP2 27 -#define R8A77995_CLK_SSP1 28 +/* CLK_SSP2 was removed */ +/* CLK_SSP1 was removed */ #define R8A77995_CLK_RPC 29 #define R8A77995_CLK_RPCD2 30 #define R8A77995_CLK_ZA2 31 @@ -49,5 +49,6 @@ #define R8A77995_CLK_LV0 38 #define R8A77995_CLK_LV1 39 #define R8A77995_CLK_CP 40 +#define R8A77995_CLK_CPEX 41 #endif /* __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h index a82a0109faff..bcaa4559ab1b 100644 --- a/include/dt-bindings/clock/rk3328-cru.h +++ b/include/dt-bindings/clock/rk3328-cru.h @@ -172,13 +172,14 @@ #define PCLK_HDCP 232 #define PCLK_DCF 233 #define PCLK_SARADC 234 +#define PCLK_ACODECPHY 235 /* hclk gates */ #define HCLK_PERI 308 #define HCLK_TSP 309 #define HCLK_GMAC 310 #define HCLK_I2S0_8CH 311 -#define HCLK_I2S1_8CH 313 +#define HCLK_I2S1_8CH 312 #define HCLK_I2S2_2CH 313 #define HCLK_SPDIF_8CH 314 #define HCLK_VOP 315 diff --git a/include/dt-bindings/clock/sun8i-de2.h b/include/dt-bindings/clock/sun8i-de2.h index 3bed63b524aa..7768f73b051e 100644 --- a/include/dt-bindings/clock/sun8i-de2.h +++ b/include/dt-bindings/clock/sun8i-de2.h @@ -15,4 +15,7 @@ #define CLK_MIXER1 7 #define CLK_WB 8 +#define CLK_BUS_ROT 9 +#define CLK_ROT 10 + #endif /* _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ */ diff --git a/include/dt-bindings/clock/suniv-ccu-f1c100s.h b/include/dt-bindings/clock/suniv-ccu-f1c100s.h new file mode 100644 index 000000000000..f5ac155c9c70 --- /dev/null +++ b/include/dt-bindings/clock/suniv-ccu-f1c100s.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) + * + * Copyright (c) 2018 Icenowy Zheng <icenowy@aosc.xyz> + * + */ + +#ifndef _DT_BINDINGS_CLK_SUNIV_F1C100S_H_ +#define _DT_BINDINGS_CLK_SUNIV_F1C100S_H_ + +#define CLK_CPU 11 + +#define CLK_BUS_DMA 14 +#define CLK_BUS_MMC0 15 +#define CLK_BUS_MMC1 16 +#define CLK_BUS_DRAM 17 +#define CLK_BUS_SPI0 18 +#define CLK_BUS_SPI1 19 +#define CLK_BUS_OTG 20 +#define CLK_BUS_VE 21 +#define CLK_BUS_LCD 22 +#define CLK_BUS_DEINTERLACE 23 +#define CLK_BUS_CSI 24 +#define CLK_BUS_TVD 25 +#define CLK_BUS_TVE 26 +#define CLK_BUS_DE_BE 27 +#define CLK_BUS_DE_FE 28 +#define CLK_BUS_CODEC 29 +#define CLK_BUS_SPDIF 30 +#define CLK_BUS_IR 31 +#define CLK_BUS_RSB 32 +#define CLK_BUS_I2S0 33 +#define CLK_BUS_I2C0 34 +#define CLK_BUS_I2C1 35 +#define CLK_BUS_I2C2 36 +#define CLK_BUS_PIO 37 +#define CLK_BUS_UART0 38 +#define CLK_BUS_UART1 39 +#define CLK_BUS_UART2 40 + +#define CLK_MMC0 41 +#define CLK_MMC0_SAMPLE 42 +#define CLK_MMC0_OUTPUT 43 +#define CLK_MMC1 44 +#define CLK_MMC1_SAMPLE 45 +#define CLK_MMC1_OUTPUT 46 +#define CLK_I2S 47 +#define CLK_SPDIF 48 + +#define CLK_USB_PHY0 49 + +#define CLK_DRAM_VE 50 +#define CLK_DRAM_CSI 51 +#define CLK_DRAM_DEINTERLACE 52 +#define CLK_DRAM_TVD 53 +#define CLK_DRAM_DE_FE 54 +#define CLK_DRAM_DE_BE 55 + +#define CLK_DE_BE 56 +#define CLK_DE_FE 57 +#define CLK_TCON 58 +#define CLK_DEINTERLACE 59 +#define CLK_TVE2_CLK 60 +#define CLK_TVE1_CLK 61 +#define CLK_TVD 62 +#define CLK_CSI 63 +#define CLK_VE 64 +#define CLK_CODEC 65 +#define CLK_AVS 66 + +#endif diff --git a/include/dt-bindings/dma/dw-dmac.h b/include/dt-bindings/dma/dw-dmac.h new file mode 100644 index 000000000000..d1ca705c95b3 --- /dev/null +++ b/include/dt-bindings/dma/dw-dmac.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ + +#ifndef __DT_BINDINGS_DMA_DW_DMAC_H__ +#define __DT_BINDINGS_DMA_DW_DMAC_H__ 
+ +/* + * Protection Control bits provide protection against illegal transactions. + * The protection bits[0:2] are one-to-one mapped to AHB HPROT[3:1] signals. + */ +#define DW_DMAC_HPROT1_PRIVILEGED_MODE (1 << 0) /* Privileged Mode */ +#define DW_DMAC_HPROT2_BUFFERABLE (1 << 1) /* DMA is bufferable */ +#define DW_DMAC_HPROT3_CACHEABLE (1 << 2) /* DMA is cacheable */ + +#endif /* __DT_BINDINGS_DMA_DW_DMAC_H__ */ diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h new file mode 100644 index 000000000000..4481f2d60d65 --- /dev/null +++ b/include/dt-bindings/firmware/imx/rsrc.h @@ -0,0 +1,559 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2018 NXP + */ + +#ifndef __DT_BINDINGS_RSCRC_IMX_H +#define __DT_BINDINGS_RSCRC_IMX_H + +/* + * These defines are used to indicate a resource. Resources include peripherals + * and bus masters (but not memory regions). Note items from list should + * never be changed or removed (only added to at the end of the list). + */ + +#define IMX_SC_R_A53 0 +#define IMX_SC_R_A53_0 1 +#define IMX_SC_R_A53_1 2 +#define IMX_SC_R_A53_2 3 +#define IMX_SC_R_A53_3 4 +#define IMX_SC_R_A72 5 +#define IMX_SC_R_A72_0 6 +#define IMX_SC_R_A72_1 7 +#define IMX_SC_R_A72_2 8 +#define IMX_SC_R_A72_3 9 +#define IMX_SC_R_CCI 10 +#define IMX_SC_R_DB 11 +#define IMX_SC_R_DRC_0 12 +#define IMX_SC_R_DRC_1 13 +#define IMX_SC_R_GIC_SMMU 14 +#define IMX_SC_R_IRQSTR_M4_0 15 +#define IMX_SC_R_IRQSTR_M4_1 16 +#define IMX_SC_R_SMMU 17 +#define IMX_SC_R_GIC 18 +#define IMX_SC_R_DC_0_BLIT0 19 +#define IMX_SC_R_DC_0_BLIT1 20 +#define IMX_SC_R_DC_0_BLIT2 21 +#define IMX_SC_R_DC_0_BLIT_OUT 22 +#define IMX_SC_R_DC_0_CAPTURE0 23 +#define IMX_SC_R_DC_0_CAPTURE1 24 +#define IMX_SC_R_DC_0_WARP 25 +#define IMX_SC_R_DC_0_INTEGRAL0 26 +#define IMX_SC_R_DC_0_INTEGRAL1 27 +#define IMX_SC_R_DC_0_VIDEO0 28 +#define IMX_SC_R_DC_0_VIDEO1 29 +#define IMX_SC_R_DC_0_FRAC0 30 +#define IMX_SC_R_DC_0_FRAC1 31 +#define IMX_SC_R_DC_0 32 +#define IMX_SC_R_GPU_2_PID0 33 +#define IMX_SC_R_DC_0_PLL_0 34 +#define IMX_SC_R_DC_0_PLL_1 35 +#define IMX_SC_R_DC_1_BLIT0 36 +#define IMX_SC_R_DC_1_BLIT1 37 +#define IMX_SC_R_DC_1_BLIT2 38 +#define IMX_SC_R_DC_1_BLIT_OUT 39 +#define IMX_SC_R_DC_1_CAPTURE0 40 +#define IMX_SC_R_DC_1_CAPTURE1 41 +#define IMX_SC_R_DC_1_WARP 42 +#define IMX_SC_R_DC_1_INTEGRAL0 43 +#define IMX_SC_R_DC_1_INTEGRAL1 44 +#define IMX_SC_R_DC_1_VIDEO0 45 +#define IMX_SC_R_DC_1_VIDEO1 46 +#define IMX_SC_R_DC_1_FRAC0 47 +#define IMX_SC_R_DC_1_FRAC1 48 +#define IMX_SC_R_DC_1 49 +#define IMX_SC_R_GPU_3_PID0 50 +#define IMX_SC_R_DC_1_PLL_0 51 +#define IMX_SC_R_DC_1_PLL_1 52 +#define IMX_SC_R_SPI_0 53 +#define IMX_SC_R_SPI_1 54 +#define IMX_SC_R_SPI_2 55 +#define IMX_SC_R_SPI_3 56 +#define IMX_SC_R_UART_0 57 +#define IMX_SC_R_UART_1 58 +#define IMX_SC_R_UART_2 59 +#define IMX_SC_R_UART_3 60 +#define IMX_SC_R_UART_4 61 +#define IMX_SC_R_EMVSIM_0 62 +#define IMX_SC_R_EMVSIM_1 63 +#define IMX_SC_R_DMA_0_CH0 64 +#define IMX_SC_R_DMA_0_CH1 65 +#define IMX_SC_R_DMA_0_CH2 66 +#define IMX_SC_R_DMA_0_CH3 67 +#define IMX_SC_R_DMA_0_CH4 68 +#define IMX_SC_R_DMA_0_CH5 69 +#define IMX_SC_R_DMA_0_CH6 70 +#define IMX_SC_R_DMA_0_CH7 71 +#define IMX_SC_R_DMA_0_CH8 72 +#define IMX_SC_R_DMA_0_CH9 73 +#define IMX_SC_R_DMA_0_CH10 74 +#define IMX_SC_R_DMA_0_CH11 75 +#define IMX_SC_R_DMA_0_CH12 76 +#define IMX_SC_R_DMA_0_CH13 77 +#define IMX_SC_R_DMA_0_CH14 78 +#define IMX_SC_R_DMA_0_CH15 79 +#define IMX_SC_R_DMA_0_CH16 80 
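A minimal sketch of how the DW_DMAC_HPROT* flags added in dt-bindings/dma/dw-dmac.h above could be consumed from a board .dts; the &dmac0 label and the "snps,dma-protection-control" property name are assumptions for illustration, not taken from this diff.

#include <dt-bindings/dma/dw-dmac.h>

/* hypothetical reference to a DesignWare DMA controller node */
&dmac0 {
	/* request privileged, bufferable AHB transactions */
	snps,dma-protection-control = <(DW_DMAC_HPROT1_PRIVILEGED_MODE |
					DW_DMAC_HPROT2_BUFFERABLE)>;
};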
+#define IMX_SC_R_DMA_0_CH17 81 +#define IMX_SC_R_DMA_0_CH18 82 +#define IMX_SC_R_DMA_0_CH19 83 +#define IMX_SC_R_DMA_0_CH20 84 +#define IMX_SC_R_DMA_0_CH21 85 +#define IMX_SC_R_DMA_0_CH22 86 +#define IMX_SC_R_DMA_0_CH23 87 +#define IMX_SC_R_DMA_0_CH24 88 +#define IMX_SC_R_DMA_0_CH25 89 +#define IMX_SC_R_DMA_0_CH26 90 +#define IMX_SC_R_DMA_0_CH27 91 +#define IMX_SC_R_DMA_0_CH28 92 +#define IMX_SC_R_DMA_0_CH29 93 +#define IMX_SC_R_DMA_0_CH30 94 +#define IMX_SC_R_DMA_0_CH31 95 +#define IMX_SC_R_I2C_0 96 +#define IMX_SC_R_I2C_1 97 +#define IMX_SC_R_I2C_2 98 +#define IMX_SC_R_I2C_3 99 +#define IMX_SC_R_I2C_4 100 +#define IMX_SC_R_ADC_0 101 +#define IMX_SC_R_ADC_1 102 +#define IMX_SC_R_FTM_0 103 +#define IMX_SC_R_FTM_1 104 +#define IMX_SC_R_CAN_0 105 +#define IMX_SC_R_CAN_1 106 +#define IMX_SC_R_CAN_2 107 +#define IMX_SC_R_DMA_1_CH0 108 +#define IMX_SC_R_DMA_1_CH1 109 +#define IMX_SC_R_DMA_1_CH2 110 +#define IMX_SC_R_DMA_1_CH3 111 +#define IMX_SC_R_DMA_1_CH4 112 +#define IMX_SC_R_DMA_1_CH5 113 +#define IMX_SC_R_DMA_1_CH6 114 +#define IMX_SC_R_DMA_1_CH7 115 +#define IMX_SC_R_DMA_1_CH8 116 +#define IMX_SC_R_DMA_1_CH9 117 +#define IMX_SC_R_DMA_1_CH10 118 +#define IMX_SC_R_DMA_1_CH11 119 +#define IMX_SC_R_DMA_1_CH12 120 +#define IMX_SC_R_DMA_1_CH13 121 +#define IMX_SC_R_DMA_1_CH14 122 +#define IMX_SC_R_DMA_1_CH15 123 +#define IMX_SC_R_DMA_1_CH16 124 +#define IMX_SC_R_DMA_1_CH17 125 +#define IMX_SC_R_DMA_1_CH18 126 +#define IMX_SC_R_DMA_1_CH19 127 +#define IMX_SC_R_DMA_1_CH20 128 +#define IMX_SC_R_DMA_1_CH21 129 +#define IMX_SC_R_DMA_1_CH22 130 +#define IMX_SC_R_DMA_1_CH23 131 +#define IMX_SC_R_DMA_1_CH24 132 +#define IMX_SC_R_DMA_1_CH25 133 +#define IMX_SC_R_DMA_1_CH26 134 +#define IMX_SC_R_DMA_1_CH27 135 +#define IMX_SC_R_DMA_1_CH28 136 +#define IMX_SC_R_DMA_1_CH29 137 +#define IMX_SC_R_DMA_1_CH30 138 +#define IMX_SC_R_DMA_1_CH31 139 +#define IMX_SC_R_UNUSED1 140 +#define IMX_SC_R_UNUSED2 141 +#define IMX_SC_R_UNUSED3 142 +#define IMX_SC_R_UNUSED4 143 +#define IMX_SC_R_GPU_0_PID0 144 +#define IMX_SC_R_GPU_0_PID1 145 +#define IMX_SC_R_GPU_0_PID2 146 +#define IMX_SC_R_GPU_0_PID3 147 +#define IMX_SC_R_GPU_1_PID0 148 +#define IMX_SC_R_GPU_1_PID1 149 +#define IMX_SC_R_GPU_1_PID2 150 +#define IMX_SC_R_GPU_1_PID3 151 +#define IMX_SC_R_PCIE_A 152 +#define IMX_SC_R_SERDES_0 153 +#define IMX_SC_R_MATCH_0 154 +#define IMX_SC_R_MATCH_1 155 +#define IMX_SC_R_MATCH_2 156 +#define IMX_SC_R_MATCH_3 157 +#define IMX_SC_R_MATCH_4 158 +#define IMX_SC_R_MATCH_5 159 +#define IMX_SC_R_MATCH_6 160 +#define IMX_SC_R_MATCH_7 161 +#define IMX_SC_R_MATCH_8 162 +#define IMX_SC_R_MATCH_9 163 +#define IMX_SC_R_MATCH_10 164 +#define IMX_SC_R_MATCH_11 165 +#define IMX_SC_R_MATCH_12 166 +#define IMX_SC_R_MATCH_13 167 +#define IMX_SC_R_MATCH_14 168 +#define IMX_SC_R_PCIE_B 169 +#define IMX_SC_R_SATA_0 170 +#define IMX_SC_R_SERDES_1 171 +#define IMX_SC_R_HSIO_GPIO 172 +#define IMX_SC_R_MATCH_15 173 +#define IMX_SC_R_MATCH_16 174 +#define IMX_SC_R_MATCH_17 175 +#define IMX_SC_R_MATCH_18 176 +#define IMX_SC_R_MATCH_19 177 +#define IMX_SC_R_MATCH_20 178 +#define IMX_SC_R_MATCH_21 179 +#define IMX_SC_R_MATCH_22 180 +#define IMX_SC_R_MATCH_23 181 +#define IMX_SC_R_MATCH_24 182 +#define IMX_SC_R_MATCH_25 183 +#define IMX_SC_R_MATCH_26 184 +#define IMX_SC_R_MATCH_27 185 +#define IMX_SC_R_MATCH_28 186 +#define IMX_SC_R_LCD_0 187 +#define IMX_SC_R_LCD_0_PWM_0 188 +#define IMX_SC_R_LCD_0_I2C_0 189 +#define IMX_SC_R_LCD_0_I2C_1 190 +#define IMX_SC_R_PWM_0 191 +#define IMX_SC_R_PWM_1 192 +#define IMX_SC_R_PWM_2 193 +#define IMX_SC_R_PWM_3 194 
+#define IMX_SC_R_PWM_4 195 +#define IMX_SC_R_PWM_5 196 +#define IMX_SC_R_PWM_6 197 +#define IMX_SC_R_PWM_7 198 +#define IMX_SC_R_GPIO_0 199 +#define IMX_SC_R_GPIO_1 200 +#define IMX_SC_R_GPIO_2 201 +#define IMX_SC_R_GPIO_3 202 +#define IMX_SC_R_GPIO_4 203 +#define IMX_SC_R_GPIO_5 204 +#define IMX_SC_R_GPIO_6 205 +#define IMX_SC_R_GPIO_7 206 +#define IMX_SC_R_GPT_0 207 +#define IMX_SC_R_GPT_1 208 +#define IMX_SC_R_GPT_2 209 +#define IMX_SC_R_GPT_3 210 +#define IMX_SC_R_GPT_4 211 +#define IMX_SC_R_KPP 212 +#define IMX_SC_R_MU_0A 213 +#define IMX_SC_R_MU_1A 214 +#define IMX_SC_R_MU_2A 215 +#define IMX_SC_R_MU_3A 216 +#define IMX_SC_R_MU_4A 217 +#define IMX_SC_R_MU_5A 218 +#define IMX_SC_R_MU_6A 219 +#define IMX_SC_R_MU_7A 220 +#define IMX_SC_R_MU_8A 221 +#define IMX_SC_R_MU_9A 222 +#define IMX_SC_R_MU_10A 223 +#define IMX_SC_R_MU_11A 224 +#define IMX_SC_R_MU_12A 225 +#define IMX_SC_R_MU_13A 226 +#define IMX_SC_R_MU_5B 227 +#define IMX_SC_R_MU_6B 228 +#define IMX_SC_R_MU_7B 229 +#define IMX_SC_R_MU_8B 230 +#define IMX_SC_R_MU_9B 231 +#define IMX_SC_R_MU_10B 232 +#define IMX_SC_R_MU_11B 233 +#define IMX_SC_R_MU_12B 234 +#define IMX_SC_R_MU_13B 235 +#define IMX_SC_R_ROM_0 236 +#define IMX_SC_R_FSPI_0 237 +#define IMX_SC_R_FSPI_1 238 +#define IMX_SC_R_IEE 239 +#define IMX_SC_R_IEE_R0 240 +#define IMX_SC_R_IEE_R1 241 +#define IMX_SC_R_IEE_R2 242 +#define IMX_SC_R_IEE_R3 243 +#define IMX_SC_R_IEE_R4 244 +#define IMX_SC_R_IEE_R5 245 +#define IMX_SC_R_IEE_R6 246 +#define IMX_SC_R_IEE_R7 247 +#define IMX_SC_R_SDHC_0 248 +#define IMX_SC_R_SDHC_1 249 +#define IMX_SC_R_SDHC_2 250 +#define IMX_SC_R_ENET_0 251 +#define IMX_SC_R_ENET_1 252 +#define IMX_SC_R_MLB_0 253 +#define IMX_SC_R_DMA_2_CH0 254 +#define IMX_SC_R_DMA_2_CH1 255 +#define IMX_SC_R_DMA_2_CH2 256 +#define IMX_SC_R_DMA_2_CH3 257 +#define IMX_SC_R_DMA_2_CH4 258 +#define IMX_SC_R_USB_0 259 +#define IMX_SC_R_USB_1 260 +#define IMX_SC_R_USB_0_PHY 261 +#define IMX_SC_R_USB_2 262 +#define IMX_SC_R_USB_2_PHY 263 +#define IMX_SC_R_DTCP 264 +#define IMX_SC_R_NAND 265 +#define IMX_SC_R_LVDS_0 266 +#define IMX_SC_R_LVDS_0_PWM_0 267 +#define IMX_SC_R_LVDS_0_I2C_0 268 +#define IMX_SC_R_LVDS_0_I2C_1 269 +#define IMX_SC_R_LVDS_1 270 +#define IMX_SC_R_LVDS_1_PWM_0 271 +#define IMX_SC_R_LVDS_1_I2C_0 272 +#define IMX_SC_R_LVDS_1_I2C_1 273 +#define IMX_SC_R_LVDS_2 274 +#define IMX_SC_R_LVDS_2_PWM_0 275 +#define IMX_SC_R_LVDS_2_I2C_0 276 +#define IMX_SC_R_LVDS_2_I2C_1 277 +#define IMX_SC_R_M4_0_PID0 278 +#define IMX_SC_R_M4_0_PID1 279 +#define IMX_SC_R_M4_0_PID2 280 +#define IMX_SC_R_M4_0_PID3 281 +#define IMX_SC_R_M4_0_PID4 282 +#define IMX_SC_R_M4_0_RGPIO 283 +#define IMX_SC_R_M4_0_SEMA42 284 +#define IMX_SC_R_M4_0_TPM 285 +#define IMX_SC_R_M4_0_PIT 286 +#define IMX_SC_R_M4_0_UART 287 +#define IMX_SC_R_M4_0_I2C 288 +#define IMX_SC_R_M4_0_INTMUX 289 +#define IMX_SC_R_M4_0_SIM 290 +#define IMX_SC_R_M4_0_WDOG 291 +#define IMX_SC_R_M4_0_MU_0B 292 +#define IMX_SC_R_M4_0_MU_0A0 293 +#define IMX_SC_R_M4_0_MU_0A1 294 +#define IMX_SC_R_M4_0_MU_0A2 295 +#define IMX_SC_R_M4_0_MU_0A3 296 +#define IMX_SC_R_M4_0_MU_1A 297 +#define IMX_SC_R_M4_1_PID0 298 +#define IMX_SC_R_M4_1_PID1 299 +#define IMX_SC_R_M4_1_PID2 300 +#define IMX_SC_R_M4_1_PID3 301 +#define IMX_SC_R_M4_1_PID4 302 +#define IMX_SC_R_M4_1_RGPIO 303 +#define IMX_SC_R_M4_1_SEMA42 304 +#define IMX_SC_R_M4_1_TPM 305 +#define IMX_SC_R_M4_1_PIT 306 +#define IMX_SC_R_M4_1_UART 307 +#define IMX_SC_R_M4_1_I2C 308 +#define IMX_SC_R_M4_1_INTMUX 309 +#define IMX_SC_R_M4_1_SIM 310 +#define IMX_SC_R_M4_1_WDOG 311 +#define 
IMX_SC_R_M4_1_MU_0B 312 +#define IMX_SC_R_M4_1_MU_0A0 313 +#define IMX_SC_R_M4_1_MU_0A1 314 +#define IMX_SC_R_M4_1_MU_0A2 315 +#define IMX_SC_R_M4_1_MU_0A3 316 +#define IMX_SC_R_M4_1_MU_1A 317 +#define IMX_SC_R_SAI_0 318 +#define IMX_SC_R_SAI_1 319 +#define IMX_SC_R_SAI_2 320 +#define IMX_SC_R_IRQSTR_SCU2 321 +#define IMX_SC_R_IRQSTR_DSP 322 +#define IMX_SC_R_ELCDIF_PLL 323 +#define IMX_SC_R_UNUSED6 324 +#define IMX_SC_R_AUDIO_PLL_0 325 +#define IMX_SC_R_PI_0 326 +#define IMX_SC_R_PI_0_PWM_0 327 +#define IMX_SC_R_PI_0_PWM_1 328 +#define IMX_SC_R_PI_0_I2C_0 329 +#define IMX_SC_R_PI_0_PLL 330 +#define IMX_SC_R_PI_1 331 +#define IMX_SC_R_PI_1_PWM_0 332 +#define IMX_SC_R_PI_1_PWM_1 333 +#define IMX_SC_R_PI_1_I2C_0 334 +#define IMX_SC_R_PI_1_PLL 335 +#define IMX_SC_R_SC_PID0 336 +#define IMX_SC_R_SC_PID1 337 +#define IMX_SC_R_SC_PID2 338 +#define IMX_SC_R_SC_PID3 339 +#define IMX_SC_R_SC_PID4 340 +#define IMX_SC_R_SC_SEMA42 341 +#define IMX_SC_R_SC_TPM 342 +#define IMX_SC_R_SC_PIT 343 +#define IMX_SC_R_SC_UART 344 +#define IMX_SC_R_SC_I2C 345 +#define IMX_SC_R_SC_MU_0B 346 +#define IMX_SC_R_SC_MU_0A0 347 +#define IMX_SC_R_SC_MU_0A1 348 +#define IMX_SC_R_SC_MU_0A2 349 +#define IMX_SC_R_SC_MU_0A3 350 +#define IMX_SC_R_SC_MU_1A 351 +#define IMX_SC_R_SYSCNT_RD 352 +#define IMX_SC_R_SYSCNT_CMP 353 +#define IMX_SC_R_DEBUG 354 +#define IMX_SC_R_SYSTEM 355 +#define IMX_SC_R_SNVS 356 +#define IMX_SC_R_OTP 357 +#define IMX_SC_R_VPU_PID0 358 +#define IMX_SC_R_VPU_PID1 359 +#define IMX_SC_R_VPU_PID2 360 +#define IMX_SC_R_VPU_PID3 361 +#define IMX_SC_R_VPU_PID4 362 +#define IMX_SC_R_VPU_PID5 363 +#define IMX_SC_R_VPU_PID6 364 +#define IMX_SC_R_VPU_PID7 365 +#define IMX_SC_R_VPU_UART 366 +#define IMX_SC_R_VPUCORE 367 +#define IMX_SC_R_VPUCORE_0 368 +#define IMX_SC_R_VPUCORE_1 369 +#define IMX_SC_R_VPUCORE_2 370 +#define IMX_SC_R_VPUCORE_3 371 +#define IMX_SC_R_DMA_4_CH0 372 +#define IMX_SC_R_DMA_4_CH1 373 +#define IMX_SC_R_DMA_4_CH2 374 +#define IMX_SC_R_DMA_4_CH3 375 +#define IMX_SC_R_DMA_4_CH4 376 +#define IMX_SC_R_ISI_CH0 377 +#define IMX_SC_R_ISI_CH1 378 +#define IMX_SC_R_ISI_CH2 379 +#define IMX_SC_R_ISI_CH3 380 +#define IMX_SC_R_ISI_CH4 381 +#define IMX_SC_R_ISI_CH5 382 +#define IMX_SC_R_ISI_CH6 383 +#define IMX_SC_R_ISI_CH7 384 +#define IMX_SC_R_MJPEG_DEC_S0 385 +#define IMX_SC_R_MJPEG_DEC_S1 386 +#define IMX_SC_R_MJPEG_DEC_S2 387 +#define IMX_SC_R_MJPEG_DEC_S3 388 +#define IMX_SC_R_MJPEG_ENC_S0 389 +#define IMX_SC_R_MJPEG_ENC_S1 390 +#define IMX_SC_R_MJPEG_ENC_S2 391 +#define IMX_SC_R_MJPEG_ENC_S3 392 +#define IMX_SC_R_MIPI_0 393 +#define IMX_SC_R_MIPI_0_PWM_0 394 +#define IMX_SC_R_MIPI_0_I2C_0 395 +#define IMX_SC_R_MIPI_0_I2C_1 396 +#define IMX_SC_R_MIPI_1 397 +#define IMX_SC_R_MIPI_1_PWM_0 398 +#define IMX_SC_R_MIPI_1_I2C_0 399 +#define IMX_SC_R_MIPI_1_I2C_1 400 +#define IMX_SC_R_CSI_0 401 +#define IMX_SC_R_CSI_0_PWM_0 402 +#define IMX_SC_R_CSI_0_I2C_0 403 +#define IMX_SC_R_CSI_1 404 +#define IMX_SC_R_CSI_1_PWM_0 405 +#define IMX_SC_R_CSI_1_I2C_0 406 +#define IMX_SC_R_HDMI 407 +#define IMX_SC_R_HDMI_I2S 408 +#define IMX_SC_R_HDMI_I2C_0 409 +#define IMX_SC_R_HDMI_PLL_0 410 +#define IMX_SC_R_HDMI_RX 411 +#define IMX_SC_R_HDMI_RX_BYPASS 412 +#define IMX_SC_R_HDMI_RX_I2C_0 413 +#define IMX_SC_R_ASRC_0 414 +#define IMX_SC_R_ESAI_0 415 +#define IMX_SC_R_SPDIF_0 416 +#define IMX_SC_R_SPDIF_1 417 +#define IMX_SC_R_SAI_3 418 +#define IMX_SC_R_SAI_4 419 +#define IMX_SC_R_SAI_5 420 +#define IMX_SC_R_GPT_5 421 +#define IMX_SC_R_GPT_6 422 +#define IMX_SC_R_GPT_7 423 +#define IMX_SC_R_GPT_8 424 +#define 
IMX_SC_R_GPT_9 425 +#define IMX_SC_R_GPT_10 426 +#define IMX_SC_R_DMA_2_CH5 427 +#define IMX_SC_R_DMA_2_CH6 428 +#define IMX_SC_R_DMA_2_CH7 429 +#define IMX_SC_R_DMA_2_CH8 430 +#define IMX_SC_R_DMA_2_CH9 431 +#define IMX_SC_R_DMA_2_CH10 432 +#define IMX_SC_R_DMA_2_CH11 433 +#define IMX_SC_R_DMA_2_CH12 434 +#define IMX_SC_R_DMA_2_CH13 435 +#define IMX_SC_R_DMA_2_CH14 436 +#define IMX_SC_R_DMA_2_CH15 437 +#define IMX_SC_R_DMA_2_CH16 438 +#define IMX_SC_R_DMA_2_CH17 439 +#define IMX_SC_R_DMA_2_CH18 440 +#define IMX_SC_R_DMA_2_CH19 441 +#define IMX_SC_R_DMA_2_CH20 442 +#define IMX_SC_R_DMA_2_CH21 443 +#define IMX_SC_R_DMA_2_CH22 444 +#define IMX_SC_R_DMA_2_CH23 445 +#define IMX_SC_R_DMA_2_CH24 446 +#define IMX_SC_R_DMA_2_CH25 447 +#define IMX_SC_R_DMA_2_CH26 448 +#define IMX_SC_R_DMA_2_CH27 449 +#define IMX_SC_R_DMA_2_CH28 450 +#define IMX_SC_R_DMA_2_CH29 451 +#define IMX_SC_R_DMA_2_CH30 452 +#define IMX_SC_R_DMA_2_CH31 453 +#define IMX_SC_R_ASRC_1 454 +#define IMX_SC_R_ESAI_1 455 +#define IMX_SC_R_SAI_6 456 +#define IMX_SC_R_SAI_7 457 +#define IMX_SC_R_AMIX 458 +#define IMX_SC_R_MQS_0 459 +#define IMX_SC_R_DMA_3_CH0 460 +#define IMX_SC_R_DMA_3_CH1 461 +#define IMX_SC_R_DMA_3_CH2 462 +#define IMX_SC_R_DMA_3_CH3 463 +#define IMX_SC_R_DMA_3_CH4 464 +#define IMX_SC_R_DMA_3_CH5 465 +#define IMX_SC_R_DMA_3_CH6 466 +#define IMX_SC_R_DMA_3_CH7 467 +#define IMX_SC_R_DMA_3_CH8 468 +#define IMX_SC_R_DMA_3_CH9 469 +#define IMX_SC_R_DMA_3_CH10 470 +#define IMX_SC_R_DMA_3_CH11 471 +#define IMX_SC_R_DMA_3_CH12 472 +#define IMX_SC_R_DMA_3_CH13 473 +#define IMX_SC_R_DMA_3_CH14 474 +#define IMX_SC_R_DMA_3_CH15 475 +#define IMX_SC_R_DMA_3_CH16 476 +#define IMX_SC_R_DMA_3_CH17 477 +#define IMX_SC_R_DMA_3_CH18 478 +#define IMX_SC_R_DMA_3_CH19 479 +#define IMX_SC_R_DMA_3_CH20 480 +#define IMX_SC_R_DMA_3_CH21 481 +#define IMX_SC_R_DMA_3_CH22 482 +#define IMX_SC_R_DMA_3_CH23 483 +#define IMX_SC_R_DMA_3_CH24 484 +#define IMX_SC_R_DMA_3_CH25 485 +#define IMX_SC_R_DMA_3_CH26 486 +#define IMX_SC_R_DMA_3_CH27 487 +#define IMX_SC_R_DMA_3_CH28 488 +#define IMX_SC_R_DMA_3_CH29 489 +#define IMX_SC_R_DMA_3_CH30 490 +#define IMX_SC_R_DMA_3_CH31 491 +#define IMX_SC_R_AUDIO_PLL_1 492 +#define IMX_SC_R_AUDIO_CLK_0 493 +#define IMX_SC_R_AUDIO_CLK_1 494 +#define IMX_SC_R_MCLK_OUT_0 495 +#define IMX_SC_R_MCLK_OUT_1 496 +#define IMX_SC_R_PMIC_0 497 +#define IMX_SC_R_PMIC_1 498 +#define IMX_SC_R_SECO 499 +#define IMX_SC_R_CAAM_JR1 500 +#define IMX_SC_R_CAAM_JR2 501 +#define IMX_SC_R_CAAM_JR3 502 +#define IMX_SC_R_SECO_MU_2 503 +#define IMX_SC_R_SECO_MU_3 504 +#define IMX_SC_R_SECO_MU_4 505 +#define IMX_SC_R_HDMI_RX_PWM_0 506 +#define IMX_SC_R_A35 507 +#define IMX_SC_R_A35_0 508 +#define IMX_SC_R_A35_1 509 +#define IMX_SC_R_A35_2 510 +#define IMX_SC_R_A35_3 511 +#define IMX_SC_R_DSP 512 +#define IMX_SC_R_DSP_RAM 513 +#define IMX_SC_R_CAAM_JR1_OUT 514 +#define IMX_SC_R_CAAM_JR2_OUT 515 +#define IMX_SC_R_CAAM_JR3_OUT 516 +#define IMX_SC_R_VPU_DEC_0 517 +#define IMX_SC_R_VPU_ENC_0 518 +#define IMX_SC_R_CAAM_JR0 519 +#define IMX_SC_R_CAAM_JR0_OUT 520 +#define IMX_SC_R_PMIC_2 521 +#define IMX_SC_R_DBLOGIC 522 +#define IMX_SC_R_HDMI_PLL_1 523 +#define IMX_SC_R_BOARD_R0 524 +#define IMX_SC_R_BOARD_R1 525 +#define IMX_SC_R_BOARD_R2 526 +#define IMX_SC_R_BOARD_R3 527 +#define IMX_SC_R_BOARD_R4 528 +#define IMX_SC_R_BOARD_R5 529 +#define IMX_SC_R_BOARD_R6 530 +#define IMX_SC_R_BOARD_R7 531 +#define IMX_SC_R_MJPEG_DEC_MP 532 +#define IMX_SC_R_MJPEG_ENC_MP 533 +#define IMX_SC_R_VPU_TS_0 534 +#define IMX_SC_R_VPU_MU_0 535 +#define IMX_SC_R_VPU_MU_1 
536 +#define IMX_SC_R_VPU_MU_2 537 +#define IMX_SC_R_VPU_MU_3 538 +#define IMX_SC_R_VPU_ENC_1 539 +#define IMX_SC_R_VPU 540 +#define IMX_SC_R_LAST 541 + +#endif /* __DT_BINDINGS_RSCRC_IMX_H */ diff --git a/include/dt-bindings/gpio/tegra186-gpio.h b/include/dt-bindings/gpio/tegra186-gpio.h index 463ad398fe3e..cabc5712e745 100644 --- a/include/dt-bindings/gpio/tegra186-gpio.h +++ b/include/dt-bindings/gpio/tegra186-gpio.h @@ -14,6 +14,34 @@ #include <dt-bindings/gpio/gpio.h> /* GPIOs implemented by main GPIO controller */ +#define TEGRA186_MAIN_GPIO_PORT_A 0 +#define TEGRA186_MAIN_GPIO_PORT_B 1 +#define TEGRA186_MAIN_GPIO_PORT_C 2 +#define TEGRA186_MAIN_GPIO_PORT_D 3 +#define TEGRA186_MAIN_GPIO_PORT_E 4 +#define TEGRA186_MAIN_GPIO_PORT_F 5 +#define TEGRA186_MAIN_GPIO_PORT_G 6 +#define TEGRA186_MAIN_GPIO_PORT_H 7 +#define TEGRA186_MAIN_GPIO_PORT_I 8 +#define TEGRA186_MAIN_GPIO_PORT_J 9 +#define TEGRA186_MAIN_GPIO_PORT_K 10 +#define TEGRA186_MAIN_GPIO_PORT_L 11 +#define TEGRA186_MAIN_GPIO_PORT_M 12 +#define TEGRA186_MAIN_GPIO_PORT_N 13 +#define TEGRA186_MAIN_GPIO_PORT_O 14 +#define TEGRA186_MAIN_GPIO_PORT_P 15 +#define TEGRA186_MAIN_GPIO_PORT_Q 16 +#define TEGRA186_MAIN_GPIO_PORT_R 17 +#define TEGRA186_MAIN_GPIO_PORT_T 18 +#define TEGRA186_MAIN_GPIO_PORT_X 19 +#define TEGRA186_MAIN_GPIO_PORT_Y 20 +#define TEGRA186_MAIN_GPIO_PORT_BB 21 +#define TEGRA186_MAIN_GPIO_PORT_CC 22 + +#define TEGRA186_MAIN_GPIO(port, offset) \ + ((TEGRA186_MAIN_GPIO_PORT_##port * 8) + offset) + +/* need to keep these for backwards-compatibility */ #define TEGRA_MAIN_GPIO_PORT_A 0 #define TEGRA_MAIN_GPIO_PORT_B 1 #define TEGRA_MAIN_GPIO_PORT_C 2 @@ -42,6 +70,19 @@ ((TEGRA_MAIN_GPIO_PORT_##port * 8) + offset) /* GPIOs implemented by AON GPIO controller */ +#define TEGRA186_AON_GPIO_PORT_S 0 +#define TEGRA186_AON_GPIO_PORT_U 1 +#define TEGRA186_AON_GPIO_PORT_V 2 +#define TEGRA186_AON_GPIO_PORT_W 3 +#define TEGRA186_AON_GPIO_PORT_Z 4 +#define TEGRA186_AON_GPIO_PORT_AA 5 +#define TEGRA186_AON_GPIO_PORT_EE 6 +#define TEGRA186_AON_GPIO_PORT_FF 7 + +#define TEGRA186_AON_GPIO(port, offset) \ + ((TEGRA186_AON_GPIO_PORT_##port * 8) + offset) + +/* need to keep these for backwards-compatibility */ #define TEGRA_AON_GPIO_PORT_S 0 #define TEGRA_AON_GPIO_PORT_U 1 #define TEGRA_AON_GPIO_PORT_V 2 diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h index bcab5b7ca785..3bdec7a84d35 100644 --- a/include/dt-bindings/mailbox/tegra186-hsp.h +++ b/include/dt-bindings/mailbox/tegra186-hsp.h @@ -22,4 +22,15 @@ #define TEGRA_HSP_DB_MASTER_CCPLEX 17 #define TEGRA_HSP_DB_MASTER_BPMP 19 +/* + * Shared mailboxes are unidirectional, so the direction needs to be specified + * in the device tree. 
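As the comment at the top of dt-bindings/firmware/imx/rsrc.h above notes, the IMX_SC_R_* values identify SCU-managed resources; a minimal sketch of the usual consumer pattern, assuming a hypothetical &uart0 peripheral node and an SCU power-domain provider labelled &pd with #power-domain-cells = <1>.

#include <dt-bindings/firmware/imx/rsrc.h>

&uart0 {
	/* the resource ID selects the SCU power domain backing this peripheral */
	power-domains = <&pd IMX_SC_R_UART_0>;
};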
+ */ +#define TEGRA_HSP_SM_MASK 0x00ffffff +#define TEGRA_HSP_SM_FLAG_RX (0 << 31) +#define TEGRA_HSP_SM_FLAG_TX (1 << 31) + +#define TEGRA_HSP_SM_RX(x) (TEGRA_HSP_SM_FLAG_RX | ((x) & TEGRA_HSP_SM_MASK)) +#define TEGRA_HSP_SM_TX(x) (TEGRA_HSP_SM_FLAG_TX | ((x) & TEGRA_HSP_SM_MASK)) + #endif diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h index 6298fec00685..94ed3edfcc70 100644 --- a/include/dt-bindings/media/xilinx-vip.h +++ b/include/dt-bindings/media/xilinx-vip.h @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Xilinx Video IP Core * @@ -6,10 +7,6 @@ * * Contacts: Hyun Kwon <hyun.kwon@xilinx.com> * Laurent Pinchart <laurent.pinchart@ideasonboard.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __DT_BINDINGS_MEDIA_XILINX_VIP_H__ diff --git a/include/dt-bindings/pinctrl/bcm2835.h b/include/dt-bindings/pinctrl/bcm2835.h index e4e4fdf5d38f..b5b2654a0e4d 100644 --- a/include/dt-bindings/pinctrl/bcm2835.h +++ b/include/dt-bindings/pinctrl/bcm2835.h @@ -1,14 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Header providing constants for bcm2835 pinctrl bindings. * * Copyright (C) 2015 Stefan Wahren <stefan.wahren@i2se.com> - * - * The code contained herein is licensed under the GNU General Public - * License. You may obtain a copy of the GNU General Public License - * Version 2 at the following locations: - * - * http://www.opensource.org/licenses/gpl-license.html - * http://www.gnu.org/copyleft/gpl.html */ #ifndef __DT_BINDINGS_PINCTRL_BCM2835_H__ diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h new file mode 100644 index 000000000000..45e11b6170ca --- /dev/null +++ b/include/dt-bindings/pinctrl/k3.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for pinctrl bindings for TI's K3 SoC + * family. 
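Because the TEGRA_HSP_SM_RX()/TEGRA_HSP_SM_TX() macros above fold the direction flag into bit 31 of the shared-mailbox index, a client requests one mailbox per direction; a rough sketch, assuming the TEGRA_HSP_MBOX_TYPE_SM constant defined elsewhere in this header and hypothetical &hsp_top0/&hsp_aon provider labels.

#include <dt-bindings/mailbox/tegra186-hsp.h>

serial {
	/* one receive and one transmit shared mailbox; direction is encoded in the specifier */
	mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_SM TEGRA_HSP_SM_RX(0)>,
		 <&hsp_aon TEGRA_HSP_MBOX_TYPE_SM TEGRA_HSP_SM_TX(1)>;
	mbox-names = "rx", "tx";
};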
+ * + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + */ +#ifndef _DT_BINDINGS_PINCTRL_TI_K3_H +#define _DT_BINDINGS_PINCTRL_TI_K3_H + +#define PULLUDEN_SHIFT (16) +#define PULLTYPESEL_SHIFT (17) +#define RXACTIVE_SHIFT (18) + +#define PULL_DISABLE (1 << PULLUDEN_SHIFT) +#define PULL_ENABLE (0 << PULLUDEN_SHIFT) + +#define PULL_UP (1 << PULLTYPESEL_SHIFT | PULL_ENABLE) +#define PULL_DOWN (0 << PULLTYPESEL_SHIFT | PULL_ENABLE) + +#define INPUT_EN (1 << RXACTIVE_SHIFT) +#define INPUT_DISABLE (0 << RXACTIVE_SHIFT) + +/* Only these macros are expected be used directly in device tree files */ +#define PIN_OUTPUT (INPUT_DISABLE | PULL_DISABLE) +#define PIN_OUTPUT_PULLUP (INPUT_DISABLE | PULL_UP) +#define PIN_OUTPUT_PULLDOWN (INPUT_DISABLE | PULL_DOWN) +#define PIN_INPUT (INPUT_EN | PULL_DISABLE) +#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP) +#define PIN_INPUT_PULLDOWN (INPUT_EN | PULL_DOWN) + +#define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) + +#endif diff --git a/include/dt-bindings/pinctrl/mt6797-pinfunc.h b/include/dt-bindings/pinctrl/mt6797-pinfunc.h new file mode 100644 index 000000000000..e9813361b27c --- /dev/null +++ b/include/dt-bindings/pinctrl/mt6797-pinfunc.h @@ -0,0 +1,1368 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DTS_MT6797_PINFUNC_H +#define __DTS_MT6797_PINFUNC_H + +#include <dt-bindings/pinctrl/mt65xx.h> + +#define MT6797_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0) +#define MT6797_GPIO0__FUNC_CSI0A_L0P_T0A (MTK_PIN_NO(0) | 1) + +#define MT6797_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0) +#define MT6797_GPIO1__FUNC_CSI0A_L0N_T0B (MTK_PIN_NO(1) | 1) + +#define MT6797_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0) +#define MT6797_GPIO2__FUNC_CSI0A_L1P_T0C (MTK_PIN_NO(2) | 1) + +#define MT6797_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0) +#define MT6797_GPIO3__FUNC_CSI0A_L1N_T1A (MTK_PIN_NO(3) | 1) + +#define MT6797_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0) +#define MT6797_GPIO4__FUNC_CSI0A_L2P_T1B (MTK_PIN_NO(4) | 1) + +#define MT6797_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0) +#define MT6797_GPIO5__FUNC_CSI0A_L2N_T1C (MTK_PIN_NO(5) | 1) + +#define MT6797_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0) +#define MT6797_GPIO6__FUNC_CSI0B_L0P_T0A (MTK_PIN_NO(6) | 1) + +#define MT6797_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0) +#define MT6797_GPIO7__FUNC_CSI0B_L0N_T0B (MTK_PIN_NO(7) | 1) + +#define MT6797_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0) +#define MT6797_GPIO8__FUNC_CSI0B_L1P_T0C (MTK_PIN_NO(8) | 1) + +#define MT6797_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0) +#define MT6797_GPIO9__FUNC_CSI0B_L1N_T1A (MTK_PIN_NO(9) | 1) + +#define MT6797_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0) +#define MT6797_GPIO10__FUNC_CSI1A_L0P_T0A (MTK_PIN_NO(10) | 1) + +#define MT6797_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0) +#define MT6797_GPIO11__FUNC_CSI1A_L0N_T0B (MTK_PIN_NO(11) | 1) + +#define MT6797_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0) +#define MT6797_GPIO12__FUNC_CSI1A_L1P_T0C (MTK_PIN_NO(12) | 1) + +#define MT6797_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0) +#define MT6797_GPIO13__FUNC_CSI1A_L1N_T1A (MTK_PIN_NO(13) | 1) + +#define MT6797_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0) +#define MT6797_GPIO14__FUNC_CSI1A_L2P_T1B (MTK_PIN_NO(14) | 1) + +#define MT6797_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0) +#define MT6797_GPIO15__FUNC_CSI1A_L2N_T1C (MTK_PIN_NO(15) | 1) + +#define MT6797_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0) +#define MT6797_GPIO16__FUNC_CSI1B_L0P_T0A (MTK_PIN_NO(16) | 1) + +#define 
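The k3.h comment above marks the PIN_* macros as the ones intended for direct use in device trees; combined with AM65X_IOPAD (which expands to two cells, pad offset and conf|mux), a pinctrl-single entry looks roughly like the sketch below. The 0x0000/0x0004 offsets and the node label are placeholders, not real AM65x pads.

#include <dt-bindings/pinctrl/k3.h>

uart0_pins_default: uart0-pins-default {
	pinctrl-single,pins = <
		AM65X_IOPAD(0x0000, PIN_INPUT_PULLUP, 0)	/* placeholder RXD pad */
		AM65X_IOPAD(0x0004, PIN_OUTPUT, 0)		/* placeholder TXD pad */
	>;
};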
MT6797_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0) +#define MT6797_GPIO17__FUNC_CSI1B_L0N_T0B (MTK_PIN_NO(17) | 1) + +#define MT6797_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0) +#define MT6797_GPIO18__FUNC_CSI1B_L1P_T0C (MTK_PIN_NO(18) | 1) + +#define MT6797_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0) +#define MT6797_GPIO19__FUNC_CSI1B_L1N_T1A (MTK_PIN_NO(19) | 1) + +#define MT6797_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0) +#define MT6797_GPIO20__FUNC_CSI1B_L2P_T1B (MTK_PIN_NO(20) | 1) + +#define MT6797_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0) +#define MT6797_GPIO21__FUNC_CSI1B_L2N_T1C (MTK_PIN_NO(21) | 1) + +#define MT6797_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0) +#define MT6797_GPIO22__FUNC_CSI2_L0P_T0A (MTK_PIN_NO(22) | 1) + +#define MT6797_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0) +#define MT6797_GPIO23__FUNC_CSI2_L0N_T0B (MTK_PIN_NO(23) | 1) + +#define MT6797_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0) +#define MT6797_GPIO24__FUNC_CSI2_L1P_T0C (MTK_PIN_NO(24) | 1) + +#define MT6797_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0) +#define MT6797_GPIO25__FUNC_CSI2_L1N_T1A (MTK_PIN_NO(25) | 1) + +#define MT6797_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0) +#define MT6797_GPIO26__FUNC_CSI2_L2P_T1B (MTK_PIN_NO(26) | 1) + +#define MT6797_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0) +#define MT6797_GPIO27__FUNC_CSI2_L2N_T1C (MTK_PIN_NO(27) | 1) + +#define MT6797_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0) +#define MT6797_GPIO28__FUNC_SPI5_CLK_A (MTK_PIN_NO(28) | 1) +#define MT6797_GPIO28__FUNC_IRTX_OUT (MTK_PIN_NO(28) | 2) +#define MT6797_GPIO28__FUNC_UDI_TDO (MTK_PIN_NO(28) | 3) +#define MT6797_GPIO28__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(28) | 4) +#define MT6797_GPIO28__FUNC_CONN_MCU_TDO (MTK_PIN_NO(28) | 5) +#define MT6797_GPIO28__FUNC_PWM_A (MTK_PIN_NO(28) | 6) +#define MT6797_GPIO28__FUNC_C2K_DM_OTDO (MTK_PIN_NO(28) | 7) + +#define MT6797_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0) +#define MT6797_GPIO29__FUNC_SPI5_MI_A (MTK_PIN_NO(29) | 1) +#define MT6797_GPIO29__FUNC_DAP_SIB1_SWD (MTK_PIN_NO(29) | 2) +#define MT6797_GPIO29__FUNC_UDI_TMS (MTK_PIN_NO(29) | 3) +#define MT6797_GPIO29__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(29) | 4) +#define MT6797_GPIO29__FUNC_CONN_MCU_TMS (MTK_PIN_NO(29) | 5) +#define MT6797_GPIO29__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(29) | 6) +#define MT6797_GPIO29__FUNC_C2K_DM_OTMS (MTK_PIN_NO(29) | 7) + +#define MT6797_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0) +#define MT6797_GPIO30__FUNC_CMMCLK0 (MTK_PIN_NO(30) | 1) +#define MT6797_GPIO30__FUNC_MD_CLKM0 (MTK_PIN_NO(30) | 7) + +#define MT6797_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0) +#define MT6797_GPIO31__FUNC_CMMCLK1 (MTK_PIN_NO(31) | 1) +#define MT6797_GPIO31__FUNC_MD_CLKM1 (MTK_PIN_NO(31) | 7) + +#define MT6797_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0) +#define MT6797_GPIO32__FUNC_SPI5_CS_A (MTK_PIN_NO(32) | 1) +#define MT6797_GPIO32__FUNC_DAP_SIB1_SWCK (MTK_PIN_NO(32) | 2) +#define MT6797_GPIO32__FUNC_UDI_TCK_XI (MTK_PIN_NO(32) | 3) +#define MT6797_GPIO32__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(32) | 4) +#define MT6797_GPIO32__FUNC_CONN_MCU_TCK (MTK_PIN_NO(32) | 5) +#define MT6797_GPIO32__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(32) | 6) +#define MT6797_GPIO32__FUNC_C2K_DM_OTCK (MTK_PIN_NO(32) | 7) + +#define MT6797_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0) +#define MT6797_GPIO33__FUNC_SPI5_MO_A (MTK_PIN_NO(33) | 1) +#define MT6797_GPIO33__FUNC_CMFLASH (MTK_PIN_NO(33) | 2) +#define MT6797_GPIO33__FUNC_UDI_TDI (MTK_PIN_NO(33) | 3) +#define MT6797_GPIO33__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(33) | 4) +#define MT6797_GPIO33__FUNC_CONN_MCU_TDI (MTK_PIN_NO(33) | 5) +#define 
MT6797_GPIO33__FUNC_MD_URXD0 (MTK_PIN_NO(33) | 6) +#define MT6797_GPIO33__FUNC_C2K_DM_OTDI (MTK_PIN_NO(33) | 7) + +#define MT6797_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0) +#define MT6797_GPIO34__FUNC_CMFLASH (MTK_PIN_NO(34) | 1) +#define MT6797_GPIO34__FUNC_CLKM0 (MTK_PIN_NO(34) | 2) +#define MT6797_GPIO34__FUNC_UDI_NTRST (MTK_PIN_NO(34) | 3) +#define MT6797_GPIO34__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(34) | 4) +#define MT6797_GPIO34__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(34) | 5) +#define MT6797_GPIO34__FUNC_MD_UTXD0 (MTK_PIN_NO(34) | 6) +#define MT6797_GPIO34__FUNC_C2K_DM_JTINTP (MTK_PIN_NO(34) | 7) + +#define MT6797_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0) +#define MT6797_GPIO35__FUNC_CMMCLK3 (MTK_PIN_NO(35) | 1) +#define MT6797_GPIO35__FUNC_CLKM1 (MTK_PIN_NO(35) | 2) +#define MT6797_GPIO35__FUNC_MD_URXD1 (MTK_PIN_NO(35) | 3) +#define MT6797_GPIO35__FUNC_PTA_RXD (MTK_PIN_NO(35) | 4) +#define MT6797_GPIO35__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(35) | 5) +#define MT6797_GPIO35__FUNC_PWM_B (MTK_PIN_NO(35) | 6) +#define MT6797_GPIO35__FUNC_PCC_PPC_IO (MTK_PIN_NO(35) | 7) + +#define MT6797_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0) +#define MT6797_GPIO36__FUNC_CMMCLK2 (MTK_PIN_NO(36) | 1) +#define MT6797_GPIO36__FUNC_CLKM2 (MTK_PIN_NO(36) | 2) +#define MT6797_GPIO36__FUNC_MD_UTXD1 (MTK_PIN_NO(36) | 3) +#define MT6797_GPIO36__FUNC_PTA_TXD (MTK_PIN_NO(36) | 4) +#define MT6797_GPIO36__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(36) | 5) +#define MT6797_GPIO36__FUNC_PWM_C (MTK_PIN_NO(36) | 6) +#define MT6797_GPIO36__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(36) | 7) + +#define MT6797_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0) +#define MT6797_GPIO37__FUNC_SCL0_0 (MTK_PIN_NO(37) | 1) + +#define MT6797_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0) +#define MT6797_GPIO38__FUNC_SDA0_0 (MTK_PIN_NO(38) | 1) + +#define MT6797_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0) +#define MT6797_GPIO39__FUNC_DPI_D0 (MTK_PIN_NO(39) | 1) +#define MT6797_GPIO39__FUNC_SPI1_CLK_A (MTK_PIN_NO(39) | 2) +#define MT6797_GPIO39__FUNC_PCM0_SYNC (MTK_PIN_NO(39) | 3) +#define MT6797_GPIO39__FUNC_I2S0_LRCK (MTK_PIN_NO(39) | 4) +#define MT6797_GPIO39__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(39) | 5) +#define MT6797_GPIO39__FUNC_URXD3 (MTK_PIN_NO(39) | 6) +#define MT6797_GPIO39__FUNC_C2K_NTRST (MTK_PIN_NO(39) | 7) + +#define MT6797_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0) +#define MT6797_GPIO40__FUNC_DPI_D1 (MTK_PIN_NO(40) | 1) +#define MT6797_GPIO40__FUNC_SPI1_MI_A (MTK_PIN_NO(40) | 2) +#define MT6797_GPIO40__FUNC_PCM0_CLK (MTK_PIN_NO(40) | 3) +#define MT6797_GPIO40__FUNC_I2S0_BCK (MTK_PIN_NO(40) | 4) +#define MT6797_GPIO40__FUNC_CONN_MCU_TDO (MTK_PIN_NO(40) | 5) +#define MT6797_GPIO40__FUNC_UTXD3 (MTK_PIN_NO(40) | 6) +#define MT6797_GPIO40__FUNC_C2K_TCK (MTK_PIN_NO(40) | 7) + +#define MT6797_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0) +#define MT6797_GPIO41__FUNC_DPI_D2 (MTK_PIN_NO(41) | 1) +#define MT6797_GPIO41__FUNC_SPI1_CS_A (MTK_PIN_NO(41) | 2) +#define MT6797_GPIO41__FUNC_PCM0_DO (MTK_PIN_NO(41) | 3) +#define MT6797_GPIO41__FUNC_I2S3_DO (MTK_PIN_NO(41) | 4) +#define MT6797_GPIO41__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(41) | 5) +#define MT6797_GPIO41__FUNC_URTS3 (MTK_PIN_NO(41) | 6) +#define MT6797_GPIO41__FUNC_C2K_TDI (MTK_PIN_NO(41) | 7) + +#define MT6797_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0) +#define MT6797_GPIO42__FUNC_DPI_D3 (MTK_PIN_NO(42) | 1) +#define MT6797_GPIO42__FUNC_SPI1_MO_A (MTK_PIN_NO(42) | 2) +#define MT6797_GPIO42__FUNC_PCM0_DI (MTK_PIN_NO(42) | 3) +#define MT6797_GPIO42__FUNC_I2S0_DI (MTK_PIN_NO(42) | 4) +#define MT6797_GPIO42__FUNC_CONN_MCU_TDI 
(MTK_PIN_NO(42) | 5) +#define MT6797_GPIO42__FUNC_UCTS3 (MTK_PIN_NO(42) | 6) +#define MT6797_GPIO42__FUNC_C2K_TMS (MTK_PIN_NO(42) | 7) + +#define MT6797_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0) +#define MT6797_GPIO43__FUNC_DPI_D4 (MTK_PIN_NO(43) | 1) +#define MT6797_GPIO43__FUNC_SPI2_CLK_A (MTK_PIN_NO(43) | 2) +#define MT6797_GPIO43__FUNC_PCM1_SYNC (MTK_PIN_NO(43) | 3) +#define MT6797_GPIO43__FUNC_I2S2_LRCK (MTK_PIN_NO(43) | 4) +#define MT6797_GPIO43__FUNC_CONN_MCU_TMS (MTK_PIN_NO(43) | 5) +#define MT6797_GPIO43__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(43) | 6) +#define MT6797_GPIO43__FUNC_C2K_TDO (MTK_PIN_NO(43) | 7) + +#define MT6797_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0) +#define MT6797_GPIO44__FUNC_DPI_D5 (MTK_PIN_NO(44) | 1) +#define MT6797_GPIO44__FUNC_SPI2_MI_A (MTK_PIN_NO(44) | 2) +#define MT6797_GPIO44__FUNC_PCM1_CLK (MTK_PIN_NO(44) | 3) +#define MT6797_GPIO44__FUNC_I2S2_BCK (MTK_PIN_NO(44) | 4) +#define MT6797_GPIO44__FUNC_CONN_MCU_TCK (MTK_PIN_NO(44) | 5) +#define MT6797_GPIO44__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(44) | 6) +#define MT6797_GPIO44__FUNC_C2K_RTCK (MTK_PIN_NO(44) | 7) + +#define MT6797_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0) +#define MT6797_GPIO45__FUNC_DPI_D6 (MTK_PIN_NO(45) | 1) +#define MT6797_GPIO45__FUNC_SPI2_CS_A (MTK_PIN_NO(45) | 2) +#define MT6797_GPIO45__FUNC_PCM1_DI (MTK_PIN_NO(45) | 3) +#define MT6797_GPIO45__FUNC_I2S2_DI (MTK_PIN_NO(45) | 4) +#define MT6797_GPIO45__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(45) | 5) +#define MT6797_GPIO45__FUNC_MD_URXD0 (MTK_PIN_NO(45) | 6) + +#define MT6797_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0) +#define MT6797_GPIO46__FUNC_DPI_D7 (MTK_PIN_NO(46) | 1) +#define MT6797_GPIO46__FUNC_SPI2_MO_A (MTK_PIN_NO(46) | 2) +#define MT6797_GPIO46__FUNC_PCM1_DO0 (MTK_PIN_NO(46) | 3) +#define MT6797_GPIO46__FUNC_I2S1_DO (MTK_PIN_NO(46) | 4) +#define MT6797_GPIO46__FUNC_ANT_SEL0 (MTK_PIN_NO(46) | 5) +#define MT6797_GPIO46__FUNC_MD_UTXD0 (MTK_PIN_NO(46) | 6) + +#define MT6797_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0) +#define MT6797_GPIO47__FUNC_DPI_D8 (MTK_PIN_NO(47) | 1) +#define MT6797_GPIO47__FUNC_CLKM0 (MTK_PIN_NO(47) | 2) +#define MT6797_GPIO47__FUNC_PCM1_DO1 (MTK_PIN_NO(47) | 3) +#define MT6797_GPIO47__FUNC_I2S0_MCK (MTK_PIN_NO(47) | 4) +#define MT6797_GPIO47__FUNC_ANT_SEL1 (MTK_PIN_NO(47) | 5) +#define MT6797_GPIO47__FUNC_PTA_RXD (MTK_PIN_NO(47) | 6) +#define MT6797_GPIO47__FUNC_C2K_URXD0 (MTK_PIN_NO(47) | 7) + +#define MT6797_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0) +#define MT6797_GPIO48__FUNC_DPI_D9 (MTK_PIN_NO(48) | 1) +#define MT6797_GPIO48__FUNC_CLKM1 (MTK_PIN_NO(48) | 2) +#define MT6797_GPIO48__FUNC_CMFLASH (MTK_PIN_NO(48) | 3) +#define MT6797_GPIO48__FUNC_I2S2_MCK (MTK_PIN_NO(48) | 4) +#define MT6797_GPIO48__FUNC_ANT_SEL2 (MTK_PIN_NO(48) | 5) +#define MT6797_GPIO48__FUNC_PTA_TXD (MTK_PIN_NO(48) | 6) +#define MT6797_GPIO48__FUNC_C2K_UTXD0 (MTK_PIN_NO(48) | 7) + +#define MT6797_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0) +#define MT6797_GPIO49__FUNC_DPI_D10 (MTK_PIN_NO(49) | 1) +#define MT6797_GPIO49__FUNC_MD_INT1_C2K_UIM1_HOT_PLUG_IN (MTK_PIN_NO(49) | 2) +#define MT6797_GPIO49__FUNC_PWM_C (MTK_PIN_NO(49) | 3) +#define MT6797_GPIO49__FUNC_IRTX_OUT (MTK_PIN_NO(49) | 4) +#define MT6797_GPIO49__FUNC_ANT_SEL3 (MTK_PIN_NO(49) | 5) +#define MT6797_GPIO49__FUNC_MD_URXD1 (MTK_PIN_NO(49) | 6) + +#define MT6797_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0) +#define MT6797_GPIO50__FUNC_DPI_D11 (MTK_PIN_NO(50) | 1) +#define MT6797_GPIO50__FUNC_MD_INT2 (MTK_PIN_NO(50) | 2) +#define MT6797_GPIO50__FUNC_PWM_D (MTK_PIN_NO(50) | 3) +#define 
MT6797_GPIO50__FUNC_CLKM2 (MTK_PIN_NO(50) | 4) +#define MT6797_GPIO50__FUNC_ANT_SEL4 (MTK_PIN_NO(50) | 5) +#define MT6797_GPIO50__FUNC_MD_UTXD1 (MTK_PIN_NO(50) | 6) + +#define MT6797_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0) +#define MT6797_GPIO51__FUNC_DPI_DE (MTK_PIN_NO(51) | 1) +#define MT6797_GPIO51__FUNC_SPI4_CLK_A (MTK_PIN_NO(51) | 2) +#define MT6797_GPIO51__FUNC_IRTX_OUT (MTK_PIN_NO(51) | 3) +#define MT6797_GPIO51__FUNC_SCL0_1 (MTK_PIN_NO(51) | 4) +#define MT6797_GPIO51__FUNC_ANT_SEL5 (MTK_PIN_NO(51) | 5) +#define MT6797_GPIO51__FUNC_C2K_UTXD1 (MTK_PIN_NO(51) | 7) + +#define MT6797_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0) +#define MT6797_GPIO52__FUNC_DPI_CK (MTK_PIN_NO(52) | 1) +#define MT6797_GPIO52__FUNC_SPI4_MI_A (MTK_PIN_NO(52) | 2) +#define MT6797_GPIO52__FUNC_SPI4_MO_A (MTK_PIN_NO(52) | 3) +#define MT6797_GPIO52__FUNC_SDA0_1 (MTK_PIN_NO(52) | 4) +#define MT6797_GPIO52__FUNC_ANT_SEL6 (MTK_PIN_NO(52) | 5) +#define MT6797_GPIO52__FUNC_C2K_URXD1 (MTK_PIN_NO(52) | 7) + +#define MT6797_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0) +#define MT6797_GPIO53__FUNC_DPI_HSYNC (MTK_PIN_NO(53) | 1) +#define MT6797_GPIO53__FUNC_SPI4_CS_A (MTK_PIN_NO(53) | 2) +#define MT6797_GPIO53__FUNC_CMFLASH (MTK_PIN_NO(53) | 3) +#define MT6797_GPIO53__FUNC_SCL1_1 (MTK_PIN_NO(53) | 4) +#define MT6797_GPIO53__FUNC_ANT_SEL7 (MTK_PIN_NO(53) | 5) +#define MT6797_GPIO53__FUNC_MD_URXD2 (MTK_PIN_NO(53) | 6) +#define MT6797_GPIO53__FUNC_PCC_PPC_IO (MTK_PIN_NO(53) | 7) + +#define MT6797_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0) +#define MT6797_GPIO54__FUNC_DPI_VSYNC (MTK_PIN_NO(54) | 1) +#define MT6797_GPIO54__FUNC_SPI4_MO_A (MTK_PIN_NO(54) | 2) +#define MT6797_GPIO54__FUNC_SPI4_MI_A (MTK_PIN_NO(54) | 3) +#define MT6797_GPIO54__FUNC_SDA1_1 (MTK_PIN_NO(54) | 4) +#define MT6797_GPIO54__FUNC_PWM_A (MTK_PIN_NO(54) | 5) +#define MT6797_GPIO54__FUNC_MD_UTXD2 (MTK_PIN_NO(54) | 6) +#define MT6797_GPIO54__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(54) | 7) + +#define MT6797_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0) +#define MT6797_GPIO55__FUNC_SCL1_0 (MTK_PIN_NO(55) | 1) + +#define MT6797_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0) +#define MT6797_GPIO56__FUNC_SDA1_0 (MTK_PIN_NO(56) | 1) + +#define MT6797_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0) +#define MT6797_GPIO57__FUNC_SPI0_CLK (MTK_PIN_NO(57) | 1) +#define MT6797_GPIO57__FUNC_SCL0_2 (MTK_PIN_NO(57) | 2) +#define MT6797_GPIO57__FUNC_PWM_B (MTK_PIN_NO(57) | 3) +#define MT6797_GPIO57__FUNC_UTXD3 (MTK_PIN_NO(57) | 4) +#define MT6797_GPIO57__FUNC_PCM0_SYNC (MTK_PIN_NO(57) | 5) + +#define MT6797_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0) +#define MT6797_GPIO58__FUNC_SPI0_MI (MTK_PIN_NO(58) | 1) +#define MT6797_GPIO58__FUNC_SPI0_MO (MTK_PIN_NO(58) | 2) +#define MT6797_GPIO58__FUNC_SDA1_2 (MTK_PIN_NO(58) | 3) +#define MT6797_GPIO58__FUNC_URXD3 (MTK_PIN_NO(58) | 4) +#define MT6797_GPIO58__FUNC_PCM0_CLK (MTK_PIN_NO(58) | 5) + +#define MT6797_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0) +#define MT6797_GPIO59__FUNC_SPI0_MO (MTK_PIN_NO(59) | 1) +#define MT6797_GPIO59__FUNC_SPI0_MI (MTK_PIN_NO(59) | 2) +#define MT6797_GPIO59__FUNC_PWM_C (MTK_PIN_NO(59) | 3) +#define MT6797_GPIO59__FUNC_URTS3 (MTK_PIN_NO(59) | 4) +#define MT6797_GPIO59__FUNC_PCM0_DO (MTK_PIN_NO(59) | 5) + +#define MT6797_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0) +#define MT6797_GPIO60__FUNC_SPI0_CS (MTK_PIN_NO(60) | 1) +#define MT6797_GPIO60__FUNC_SDA0_2 (MTK_PIN_NO(60) | 2) +#define MT6797_GPIO60__FUNC_SCL1_2 (MTK_PIN_NO(60) | 3) +#define MT6797_GPIO60__FUNC_UCTS3 (MTK_PIN_NO(60) | 4) +#define MT6797_GPIO60__FUNC_PCM0_DI (MTK_PIN_NO(60) | 5) 
+ +#define MT6797_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0) +#define MT6797_GPIO61__FUNC_EINT0 (MTK_PIN_NO(61) | 1) +#define MT6797_GPIO61__FUNC_IDDIG (MTK_PIN_NO(61) | 2) +#define MT6797_GPIO61__FUNC_SPI4_CLK_B (MTK_PIN_NO(61) | 3) +#define MT6797_GPIO61__FUNC_I2S0_LRCK (MTK_PIN_NO(61) | 4) +#define MT6797_GPIO61__FUNC_PCM0_SYNC (MTK_PIN_NO(61) | 5) +#define MT6797_GPIO61__FUNC_C2K_EINT0 (MTK_PIN_NO(61) | 7) + +#define MT6797_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0) +#define MT6797_GPIO62__FUNC_EINT1 (MTK_PIN_NO(62) | 1) +#define MT6797_GPIO62__FUNC_USB_DRVVBUS (MTK_PIN_NO(62) | 2) +#define MT6797_GPIO62__FUNC_SPI4_MI_B (MTK_PIN_NO(62) | 3) +#define MT6797_GPIO62__FUNC_I2S0_BCK (MTK_PIN_NO(62) | 4) +#define MT6797_GPIO62__FUNC_PCM0_CLK (MTK_PIN_NO(62) | 5) +#define MT6797_GPIO62__FUNC_C2K_EINT1 (MTK_PIN_NO(62) | 7) + +#define MT6797_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0) +#define MT6797_GPIO63__FUNC_EINT2 (MTK_PIN_NO(63) | 1) +#define MT6797_GPIO63__FUNC_IRTX_OUT (MTK_PIN_NO(63) | 2) +#define MT6797_GPIO63__FUNC_SPI4_MO_B (MTK_PIN_NO(63) | 3) +#define MT6797_GPIO63__FUNC_I2S0_MCK (MTK_PIN_NO(63) | 4) +#define MT6797_GPIO63__FUNC_PCM0_DI (MTK_PIN_NO(63) | 5) +#define MT6797_GPIO63__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(63) | 7) + +#define MT6797_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0) +#define MT6797_GPIO64__FUNC_EINT3 (MTK_PIN_NO(64) | 1) +#define MT6797_GPIO64__FUNC_CMFLASH (MTK_PIN_NO(64) | 2) +#define MT6797_GPIO64__FUNC_SPI4_CS_B (MTK_PIN_NO(64) | 3) +#define MT6797_GPIO64__FUNC_I2S0_DI (MTK_PIN_NO(64) | 4) +#define MT6797_GPIO64__FUNC_PCM0_DO (MTK_PIN_NO(64) | 5) +#define MT6797_GPIO64__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(64) | 7) + +#define MT6797_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0) +#define MT6797_GPIO65__FUNC_EINT4 (MTK_PIN_NO(65) | 1) +#define MT6797_GPIO65__FUNC_CLKM0 (MTK_PIN_NO(65) | 2) +#define MT6797_GPIO65__FUNC_SPI5_CLK_B (MTK_PIN_NO(65) | 3) +#define MT6797_GPIO65__FUNC_I2S1_LRCK (MTK_PIN_NO(65) | 4) +#define MT6797_GPIO65__FUNC_PWM_A (MTK_PIN_NO(65) | 5) +#define MT6797_GPIO65__FUNC_C2K_DM_EINT2 (MTK_PIN_NO(65) | 7) + +#define MT6797_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0) +#define MT6797_GPIO66__FUNC_EINT5 (MTK_PIN_NO(66) | 1) +#define MT6797_GPIO66__FUNC_CLKM1 (MTK_PIN_NO(66) | 2) +#define MT6797_GPIO66__FUNC_SPI5_MI_B (MTK_PIN_NO(66) | 3) +#define MT6797_GPIO66__FUNC_I2S1_BCK (MTK_PIN_NO(66) | 4) +#define MT6797_GPIO66__FUNC_PWM_B (MTK_PIN_NO(66) | 5) +#define MT6797_GPIO66__FUNC_C2K_DM_EINT3 (MTK_PIN_NO(66) | 7) + +#define MT6797_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0) +#define MT6797_GPIO67__FUNC_EINT6 (MTK_PIN_NO(67) | 1) +#define MT6797_GPIO67__FUNC_CLKM2 (MTK_PIN_NO(67) | 2) +#define MT6797_GPIO67__FUNC_SPI5_MO_B (MTK_PIN_NO(67) | 3) +#define MT6797_GPIO67__FUNC_I2S1_MCK (MTK_PIN_NO(67) | 4) +#define MT6797_GPIO67__FUNC_PWM_C (MTK_PIN_NO(67) | 5) +#define MT6797_GPIO67__FUNC_DBG_MON_A0 (MTK_PIN_NO(67) | 7) + +#define MT6797_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0) +#define MT6797_GPIO68__FUNC_EINT7 (MTK_PIN_NO(68) | 1) +#define MT6797_GPIO68__FUNC_CLKM3 (MTK_PIN_NO(68) | 2) +#define MT6797_GPIO68__FUNC_SPI5_CS_B (MTK_PIN_NO(68) | 3) +#define MT6797_GPIO68__FUNC_I2S1_DO (MTK_PIN_NO(68) | 4) +#define MT6797_GPIO68__FUNC_PWM_D (MTK_PIN_NO(68) | 5) +#define MT6797_GPIO68__FUNC_DBG_MON_A1 (MTK_PIN_NO(68) | 7) + +#define MT6797_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0) +#define MT6797_GPIO69__FUNC_I2S0_LRCK (MTK_PIN_NO(69) | 1) +#define MT6797_GPIO69__FUNC_I2S3_LRCK (MTK_PIN_NO(69) | 2) +#define MT6797_GPIO69__FUNC_I2S1_LRCK (MTK_PIN_NO(69) | 3) +#define 
MT6797_GPIO69__FUNC_I2S2_LRCK (MTK_PIN_NO(69) | 4) +#define MT6797_GPIO69__FUNC_DBG_MON_A2 (MTK_PIN_NO(69) | 7) + +#define MT6797_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0) +#define MT6797_GPIO70__FUNC_I2S0_BCK (MTK_PIN_NO(70) | 1) +#define MT6797_GPIO70__FUNC_I2S3_BCK (MTK_PIN_NO(70) | 2) +#define MT6797_GPIO70__FUNC_I2S1_BCK (MTK_PIN_NO(70) | 3) +#define MT6797_GPIO70__FUNC_I2S2_BCK (MTK_PIN_NO(70) | 4) +#define MT6797_GPIO70__FUNC_DBG_MON_A3 (MTK_PIN_NO(70) | 7) + +#define MT6797_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0) +#define MT6797_GPIO71__FUNC_I2S0_MCK (MTK_PIN_NO(71) | 1) +#define MT6797_GPIO71__FUNC_I2S3_MCK (MTK_PIN_NO(71) | 2) +#define MT6797_GPIO71__FUNC_I2S1_MCK (MTK_PIN_NO(71) | 3) +#define MT6797_GPIO71__FUNC_I2S2_MCK (MTK_PIN_NO(71) | 4) +#define MT6797_GPIO71__FUNC_DBG_MON_A4 (MTK_PIN_NO(71) | 7) + +#define MT6797_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0) +/* #define MT6797_GPIO72__FUNC_I2S0_DI (MTK_PIN_NO(72) | 1) */ +#define MT6797_GPIO72__FUNC_I2S0_DI (MTK_PIN_NO(72) | 2) +/* #define MT6797_GPIO72__FUNC_I2S2_DI (MTK_PIN_NO(72) | 3) */ +#define MT6797_GPIO72__FUNC_I2S2_DI (MTK_PIN_NO(72) | 4) +#define MT6797_GPIO72__FUNC_DBG_MON_A5 (MTK_PIN_NO(72) | 7) + +#define MT6797_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0) +/* #define MT6797_GPIO73__FUNC_I2S3_DO (MTK_PIN_NO(73) | 1) */ +#define MT6797_GPIO73__FUNC_I2S3_DO (MTK_PIN_NO(73) | 2) +/* #define MT6797_GPIO73__FUNC_I2S1_DO (MTK_PIN_NO(73) | 3) */ +#define MT6797_GPIO73__FUNC_I2S1_DO (MTK_PIN_NO(73) | 4) +#define MT6797_GPIO73__FUNC_DBG_MON_A6 (MTK_PIN_NO(73) | 7) + +#define MT6797_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0) +#define MT6797_GPIO74__FUNC_SCL3_0 (MTK_PIN_NO(74) | 1) +#define MT6797_GPIO74__FUNC_AUXIF_CLK1 (MTK_PIN_NO(74) | 7) + +#define MT6797_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0) +#define MT6797_GPIO75__FUNC_SDA3_0 (MTK_PIN_NO(75) | 1) +#define MT6797_GPIO75__FUNC_AUXIF_ST1 (MTK_PIN_NO(75) | 7) + +#define MT6797_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0) +#define MT6797_GPIO76__FUNC_CONN_HRST_B (MTK_PIN_NO(76) | 1) +#define MT6797_GPIO76__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(76) | 7) + +#define MT6797_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0) +#define MT6797_GPIO77__FUNC_CONN_TOP_CLK (MTK_PIN_NO(77) | 1) +#define MT6797_GPIO77__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(77) | 7) + +#define MT6797_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0) +#define MT6797_GPIO78__FUNC_CONN_TOP_DATA (MTK_PIN_NO(78) | 1) +#define MT6797_GPIO78__FUNC_C2K_DM_EINT2 (MTK_PIN_NO(78) | 7) + +#define MT6797_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0) +#define MT6797_GPIO79__FUNC_CONN_WB_PTA (MTK_PIN_NO(79) | 1) +#define MT6797_GPIO79__FUNC_C2K_DM_EINT3 (MTK_PIN_NO(79) | 7) + +#define MT6797_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0) +#define MT6797_GPIO80__FUNC_CONN_WF_HB0 (MTK_PIN_NO(80) | 1) +#define MT6797_GPIO80__FUNC_C2K_EINT0 (MTK_PIN_NO(80) | 7) + +#define MT6797_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0) +#define MT6797_GPIO81__FUNC_CONN_WF_HB1 (MTK_PIN_NO(81) | 1) +#define MT6797_GPIO81__FUNC_C2K_EINT1 (MTK_PIN_NO(81) | 7) + +#define MT6797_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0) +#define MT6797_GPIO82__FUNC_CONN_WF_HB2 (MTK_PIN_NO(82) | 1) +#define MT6797_GPIO82__FUNC_MD_CLKM0 (MTK_PIN_NO(82) | 7) + +#define MT6797_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0) +#define MT6797_GPIO83__FUNC_CONN_BT_CLK (MTK_PIN_NO(83) | 1) +#define MT6797_GPIO83__FUNC_MD_CLKM1 (MTK_PIN_NO(83) | 7) + +#define MT6797_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0) +#define MT6797_GPIO84__FUNC_CONN_BT_DATA (MTK_PIN_NO(84) | 1) + +#define MT6797_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0) 
+#define MT6797_GPIO85__FUNC_EINT8 (MTK_PIN_NO(85) | 1) +#define MT6797_GPIO85__FUNC_I2S1_LRCK (MTK_PIN_NO(85) | 2) +#define MT6797_GPIO85__FUNC_I2S2_LRCK (MTK_PIN_NO(85) | 3) +#define MT6797_GPIO85__FUNC_URXD1 (MTK_PIN_NO(85) | 4) +#define MT6797_GPIO85__FUNC_MD_URXD0 (MTK_PIN_NO(85) | 5) +#define MT6797_GPIO85__FUNC_DBG_MON_A7 (MTK_PIN_NO(85) | 7) + +#define MT6797_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0) +#define MT6797_GPIO86__FUNC_EINT9 (MTK_PIN_NO(86) | 1) +#define MT6797_GPIO86__FUNC_I2S1_BCK (MTK_PIN_NO(86) | 2) +#define MT6797_GPIO86__FUNC_I2S2_BCK (MTK_PIN_NO(86) | 3) +#define MT6797_GPIO86__FUNC_UTXD1 (MTK_PIN_NO(86) | 4) +#define MT6797_GPIO86__FUNC_MD_UTXD0 (MTK_PIN_NO(86) | 5) +#define MT6797_GPIO86__FUNC_DBG_MON_A8 (MTK_PIN_NO(86) | 7) + +#define MT6797_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0) +#define MT6797_GPIO87__FUNC_EINT10 (MTK_PIN_NO(87) | 1) +#define MT6797_GPIO87__FUNC_I2S1_MCK (MTK_PIN_NO(87) | 2) +#define MT6797_GPIO87__FUNC_I2S2_MCK (MTK_PIN_NO(87) | 3) +#define MT6797_GPIO87__FUNC_URTS1 (MTK_PIN_NO(87) | 4) +#define MT6797_GPIO87__FUNC_MD_URXD1 (MTK_PIN_NO(87) | 5) +#define MT6797_GPIO87__FUNC_DBG_MON_A9 (MTK_PIN_NO(87) | 7) + +#define MT6797_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0) +#define MT6797_GPIO88__FUNC_EINT11 (MTK_PIN_NO(88) | 1) +#define MT6797_GPIO88__FUNC_I2S1_DO (MTK_PIN_NO(88) | 2) +#define MT6797_GPIO88__FUNC_I2S2_DI (MTK_PIN_NO(88) | 3) +#define MT6797_GPIO88__FUNC_UCTS1 (MTK_PIN_NO(88) | 4) +#define MT6797_GPIO88__FUNC_MD_UTXD1 (MTK_PIN_NO(88) | 5) +#define MT6797_GPIO88__FUNC_DBG_MON_A10 (MTK_PIN_NO(88) | 7) + +#define MT6797_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0) +#define MT6797_GPIO89__FUNC_EINT12 (MTK_PIN_NO(89) | 1) +#define MT6797_GPIO89__FUNC_IRTX_OUT (MTK_PIN_NO(89) | 2) +#define MT6797_GPIO89__FUNC_CLKM0 (MTK_PIN_NO(89) | 3) +#define MT6797_GPIO89__FUNC_PCM1_SYNC (MTK_PIN_NO(89) | 4) +#define MT6797_GPIO89__FUNC_URTS0 (MTK_PIN_NO(89) | 5) +#define MT6797_GPIO89__FUNC_DBG_MON_A11 (MTK_PIN_NO(89) | 7) + +#define MT6797_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0) +#define MT6797_GPIO90__FUNC_EINT13 (MTK_PIN_NO(90) | 1) +#define MT6797_GPIO90__FUNC_CMFLASH (MTK_PIN_NO(90) | 2) +#define MT6797_GPIO90__FUNC_CLKM1 (MTK_PIN_NO(90) | 3) +#define MT6797_GPIO90__FUNC_PCM1_CLK (MTK_PIN_NO(90) | 4) +#define MT6797_GPIO90__FUNC_UCTS0 (MTK_PIN_NO(90) | 5) +#define MT6797_GPIO90__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(90) | 7) + +#define MT6797_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0) +#define MT6797_GPIO91__FUNC_EINT14 (MTK_PIN_NO(91) | 1) +#define MT6797_GPIO91__FUNC_PWM_A (MTK_PIN_NO(91) | 2) +#define MT6797_GPIO91__FUNC_CLKM2 (MTK_PIN_NO(91) | 3) +#define MT6797_GPIO91__FUNC_PCM1_DI (MTK_PIN_NO(91) | 4) +#define MT6797_GPIO91__FUNC_SDA0_3 (MTK_PIN_NO(91) | 5) +#define MT6797_GPIO91__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(91) | 7) + +#define MT6797_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0) +#define MT6797_GPIO92__FUNC_EINT15 (MTK_PIN_NO(92) | 1) +#define MT6797_GPIO92__FUNC_PWM_B (MTK_PIN_NO(92) | 2) +#define MT6797_GPIO92__FUNC_CLKM3 (MTK_PIN_NO(92) | 3) +#define MT6797_GPIO92__FUNC_PCM1_DO0 (MTK_PIN_NO(92) | 4) +#define MT6797_GPIO92__FUNC_SCL0_3 (MTK_PIN_NO(92) | 5) + +#define MT6797_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0) +#define MT6797_GPIO93__FUNC_EINT16 (MTK_PIN_NO(93) | 1) +#define MT6797_GPIO93__FUNC_IDDIG (MTK_PIN_NO(93) | 2) +#define MT6797_GPIO93__FUNC_CLKM4 (MTK_PIN_NO(93) | 3) +#define MT6797_GPIO93__FUNC_PCM1_DO1 (MTK_PIN_NO(93) | 4) +#define MT6797_GPIO93__FUNC_MD_INT2 (MTK_PIN_NO(93) | 5) +#define MT6797_GPIO93__FUNC_DROP_ZONE (MTK_PIN_NO(93) | 7) + 
+#define MT6797_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0) +#define MT6797_GPIO94__FUNC_USB_DRVVBUS (MTK_PIN_NO(94) | 1) +#define MT6797_GPIO94__FUNC_PWM_C (MTK_PIN_NO(94) | 2) +#define MT6797_GPIO94__FUNC_CLKM5 (MTK_PIN_NO(94) | 3) + +#define MT6797_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0) +#define MT6797_GPIO95__FUNC_SDA2_0 (MTK_PIN_NO(95) | 1) +#define MT6797_GPIO95__FUNC_AUXIF_ST0 (MTK_PIN_NO(95) | 7) + +#define MT6797_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0) +#define MT6797_GPIO96__FUNC_SCL2_0 (MTK_PIN_NO(96) | 1) +#define MT6797_GPIO96__FUNC_AUXIF_CLK0 (MTK_PIN_NO(96) | 7) + +#define MT6797_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0) +#define MT6797_GPIO97__FUNC_URXD0 (MTK_PIN_NO(97) | 1) +#define MT6797_GPIO97__FUNC_UTXD0 (MTK_PIN_NO(97) | 2) +#define MT6797_GPIO97__FUNC_MD_URXD0 (MTK_PIN_NO(97) | 3) +#define MT6797_GPIO97__FUNC_MD_URXD1 (MTK_PIN_NO(97) | 4) +#define MT6797_GPIO97__FUNC_MD_URXD2 (MTK_PIN_NO(97) | 5) +#define MT6797_GPIO97__FUNC_C2K_URXD0 (MTK_PIN_NO(97) | 6) +#define MT6797_GPIO97__FUNC_C2K_URXD1 (MTK_PIN_NO(97) | 7) + +#define MT6797_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0) +#define MT6797_GPIO98__FUNC_UTXD0 (MTK_PIN_NO(98) | 1) +#define MT6797_GPIO98__FUNC_URXD0 (MTK_PIN_NO(98) | 2) +#define MT6797_GPIO98__FUNC_MD_UTXD0 (MTK_PIN_NO(98) | 3) +#define MT6797_GPIO98__FUNC_MD_UTXD1 (MTK_PIN_NO(98) | 4) +#define MT6797_GPIO98__FUNC_MD_UTXD2 (MTK_PIN_NO(98) | 5) +#define MT6797_GPIO98__FUNC_C2K_UTXD0 (MTK_PIN_NO(98) | 6) +#define MT6797_GPIO98__FUNC_C2K_UTXD1 (MTK_PIN_NO(98) | 7) + +#define MT6797_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0) +#define MT6797_GPIO99__FUNC_RTC32K_CK (MTK_PIN_NO(99) | 1) + +#define MT6797_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0) +#define MT6797_GPIO100__FUNC_SRCLKENAI0 (MTK_PIN_NO(100) | 1) + +#define MT6797_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0) +#define MT6797_GPIO101__FUNC_SRCLKENAI1 (MTK_PIN_NO(101) | 1) + +#define MT6797_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0) +#define MT6797_GPIO102__FUNC_SRCLKENA0 (MTK_PIN_NO(102) | 1) + +#define MT6797_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0) +#define MT6797_GPIO103__FUNC_SRCLKENA1 (MTK_PIN_NO(103) | 1) + +#define MT6797_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0) +#define MT6797_GPIO104__FUNC_SYSRSTB (MTK_PIN_NO(104) | 1) + +#define MT6797_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0) +#define MT6797_GPIO105__FUNC_WATCHDOG (MTK_PIN_NO(105) | 1) + +#define MT6797_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0) +#define MT6797_GPIO106__FUNC_KPROW0 (MTK_PIN_NO(106) | 1) +#define MT6797_GPIO106__FUNC_CMFLASH (MTK_PIN_NO(106) | 2) +#define MT6797_GPIO106__FUNC_CLKM4 (MTK_PIN_NO(106) | 3) +#define MT6797_GPIO106__FUNC_TP_GPIO0_AO (MTK_PIN_NO(106) | 4) +#define MT6797_GPIO106__FUNC_IRTX_OUT (MTK_PIN_NO(106) | 5) + +#define MT6797_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0) +#define MT6797_GPIO107__FUNC_KPROW1 (MTK_PIN_NO(107) | 1) +#define MT6797_GPIO107__FUNC_IDDIG (MTK_PIN_NO(107) | 2) +#define MT6797_GPIO107__FUNC_CLKM5 (MTK_PIN_NO(107) | 3) +#define MT6797_GPIO107__FUNC_TP_GPIO1_AO (MTK_PIN_NO(107) | 4) +#define MT6797_GPIO107__FUNC_I2S1_BCK (MTK_PIN_NO(107) | 5) +#define MT6797_GPIO107__FUNC_DAP_SIB1_SWD (MTK_PIN_NO(107) | 7) + +#define MT6797_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0) +#define MT6797_GPIO108__FUNC_KPROW2 (MTK_PIN_NO(108) | 1) +#define MT6797_GPIO108__FUNC_USB_DRVVBUS (MTK_PIN_NO(108) | 2) +#define MT6797_GPIO108__FUNC_PWM_A (MTK_PIN_NO(108) | 3) +#define MT6797_GPIO108__FUNC_CMFLASH (MTK_PIN_NO(108) | 4) +#define MT6797_GPIO108__FUNC_I2S1_LRCK (MTK_PIN_NO(108) | 5) +#define 
MT6797_GPIO108__FUNC_DAP_SIB1_SWCK (MTK_PIN_NO(108) | 7) + +#define MT6797_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0) +#define MT6797_GPIO109__FUNC_KPCOL0 (MTK_PIN_NO(109) | 1) + +#define MT6797_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0) +#define MT6797_GPIO110__FUNC_KPCOL1 (MTK_PIN_NO(110) | 1) +#define MT6797_GPIO110__FUNC_SDA1_3 (MTK_PIN_NO(110) | 2) +#define MT6797_GPIO110__FUNC_PWM_B (MTK_PIN_NO(110) | 3) +#define MT6797_GPIO110__FUNC_CLKM0 (MTK_PIN_NO(110) | 4) +#define MT6797_GPIO110__FUNC_I2S1_DO (MTK_PIN_NO(110) | 5) +#define MT6797_GPIO110__FUNC_C2K_DM_EINT3 (MTK_PIN_NO(110) | 7) + +#define MT6797_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0) +#define MT6797_GPIO111__FUNC_KPCOL2 (MTK_PIN_NO(111) | 1) +#define MT6797_GPIO111__FUNC_SCL1_3 (MTK_PIN_NO(111) | 2) +#define MT6797_GPIO111__FUNC_PWM_C (MTK_PIN_NO(111) | 3) +#define MT6797_GPIO111__FUNC_DISP_PWM (MTK_PIN_NO(111) | 4) +#define MT6797_GPIO111__FUNC_I2S1_MCK (MTK_PIN_NO(111) | 5) +#define MT6797_GPIO111__FUNC_C2K_DM_EINT2 (MTK_PIN_NO(111) | 7) + +#define MT6797_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0) +#define MT6797_GPIO112__FUNC_MD_INT1_C2K_UIM1_HOT_PLUG_IN (MTK_PIN_NO(112) | 1) +#define MT6797_GPIO112__FUNC_C2K_DM_EINT1 (MTK_PIN_NO(112) | 7) + +#define MT6797_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0) +#define MT6797_GPIO113__FUNC_MD_INT0_C2K_UIM0_HOT_PLUG_IN (MTK_PIN_NO(113) | 1) +#define MT6797_GPIO113__FUNC_C2K_DM_EINT0 (MTK_PIN_NO(113) | 7) + +#define MT6797_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0) +#define MT6797_GPIO114__FUNC_MSDC0_DAT0 (MTK_PIN_NO(114) | 1) + +#define MT6797_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0) +#define MT6797_GPIO115__FUNC_MSDC0_DAT1 (MTK_PIN_NO(115) | 1) + +#define MT6797_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0) +#define MT6797_GPIO116__FUNC_MSDC0_DAT2 (MTK_PIN_NO(116) | 1) + +#define MT6797_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0) +#define MT6797_GPIO117__FUNC_MSDC0_DAT3 (MTK_PIN_NO(117) | 1) + +#define MT6797_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0) +#define MT6797_GPIO118__FUNC_MSDC0_DAT4 (MTK_PIN_NO(118) | 1) + +#define MT6797_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0) +#define MT6797_GPIO119__FUNC_MSDC0_DAT5 (MTK_PIN_NO(119) | 1) + +#define MT6797_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0) +#define MT6797_GPIO120__FUNC_MSDC0_DAT6 (MTK_PIN_NO(120) | 1) + +#define MT6797_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0) +#define MT6797_GPIO121__FUNC_MSDC0_DAT7 (MTK_PIN_NO(121) | 1) + +#define MT6797_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0) +#define MT6797_GPIO122__FUNC_MSDC0_CMD (MTK_PIN_NO(122) | 1) + +#define MT6797_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0) +#define MT6797_GPIO123__FUNC_MSDC0_CLK (MTK_PIN_NO(123) | 1) + +#define MT6797_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0) +#define MT6797_GPIO124__FUNC_MSDC0_DSL (MTK_PIN_NO(124) | 1) + +#define MT6797_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0) +#define MT6797_GPIO125__FUNC_MSDC0_RSTB (MTK_PIN_NO(125) | 1) + +#define MT6797_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0) +#define MT6797_GPIO126__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(126) | 1) +#define MT6797_GPIO126__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(126) | 2) +#define MT6797_GPIO126__FUNC_C2K_UIM0_CLK (MTK_PIN_NO(126) | 3) +#define MT6797_GPIO126__FUNC_C2K_UIM1_CLK (MTK_PIN_NO(126) | 4) + +#define MT6797_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0) +#define MT6797_GPIO127__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(127) | 1) +#define MT6797_GPIO127__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(127) | 2) +#define MT6797_GPIO127__FUNC_C2K_UIM0_RST (MTK_PIN_NO(127) | 3) +#define 
MT6797_GPIO127__FUNC_C2K_UIM1_RST (MTK_PIN_NO(127) | 4) + +#define MT6797_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0) +#define MT6797_GPIO128__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(128) | 1) +#define MT6797_GPIO128__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(128) | 2) +#define MT6797_GPIO128__FUNC_C2K_UIM0_IO (MTK_PIN_NO(128) | 3) +#define MT6797_GPIO128__FUNC_C2K_UIM1_IO (MTK_PIN_NO(128) | 4) + +#define MT6797_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0) +#define MT6797_GPIO129__FUNC_MSDC1_CMD (MTK_PIN_NO(129) | 1) +#define MT6797_GPIO129__FUNC_CONN_DSP_JMS (MTK_PIN_NO(129) | 2) +#define MT6797_GPIO129__FUNC_LTE_JTAG_TMS (MTK_PIN_NO(129) | 3) +#define MT6797_GPIO129__FUNC_UDI_TMS (MTK_PIN_NO(129) | 4) +#define MT6797_GPIO129__FUNC_C2K_TMS (MTK_PIN_NO(129) | 5) + +#define MT6797_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0) +#define MT6797_GPIO130__FUNC_MSDC1_DAT0 (MTK_PIN_NO(130) | 1) +#define MT6797_GPIO130__FUNC_CONN_DSP_JDI (MTK_PIN_NO(130) | 2) +#define MT6797_GPIO130__FUNC_LTE_JTAG_TDI (MTK_PIN_NO(130) | 3) +#define MT6797_GPIO130__FUNC_UDI_TDI (MTK_PIN_NO(130) | 4) +#define MT6797_GPIO130__FUNC_C2K_TDI (MTK_PIN_NO(130) | 5) + +#define MT6797_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0) +#define MT6797_GPIO131__FUNC_MSDC1_DAT1 (MTK_PIN_NO(131) | 1) +#define MT6797_GPIO131__FUNC_CONN_DSP_JDO (MTK_PIN_NO(131) | 2) +#define MT6797_GPIO131__FUNC_LTE_JTAG_TDO (MTK_PIN_NO(131) | 3) +#define MT6797_GPIO131__FUNC_UDI_TDO (MTK_PIN_NO(131) | 4) +#define MT6797_GPIO131__FUNC_C2K_TDO (MTK_PIN_NO(131) | 5) + +#define MT6797_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0) +#define MT6797_GPIO132__FUNC_MSDC1_DAT2 (MTK_PIN_NO(132) | 1) +#define MT6797_GPIO132__FUNC_C2K_RTCK (MTK_PIN_NO(132) | 5) + +#define MT6797_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0) +#define MT6797_GPIO133__FUNC_MSDC1_DAT3 (MTK_PIN_NO(133) | 1) +#define MT6797_GPIO133__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(133) | 2) +#define MT6797_GPIO133__FUNC_LTE_JTAG_TRSTN (MTK_PIN_NO(133) | 3) +#define MT6797_GPIO133__FUNC_UDI_NTRST (MTK_PIN_NO(133) | 4) +#define MT6797_GPIO133__FUNC_C2K_NTRST (MTK_PIN_NO(133) | 5) + +#define MT6797_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0) +#define MT6797_GPIO134__FUNC_MSDC1_CLK (MTK_PIN_NO(134) | 1) +#define MT6797_GPIO134__FUNC_CONN_DSP_JCK (MTK_PIN_NO(134) | 2) +#define MT6797_GPIO134__FUNC_LTE_JTAG_TCK (MTK_PIN_NO(134) | 3) +#define MT6797_GPIO134__FUNC_UDI_TCK_XI (MTK_PIN_NO(134) | 4) +#define MT6797_GPIO134__FUNC_C2K_TCK (MTK_PIN_NO(134) | 5) + +#define MT6797_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0) +#define MT6797_GPIO135__FUNC_TDM_LRCK (MTK_PIN_NO(135) | 1) +#define MT6797_GPIO135__FUNC_I2S0_LRCK (MTK_PIN_NO(135) | 2) +#define MT6797_GPIO135__FUNC_CLKM0 (MTK_PIN_NO(135) | 3) +#define MT6797_GPIO135__FUNC_PCM1_SYNC (MTK_PIN_NO(135) | 4) +#define MT6797_GPIO135__FUNC_PWM_A (MTK_PIN_NO(135) | 5) +#define MT6797_GPIO135__FUNC_DBG_MON_A12 (MTK_PIN_NO(135) | 7) + +#define MT6797_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0) +#define MT6797_GPIO136__FUNC_TDM_BCK (MTK_PIN_NO(136) | 1) +#define MT6797_GPIO136__FUNC_I2S0_BCK (MTK_PIN_NO(136) | 2) +#define MT6797_GPIO136__FUNC_CLKM1 (MTK_PIN_NO(136) | 3) +#define MT6797_GPIO136__FUNC_PCM1_CLK (MTK_PIN_NO(136) | 4) +#define MT6797_GPIO136__FUNC_PWM_B (MTK_PIN_NO(136) | 5) +#define MT6797_GPIO136__FUNC_DBG_MON_A13 (MTK_PIN_NO(136) | 7) + +#define MT6797_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0) +#define MT6797_GPIO137__FUNC_TDM_MCK (MTK_PIN_NO(137) | 1) +#define MT6797_GPIO137__FUNC_I2S0_MCK (MTK_PIN_NO(137) | 2) +#define MT6797_GPIO137__FUNC_CLKM2 (MTK_PIN_NO(137) | 3) +#define 
MT6797_GPIO137__FUNC_PCM1_DI (MTK_PIN_NO(137) | 4) +#define MT6797_GPIO137__FUNC_IRTX_OUT (MTK_PIN_NO(137) | 5) +#define MT6797_GPIO137__FUNC_DBG_MON_A14 (MTK_PIN_NO(137) | 7) + +#define MT6797_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0) +#define MT6797_GPIO138__FUNC_TDM_DATA0 (MTK_PIN_NO(138) | 1) +#define MT6797_GPIO138__FUNC_I2S0_DI (MTK_PIN_NO(138) | 2) +#define MT6797_GPIO138__FUNC_CLKM3 (MTK_PIN_NO(138) | 3) +#define MT6797_GPIO138__FUNC_PCM1_DO0 (MTK_PIN_NO(138) | 4) +#define MT6797_GPIO138__FUNC_PWM_C (MTK_PIN_NO(138) | 5) +#define MT6797_GPIO138__FUNC_SDA3_1 (MTK_PIN_NO(138) | 6) +#define MT6797_GPIO138__FUNC_DBG_MON_A15 (MTK_PIN_NO(138) | 7) + +#define MT6797_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0) +#define MT6797_GPIO139__FUNC_TDM_DATA1 (MTK_PIN_NO(139) | 1) +#define MT6797_GPIO139__FUNC_I2S3_DO (MTK_PIN_NO(139) | 2) +#define MT6797_GPIO139__FUNC_CLKM4 (MTK_PIN_NO(139) | 3) +#define MT6797_GPIO139__FUNC_PCM1_DO1 (MTK_PIN_NO(139) | 4) +#define MT6797_GPIO139__FUNC_ANT_SEL2 (MTK_PIN_NO(139) | 5) +#define MT6797_GPIO139__FUNC_SCL3_1 (MTK_PIN_NO(139) | 6) +#define MT6797_GPIO139__FUNC_DBG_MON_A16 (MTK_PIN_NO(139) | 7) + +#define MT6797_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0) +#define MT6797_GPIO140__FUNC_TDM_DATA2 (MTK_PIN_NO(140) | 1) +#define MT6797_GPIO140__FUNC_DISP_PWM (MTK_PIN_NO(140) | 2) +#define MT6797_GPIO140__FUNC_CLKM5 (MTK_PIN_NO(140) | 3) +#define MT6797_GPIO140__FUNC_SDA1_4 (MTK_PIN_NO(140) | 4) +#define MT6797_GPIO140__FUNC_ANT_SEL1 (MTK_PIN_NO(140) | 5) +#define MT6797_GPIO140__FUNC_URXD3 (MTK_PIN_NO(140) | 6) +#define MT6797_GPIO140__FUNC_DBG_MON_A17 (MTK_PIN_NO(140) | 7) + +#define MT6797_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0) +#define MT6797_GPIO141__FUNC_TDM_DATA3 (MTK_PIN_NO(141) | 1) +#define MT6797_GPIO141__FUNC_CMFLASH (MTK_PIN_NO(141) | 2) +#define MT6797_GPIO141__FUNC_IRTX_OUT (MTK_PIN_NO(141) | 3) +#define MT6797_GPIO141__FUNC_SCL1_4 (MTK_PIN_NO(141) | 4) +#define MT6797_GPIO141__FUNC_ANT_SEL0 (MTK_PIN_NO(141) | 5) +#define MT6797_GPIO141__FUNC_UTXD3 (MTK_PIN_NO(141) | 6) +#define MT6797_GPIO141__FUNC_DBG_MON_A18 (MTK_PIN_NO(141) | 7) + +#define MT6797_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0) +#define MT6797_GPIO142__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(142) | 1) +#define MT6797_GPIO142__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(142) | 2) + +#define MT6797_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0) +#define MT6797_GPIO143__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(143) | 1) +#define MT6797_GPIO143__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(143) | 2) + +#define MT6797_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0) +#define MT6797_GPIO144__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(144) | 1) + +#define MT6797_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0) +#define MT6797_GPIO145__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(145) | 1) + +#define MT6797_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0) +#define MT6797_GPIO146__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(146) | 1) + +#define MT6797_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0) +#define MT6797_GPIO147__FUNC_AUD_DAT_MISO (MTK_PIN_NO(147) | 1) +#define MT6797_GPIO147__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(147) | 2) +#define MT6797_GPIO147__FUNC_VOW_DAT_MISO (MTK_PIN_NO(147) | 3) + +#define MT6797_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0) +#define MT6797_GPIO148__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(148) | 1) +#define MT6797_GPIO148__FUNC_AUD_DAT_MISO (MTK_PIN_NO(148) | 2) + +#define MT6797_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0) +#define MT6797_GPIO149__FUNC_VOW_CLK_MISO (MTK_PIN_NO(149) | 1) + +#define MT6797_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0) +#define 
MT6797_GPIO150__FUNC_ANC_DAT_MOSI (MTK_PIN_NO(150) | 1) + +#define MT6797_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0) +#define MT6797_GPIO151__FUNC_SCL6_0 (MTK_PIN_NO(151) | 1) + +#define MT6797_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0) +#define MT6797_GPIO152__FUNC_SDA6_0 (MTK_PIN_NO(152) | 1) + +#define MT6797_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0) +#define MT6797_GPIO153__FUNC_SCL7_0 (MTK_PIN_NO(153) | 1) + +#define MT6797_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0) +#define MT6797_GPIO154__FUNC_SDA7_0 (MTK_PIN_NO(154) | 1) + +#define MT6797_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0) +#define MT6797_GPIO155__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(155) | 1) +#define MT6797_GPIO155__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(155) | 2) +#define MT6797_GPIO155__FUNC_C2K_UIM0_CLK (MTK_PIN_NO(155) | 3) +#define MT6797_GPIO155__FUNC_C2K_UIM1_CLK (MTK_PIN_NO(155) | 4) + +#define MT6797_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0) +#define MT6797_GPIO156__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(156) | 1) +#define MT6797_GPIO156__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(156) | 2) +#define MT6797_GPIO156__FUNC_C2K_UIM0_RST (MTK_PIN_NO(156) | 3) +#define MT6797_GPIO156__FUNC_C2K_UIM1_RST (MTK_PIN_NO(156) | 4) + +#define MT6797_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0) +#define MT6797_GPIO157__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(157) | 1) +#define MT6797_GPIO157__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(157) | 2) +#define MT6797_GPIO157__FUNC_C2K_UIM0_IO (MTK_PIN_NO(157) | 3) +#define MT6797_GPIO157__FUNC_C2K_UIM1_IO (MTK_PIN_NO(157) | 4) + +#define MT6797_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0) +#define MT6797_GPIO158__FUNC_MIPI_TDP0 (MTK_PIN_NO(158) | 1) + +#define MT6797_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0) +#define MT6797_GPIO159__FUNC_MIPI_TDN0 (MTK_PIN_NO(159) | 1) + +#define MT6797_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0) +#define MT6797_GPIO160__FUNC_MIPI_TDP1 (MTK_PIN_NO(160) | 1) + +#define MT6797_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0) +#define MT6797_GPIO161__FUNC_MIPI_TDN1 (MTK_PIN_NO(161) | 1) + +#define MT6797_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0) +#define MT6797_GPIO162__FUNC_MIPI_TCP (MTK_PIN_NO(162) | 1) + +#define MT6797_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0) +#define MT6797_GPIO163__FUNC_MIPI_TCN (MTK_PIN_NO(163) | 1) + +#define MT6797_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0) +#define MT6797_GPIO164__FUNC_MIPI_TDP2 (MTK_PIN_NO(164) | 1) + +#define MT6797_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0) +#define MT6797_GPIO165__FUNC_MIPI_TDN2 (MTK_PIN_NO(165) | 1) + +#define MT6797_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0) +#define MT6797_GPIO166__FUNC_MIPI_TDP3 (MTK_PIN_NO(166) | 1) + +#define MT6797_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0) +#define MT6797_GPIO167__FUNC_MIPI_TDN3 (MTK_PIN_NO(167) | 1) + +#define MT6797_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0) +#define MT6797_GPIO168__FUNC_MIPI_TDP0_A (MTK_PIN_NO(168) | 1) + +#define MT6797_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0) +#define MT6797_GPIO169__FUNC_MIPI_TDN0_A (MTK_PIN_NO(169) | 1) + +#define MT6797_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0) +#define MT6797_GPIO170__FUNC_MIPI_TDP1_A (MTK_PIN_NO(170) | 1) + +#define MT6797_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0) +#define MT6797_GPIO171__FUNC_MIPI_TDN1_A (MTK_PIN_NO(171) | 1) + +#define MT6797_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0) +#define MT6797_GPIO172__FUNC_MIPI_TCP_A (MTK_PIN_NO(172) | 1) + +#define MT6797_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0) +#define MT6797_GPIO173__FUNC_MIPI_TCN_A (MTK_PIN_NO(173) | 1) + +#define MT6797_GPIO174__FUNC_GPIO174 
(MTK_PIN_NO(174) | 0) +#define MT6797_GPIO174__FUNC_MIPI_TDP2_A (MTK_PIN_NO(174) | 1) + +#define MT6797_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0) +#define MT6797_GPIO175__FUNC_MIPI_TDN2_A (MTK_PIN_NO(175) | 1) + +#define MT6797_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0) +#define MT6797_GPIO176__FUNC_MIPI_TDP3_A (MTK_PIN_NO(176) | 1) + +#define MT6797_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0) +#define MT6797_GPIO177__FUNC_MIPI_TDN3_A (MTK_PIN_NO(177) | 1) + +#define MT6797_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0) +#define MT6797_GPIO178__FUNC_DISP_PWM (MTK_PIN_NO(178) | 1) +#define MT6797_GPIO178__FUNC_PWM_D (MTK_PIN_NO(178) | 2) +#define MT6797_GPIO178__FUNC_CLKM5 (MTK_PIN_NO(178) | 3) +#define MT6797_GPIO178__FUNC_DBG_MON_A19 (MTK_PIN_NO(178) | 7) + +#define MT6797_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0) +#define MT6797_GPIO179__FUNC_DSI_TE0 (MTK_PIN_NO(179) | 1) +#define MT6797_GPIO179__FUNC_DBG_MON_A20 (MTK_PIN_NO(179) | 7) + +#define MT6797_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0) +#define MT6797_GPIO180__FUNC_LCM_RST (MTK_PIN_NO(180) | 1) +#define MT6797_GPIO180__FUNC_DSI_TE1 (MTK_PIN_NO(180) | 2) +#define MT6797_GPIO180__FUNC_DBG_MON_A21 (MTK_PIN_NO(180) | 7) + +#define MT6797_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0) +#define MT6797_GPIO181__FUNC_IDDIG (MTK_PIN_NO(181) | 1) +#define MT6797_GPIO181__FUNC_DSI_TE1 (MTK_PIN_NO(181) | 2) +#define MT6797_GPIO181__FUNC_DBG_MON_A22 (MTK_PIN_NO(181) | 7) + +#define MT6797_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0) +#define MT6797_GPIO182__FUNC_TESTMODE (MTK_PIN_NO(182) | 1) + +#define MT6797_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0) +#define MT6797_GPIO183__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(183) | 1) +#define MT6797_GPIO183__FUNC_SPM_BSI_CK (MTK_PIN_NO(183) | 2) +#define MT6797_GPIO183__FUNC_DBG_MON_B27 (MTK_PIN_NO(183) | 7) + +#define MT6797_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0) +#define MT6797_GPIO184__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(184) | 1) +#define MT6797_GPIO184__FUNC_SPM_BSI_EN (MTK_PIN_NO(184) | 2) +#define MT6797_GPIO184__FUNC_DBG_MON_B28 (MTK_PIN_NO(184) | 7) + +#define MT6797_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0) +#define MT6797_GPIO185__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(185) | 1) +#define MT6797_GPIO185__FUNC_SPM_BSI_D0 (MTK_PIN_NO(185) | 2) +#define MT6797_GPIO185__FUNC_DBG_MON_B29 (MTK_PIN_NO(185) | 7) + +#define MT6797_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0) +#define MT6797_GPIO186__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(186) | 1) +#define MT6797_GPIO186__FUNC_SPM_BSI_D1 (MTK_PIN_NO(186) | 2) +#define MT6797_GPIO186__FUNC_DBG_MON_B30 (MTK_PIN_NO(186) | 7) + +#define MT6797_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0) +#define MT6797_GPIO187__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(187) | 1) +#define MT6797_GPIO187__FUNC_SPM_BSI_D2 (MTK_PIN_NO(187) | 2) +#define MT6797_GPIO187__FUNC_DBG_MON_B31 (MTK_PIN_NO(187) | 7) + +#define MT6797_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0) +#define MT6797_GPIO188__FUNC_MIPI0_SCLK (MTK_PIN_NO(188) | 1) +#define MT6797_GPIO188__FUNC_DBG_MON_B32 (MTK_PIN_NO(188) | 7) + +#define MT6797_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0) +#define MT6797_GPIO189__FUNC_MIPI0_SDATA (MTK_PIN_NO(189) | 1) + +#define MT6797_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0) +#define MT6797_GPIO190__FUNC_MIPI1_SCLK (MTK_PIN_NO(190) | 1) + +#define MT6797_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0) +#define MT6797_GPIO191__FUNC_MIPI1_SDATA (MTK_PIN_NO(191) | 1) + +#define MT6797_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0) +#define MT6797_GPIO192__FUNC_BPI_BUS4 (MTK_PIN_NO(192) | 1) + +#define 
MT6797_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0) +#define MT6797_GPIO193__FUNC_BPI_BUS5 (MTK_PIN_NO(193) | 1) +#define MT6797_GPIO193__FUNC_DBG_MON_B0 (MTK_PIN_NO(193) | 7) + +#define MT6797_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0) +#define MT6797_GPIO194__FUNC_BPI_BUS6 (MTK_PIN_NO(194) | 1) +#define MT6797_GPIO194__FUNC_DBG_MON_B1 (MTK_PIN_NO(194) | 7) + +#define MT6797_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0) +#define MT6797_GPIO195__FUNC_BPI_BUS7 (MTK_PIN_NO(195) | 1) +#define MT6797_GPIO195__FUNC_DBG_MON_B2 (MTK_PIN_NO(195) | 7) + +#define MT6797_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0) +#define MT6797_GPIO196__FUNC_BPI_BUS8 (MTK_PIN_NO(196) | 1) +#define MT6797_GPIO196__FUNC_DBG_MON_B3 (MTK_PIN_NO(196) | 7) + +#define MT6797_GPIO197__FUNC_GPIO197 (MTK_PIN_NO(197) | 0) +#define MT6797_GPIO197__FUNC_BPI_BUS9 (MTK_PIN_NO(197) | 1) +#define MT6797_GPIO197__FUNC_DBG_MON_B4 (MTK_PIN_NO(197) | 7) + +#define MT6797_GPIO198__FUNC_GPIO198 (MTK_PIN_NO(198) | 0) +#define MT6797_GPIO198__FUNC_BPI_BUS10 (MTK_PIN_NO(198) | 1) +#define MT6797_GPIO198__FUNC_DBG_MON_B5 (MTK_PIN_NO(198) | 7) + +#define MT6797_GPIO199__FUNC_GPIO199 (MTK_PIN_NO(199) | 0) +#define MT6797_GPIO199__FUNC_BPI_BUS11 (MTK_PIN_NO(199) | 1) +#define MT6797_GPIO199__FUNC_DBG_MON_B6 (MTK_PIN_NO(199) | 7) + +#define MT6797_GPIO200__FUNC_GPIO200 (MTK_PIN_NO(200) | 0) +#define MT6797_GPIO200__FUNC_BPI_BUS12 (MTK_PIN_NO(200) | 1) +#define MT6797_GPIO200__FUNC_DBG_MON_B7 (MTK_PIN_NO(200) | 7) + +#define MT6797_GPIO201__FUNC_GPIO201 (MTK_PIN_NO(201) | 0) +#define MT6797_GPIO201__FUNC_BPI_BUS13 (MTK_PIN_NO(201) | 1) +#define MT6797_GPIO201__FUNC_DBG_MON_B8 (MTK_PIN_NO(201) | 7) + +#define MT6797_GPIO202__FUNC_GPIO202 (MTK_PIN_NO(202) | 0) +#define MT6797_GPIO202__FUNC_BPI_BUS14 (MTK_PIN_NO(202) | 1) +#define MT6797_GPIO202__FUNC_DBG_MON_B9 (MTK_PIN_NO(202) | 7) + +#define MT6797_GPIO203__FUNC_GPIO203 (MTK_PIN_NO(203) | 0) +#define MT6797_GPIO203__FUNC_BPI_BUS15 (MTK_PIN_NO(203) | 1) +#define MT6797_GPIO203__FUNC_DBG_MON_B10 (MTK_PIN_NO(203) | 7) + +#define MT6797_GPIO204__FUNC_GPIO204 (MTK_PIN_NO(204) | 0) +#define MT6797_GPIO204__FUNC_BPI_BUS16 (MTK_PIN_NO(204) | 1) +#define MT6797_GPIO204__FUNC_PA_VM0 (MTK_PIN_NO(204) | 2) +#define MT6797_GPIO204__FUNC_DBG_MON_B11 (MTK_PIN_NO(204) | 7) + +#define MT6797_GPIO205__FUNC_GPIO205 (MTK_PIN_NO(205) | 0) +#define MT6797_GPIO205__FUNC_BPI_BUS17 (MTK_PIN_NO(205) | 1) +#define MT6797_GPIO205__FUNC_PA_VM1 (MTK_PIN_NO(205) | 2) +#define MT6797_GPIO205__FUNC_DBG_MON_B12 (MTK_PIN_NO(205) | 7) + +#define MT6797_GPIO206__FUNC_GPIO206 (MTK_PIN_NO(206) | 0) +#define MT6797_GPIO206__FUNC_BPI_BUS18 (MTK_PIN_NO(206) | 1) +#define MT6797_GPIO206__FUNC_TX_SWAP0 (MTK_PIN_NO(206) | 2) +#define MT6797_GPIO206__FUNC_DBG_MON_B13 (MTK_PIN_NO(206) | 7) + +#define MT6797_GPIO207__FUNC_GPIO207 (MTK_PIN_NO(207) | 0) +#define MT6797_GPIO207__FUNC_BPI_BUS19 (MTK_PIN_NO(207) | 1) +#define MT6797_GPIO207__FUNC_TX_SWAP1 (MTK_PIN_NO(207) | 2) +#define MT6797_GPIO207__FUNC_DBG_MON_B14 (MTK_PIN_NO(207) | 7) + +#define MT6797_GPIO208__FUNC_GPIO208 (MTK_PIN_NO(208) | 0) +#define MT6797_GPIO208__FUNC_BPI_BUS20 (MTK_PIN_NO(208) | 1) +#define MT6797_GPIO208__FUNC_TX_SWAP2 (MTK_PIN_NO(208) | 2) +#define MT6797_GPIO208__FUNC_DBG_MON_B15 (MTK_PIN_NO(208) | 7) + +#define MT6797_GPIO209__FUNC_GPIO209 (MTK_PIN_NO(209) | 0) +#define MT6797_GPIO209__FUNC_BPI_BUS21 (MTK_PIN_NO(209) | 1) +#define MT6797_GPIO209__FUNC_TX_SWAP3 (MTK_PIN_NO(209) | 2) +#define MT6797_GPIO209__FUNC_DBG_MON_B16 (MTK_PIN_NO(209) | 7) + +#define 
MT6797_GPIO210__FUNC_GPIO210 (MTK_PIN_NO(210) | 0) +#define MT6797_GPIO210__FUNC_BPI_BUS22 (MTK_PIN_NO(210) | 1) +#define MT6797_GPIO210__FUNC_DET_BPI0 (MTK_PIN_NO(210) | 2) +#define MT6797_GPIO210__FUNC_DBG_MON_B17 (MTK_PIN_NO(210) | 7) + +#define MT6797_GPIO211__FUNC_GPIO211 (MTK_PIN_NO(211) | 0) +#define MT6797_GPIO211__FUNC_BPI_BUS23 (MTK_PIN_NO(211) | 1) +#define MT6797_GPIO211__FUNC_DET_BPI1 (MTK_PIN_NO(211) | 2) +#define MT6797_GPIO211__FUNC_DBG_MON_B18 (MTK_PIN_NO(211) | 7) + +#define MT6797_GPIO212__FUNC_GPIO212 (MTK_PIN_NO(212) | 0) +#define MT6797_GPIO212__FUNC_BPI_BUS0 (MTK_PIN_NO(212) | 1) +#define MT6797_GPIO212__FUNC_DBG_MON_B19 (MTK_PIN_NO(212) | 7) + +#define MT6797_GPIO213__FUNC_GPIO213 (MTK_PIN_NO(213) | 0) +#define MT6797_GPIO213__FUNC_BPI_BUS1 (MTK_PIN_NO(213) | 1) +#define MT6797_GPIO213__FUNC_DBG_MON_B20 (MTK_PIN_NO(213) | 7) + +#define MT6797_GPIO214__FUNC_GPIO214 (MTK_PIN_NO(214) | 0) +#define MT6797_GPIO214__FUNC_BPI_BUS2 (MTK_PIN_NO(214) | 1) +#define MT6797_GPIO214__FUNC_DBG_MON_B21 (MTK_PIN_NO(214) | 7) + +#define MT6797_GPIO215__FUNC_GPIO215 (MTK_PIN_NO(215) | 0) +#define MT6797_GPIO215__FUNC_BPI_BUS3 (MTK_PIN_NO(215) | 1) +#define MT6797_GPIO215__FUNC_DBG_MON_B22 (MTK_PIN_NO(215) | 7) + +#define MT6797_GPIO216__FUNC_GPIO216 (MTK_PIN_NO(216) | 0) +#define MT6797_GPIO216__FUNC_MIPI2_SCLK (MTK_PIN_NO(216) | 1) +#define MT6797_GPIO216__FUNC_DBG_MON_B23 (MTK_PIN_NO(216) | 7) + +#define MT6797_GPIO217__FUNC_GPIO217 (MTK_PIN_NO(217) | 0) +#define MT6797_GPIO217__FUNC_MIPI2_SDATA (MTK_PIN_NO(217) | 1) +#define MT6797_GPIO217__FUNC_DBG_MON_B24 (MTK_PIN_NO(217) | 7) + +#define MT6797_GPIO218__FUNC_GPIO218 (MTK_PIN_NO(218) | 0) +#define MT6797_GPIO218__FUNC_MIPI3_SCLK (MTK_PIN_NO(218) | 1) +#define MT6797_GPIO218__FUNC_DBG_MON_B25 (MTK_PIN_NO(218) | 7) + +#define MT6797_GPIO219__FUNC_GPIO219 (MTK_PIN_NO(219) | 0) +#define MT6797_GPIO219__FUNC_MIPI3_SDATA (MTK_PIN_NO(219) | 1) +#define MT6797_GPIO219__FUNC_DBG_MON_B26 (MTK_PIN_NO(219) | 7) + +#define MT6797_GPIO220__FUNC_GPIO220 (MTK_PIN_NO(220) | 0) +#define MT6797_GPIO220__FUNC_CONN_WF_IP (MTK_PIN_NO(220) | 1) + +#define MT6797_GPIO221__FUNC_GPIO221 (MTK_PIN_NO(221) | 0) +#define MT6797_GPIO221__FUNC_CONN_WF_IN (MTK_PIN_NO(221) | 1) + +#define MT6797_GPIO222__FUNC_GPIO222 (MTK_PIN_NO(222) | 0) +#define MT6797_GPIO222__FUNC_CONN_WF_QP (MTK_PIN_NO(222) | 1) + +#define MT6797_GPIO223__FUNC_GPIO223 (MTK_PIN_NO(223) | 0) +#define MT6797_GPIO223__FUNC_CONN_WF_QN (MTK_PIN_NO(223) | 1) + +#define MT6797_GPIO224__FUNC_GPIO224 (MTK_PIN_NO(224) | 0) +#define MT6797_GPIO224__FUNC_CONN_BT_IP (MTK_PIN_NO(224) | 1) + +#define MT6797_GPIO225__FUNC_GPIO225 (MTK_PIN_NO(225) | 0) +#define MT6797_GPIO225__FUNC_CONN_BT_IN (MTK_PIN_NO(225) | 1) + +#define MT6797_GPIO226__FUNC_GPIO226 (MTK_PIN_NO(226) | 0) +#define MT6797_GPIO226__FUNC_CONN_BT_QP (MTK_PIN_NO(226) | 1) + +#define MT6797_GPIO227__FUNC_GPIO227 (MTK_PIN_NO(227) | 0) +#define MT6797_GPIO227__FUNC_CONN_BT_QN (MTK_PIN_NO(227) | 1) + +#define MT6797_GPIO228__FUNC_GPIO228 (MTK_PIN_NO(228) | 0) +#define MT6797_GPIO228__FUNC_CONN_GPS_IP (MTK_PIN_NO(228) | 1) + +#define MT6797_GPIO229__FUNC_GPIO229 (MTK_PIN_NO(229) | 0) +#define MT6797_GPIO229__FUNC_CONN_GPS_IN (MTK_PIN_NO(229) | 1) + +#define MT6797_GPIO230__FUNC_GPIO230 (MTK_PIN_NO(230) | 0) +#define MT6797_GPIO230__FUNC_CONN_GPS_QP (MTK_PIN_NO(230) | 1) + +#define MT6797_GPIO231__FUNC_GPIO231 (MTK_PIN_NO(231) | 0) +#define MT6797_GPIO231__FUNC_CONN_GPS_QN (MTK_PIN_NO(231) | 1) + +#define MT6797_GPIO232__FUNC_GPIO232 (MTK_PIN_NO(232) 
| 0) +#define MT6797_GPIO232__FUNC_URXD1 (MTK_PIN_NO(232) | 1) +#define MT6797_GPIO232__FUNC_UTXD1 (MTK_PIN_NO(232) | 2) +#define MT6797_GPIO232__FUNC_MD_URXD0 (MTK_PIN_NO(232) | 3) +#define MT6797_GPIO232__FUNC_MD_URXD1 (MTK_PIN_NO(232) | 4) +#define MT6797_GPIO232__FUNC_MD_URXD2 (MTK_PIN_NO(232) | 5) +#define MT6797_GPIO232__FUNC_C2K_URXD0 (MTK_PIN_NO(232) | 6) +#define MT6797_GPIO232__FUNC_C2K_URXD1 (MTK_PIN_NO(232) | 7) + +#define MT6797_GPIO233__FUNC_GPIO233 (MTK_PIN_NO(233) | 0) +#define MT6797_GPIO233__FUNC_UTXD1 (MTK_PIN_NO(233) | 1) +#define MT6797_GPIO233__FUNC_URXD1 (MTK_PIN_NO(233) | 2) +#define MT6797_GPIO233__FUNC_MD_UTXD0 (MTK_PIN_NO(233) | 3) +#define MT6797_GPIO233__FUNC_MD_UTXD1 (MTK_PIN_NO(233) | 4) +#define MT6797_GPIO233__FUNC_MD_UTXD2 (MTK_PIN_NO(233) | 5) +#define MT6797_GPIO233__FUNC_C2K_UTXD0 (MTK_PIN_NO(233) | 6) +#define MT6797_GPIO233__FUNC_C2K_UTXD1 (MTK_PIN_NO(233) | 7) + +#define MT6797_GPIO234__FUNC_GPIO234 (MTK_PIN_NO(234) | 0) +#define MT6797_GPIO234__FUNC_SPI1_CLK_B (MTK_PIN_NO(234) | 1) +#define MT6797_GPIO234__FUNC_TP_UTXD1_AO (MTK_PIN_NO(234) | 2) +#define MT6797_GPIO234__FUNC_SCL4_1 (MTK_PIN_NO(234) | 3) +#define MT6797_GPIO234__FUNC_UTXD0 (MTK_PIN_NO(234) | 4) +#define MT6797_GPIO234__FUNC_PWM_A (MTK_PIN_NO(234) | 6) +#define MT6797_GPIO234__FUNC_DBG_MON_A23 (MTK_PIN_NO(234) | 7) + +#define MT6797_GPIO235__FUNC_GPIO235 (MTK_PIN_NO(235) | 0) +#define MT6797_GPIO235__FUNC_SPI1_MI_B (MTK_PIN_NO(235) | 1) +#define MT6797_GPIO235__FUNC_SPI1_MO_B (MTK_PIN_NO(235) | 2) +#define MT6797_GPIO235__FUNC_SDA4_1 (MTK_PIN_NO(235) | 3) +#define MT6797_GPIO235__FUNC_URXD0 (MTK_PIN_NO(235) | 4) +#define MT6797_GPIO235__FUNC_CLKM0 (MTK_PIN_NO(235) | 6) +#define MT6797_GPIO235__FUNC_DBG_MON_A24 (MTK_PIN_NO(235) | 7) + +#define MT6797_GPIO236__FUNC_GPIO236 (MTK_PIN_NO(236) | 0) +#define MT6797_GPIO236__FUNC_SPI1_MO_B (MTK_PIN_NO(236) | 1) +#define MT6797_GPIO236__FUNC_SPI1_MI_B (MTK_PIN_NO(236) | 2) +#define MT6797_GPIO236__FUNC_SCL5_1 (MTK_PIN_NO(236) | 3) +#define MT6797_GPIO236__FUNC_URTS0 (MTK_PIN_NO(236) | 4) +#define MT6797_GPIO236__FUNC_PWM_B (MTK_PIN_NO(236) | 6) +#define MT6797_GPIO236__FUNC_DBG_MON_A25 (MTK_PIN_NO(236) | 7) + +#define MT6797_GPIO237__FUNC_GPIO237 (MTK_PIN_NO(237) | 0) +#define MT6797_GPIO237__FUNC_SPI1_CS_B (MTK_PIN_NO(237) | 1) +#define MT6797_GPIO237__FUNC_TP_URXD1_AO (MTK_PIN_NO(237) | 2) +#define MT6797_GPIO237__FUNC_SDA5_1 (MTK_PIN_NO(237) | 3) +#define MT6797_GPIO237__FUNC_UCTS0 (MTK_PIN_NO(237) | 4) +#define MT6797_GPIO237__FUNC_CLKM1 (MTK_PIN_NO(237) | 6) +#define MT6797_GPIO237__FUNC_DBG_MON_A26 (MTK_PIN_NO(237) | 7) + +#define MT6797_GPIO238__FUNC_GPIO238 (MTK_PIN_NO(238) | 0) +#define MT6797_GPIO238__FUNC_SDA4_0 (MTK_PIN_NO(238) | 1) + +#define MT6797_GPIO239__FUNC_GPIO239 (MTK_PIN_NO(239) | 0) +#define MT6797_GPIO239__FUNC_SCL4_0 (MTK_PIN_NO(239) | 1) + +#define MT6797_GPIO240__FUNC_GPIO240 (MTK_PIN_NO(240) | 0) +#define MT6797_GPIO240__FUNC_SDA5_0 (MTK_PIN_NO(240) | 1) + +#define MT6797_GPIO241__FUNC_GPIO241 (MTK_PIN_NO(241) | 0) +#define MT6797_GPIO241__FUNC_SCL5_0 (MTK_PIN_NO(241) | 1) + +#define MT6797_GPIO242__FUNC_GPIO242 (MTK_PIN_NO(242) | 0) +#define MT6797_GPIO242__FUNC_SPI2_CLK_B (MTK_PIN_NO(242) | 1) +#define MT6797_GPIO242__FUNC_TP_UTXD2_AO (MTK_PIN_NO(242) | 2) +#define MT6797_GPIO242__FUNC_SCL4_2 (MTK_PIN_NO(242) | 3) +#define MT6797_GPIO242__FUNC_UTXD1 (MTK_PIN_NO(242) | 4) +#define MT6797_GPIO242__FUNC_URTS3 (MTK_PIN_NO(242) | 5) +#define MT6797_GPIO242__FUNC_PWM_C (MTK_PIN_NO(242) | 6) +#define 
MT6797_GPIO242__FUNC_DBG_MON_A27 (MTK_PIN_NO(242) | 7) + +#define MT6797_GPIO243__FUNC_GPIO243 (MTK_PIN_NO(243) | 0) +#define MT6797_GPIO243__FUNC_SPI2_MI_B (MTK_PIN_NO(243) | 1) +#define MT6797_GPIO243__FUNC_SPI2_MO_B (MTK_PIN_NO(243) | 2) +#define MT6797_GPIO243__FUNC_SDA4_2 (MTK_PIN_NO(243) | 3) +#define MT6797_GPIO243__FUNC_URXD1 (MTK_PIN_NO(243) | 4) +#define MT6797_GPIO243__FUNC_UCTS3 (MTK_PIN_NO(243) | 5) +#define MT6797_GPIO243__FUNC_CLKM2 (MTK_PIN_NO(243) | 6) +#define MT6797_GPIO243__FUNC_DBG_MON_A28 (MTK_PIN_NO(243) | 7) + +#define MT6797_GPIO244__FUNC_GPIO244 (MTK_PIN_NO(244) | 0) +#define MT6797_GPIO244__FUNC_SPI2_MO_B (MTK_PIN_NO(244) | 1) +#define MT6797_GPIO244__FUNC_SPI2_MI_B (MTK_PIN_NO(244) | 2) +#define MT6797_GPIO244__FUNC_SCL5_2 (MTK_PIN_NO(244) | 3) +#define MT6797_GPIO244__FUNC_URTS1 (MTK_PIN_NO(244) | 4) +#define MT6797_GPIO244__FUNC_UTXD3 (MTK_PIN_NO(244) | 5) +#define MT6797_GPIO244__FUNC_PWM_D (MTK_PIN_NO(244) | 6) +#define MT6797_GPIO244__FUNC_DBG_MON_A29 (MTK_PIN_NO(244) | 7) + +#define MT6797_GPIO245__FUNC_GPIO245 (MTK_PIN_NO(245) | 0) +#define MT6797_GPIO245__FUNC_SPI2_CS_B (MTK_PIN_NO(245) | 1) +#define MT6797_GPIO245__FUNC_TP_URXD2_AO (MTK_PIN_NO(245) | 2) +#define MT6797_GPIO245__FUNC_SDA5_2 (MTK_PIN_NO(245) | 3) +#define MT6797_GPIO245__FUNC_UCTS1 (MTK_PIN_NO(245) | 4) +#define MT6797_GPIO245__FUNC_URXD3 (MTK_PIN_NO(245) | 5) +#define MT6797_GPIO245__FUNC_CLKM3 (MTK_PIN_NO(245) | 6) +#define MT6797_GPIO245__FUNC_DBG_MON_A30 (MTK_PIN_NO(245) | 7) + +#define MT6797_GPIO246__FUNC_GPIO246 (MTK_PIN_NO(246) | 0) +#define MT6797_GPIO246__FUNC_I2S1_LRCK (MTK_PIN_NO(246) | 1) +#define MT6797_GPIO246__FUNC_I2S2_LRCK (MTK_PIN_NO(246) | 2) +#define MT6797_GPIO246__FUNC_I2S0_LRCK (MTK_PIN_NO(246) | 3) +#define MT6797_GPIO246__FUNC_I2S3_LRCK (MTK_PIN_NO(246) | 4) +#define MT6797_GPIO246__FUNC_PCM0_SYNC (MTK_PIN_NO(246) | 5) +#define MT6797_GPIO246__FUNC_SPI5_CLK_C (MTK_PIN_NO(246) | 6) +#define MT6797_GPIO246__FUNC_DBG_MON_A31 (MTK_PIN_NO(246) | 7) + +#define MT6797_GPIO247__FUNC_GPIO247 (MTK_PIN_NO(247) | 0) +#define MT6797_GPIO247__FUNC_I2S1_BCK (MTK_PIN_NO(247) | 1) +#define MT6797_GPIO247__FUNC_I2S2_BCK (MTK_PIN_NO(247) | 2) +#define MT6797_GPIO247__FUNC_I2S0_BCK (MTK_PIN_NO(247) | 3) +#define MT6797_GPIO247__FUNC_I2S3_BCK (MTK_PIN_NO(247) | 4) +#define MT6797_GPIO247__FUNC_PCM0_CLK (MTK_PIN_NO(247) | 5) +#define MT6797_GPIO247__FUNC_SPI5_MI_C (MTK_PIN_NO(247) | 6) +#define MT6797_GPIO247__FUNC_DBG_MON_A32 (MTK_PIN_NO(247) | 7) + +#define MT6797_GPIO248__FUNC_GPIO248 (MTK_PIN_NO(248) | 0) +/* #define MT6797_GPIO248__FUNC_I2S2_DI (MTK_PIN_NO(248) | 1) */ +#define MT6797_GPIO248__FUNC_I2S2_DI (MTK_PIN_NO(248) | 2) +/* #define MT6797_GPIO248__FUNC_I2S0_DI (MTK_PIN_NO(248) | 3) */ +#define MT6797_GPIO248__FUNC_I2S0_DI (MTK_PIN_NO(248) | 4) +#define MT6797_GPIO248__FUNC_PCM0_DI (MTK_PIN_NO(248) | 5) +#define MT6797_GPIO248__FUNC_SPI5_CS_C (MTK_PIN_NO(248) | 6) + +#define MT6797_GPIO249__FUNC_GPIO249 (MTK_PIN_NO(249) | 0) +/* #define MT6797_GPIO249__FUNC_I2S1_DO (MTK_PIN_NO(249) | 1) */ +#define MT6797_GPIO249__FUNC_I2S1_DO (MTK_PIN_NO(249) | 2) +/* #define MT6797_GPIO249__FUNC_I2S3_DO (MTK_PIN_NO(249) | 3) */ +#define MT6797_GPIO249__FUNC_I2S3_DO (MTK_PIN_NO(249) | 4) +#define MT6797_GPIO249__FUNC_PCM0_DO (MTK_PIN_NO(249) | 5) +#define MT6797_GPIO249__FUNC_SPI5_MO_C (MTK_PIN_NO(249) | 6) +#define MT6797_GPIO249__FUNC_TRAP_SRAM_PWR_BYPASS (MTK_PIN_NO(249) | 7) + +#define MT6797_GPIO250__FUNC_GPIO250 (MTK_PIN_NO(250) | 0) +#define MT6797_GPIO250__FUNC_SPI3_MI 
(MTK_PIN_NO(250) | 1) +#define MT6797_GPIO250__FUNC_SPI3_MO (MTK_PIN_NO(250) | 2) +#define MT6797_GPIO250__FUNC_IRTX_OUT (MTK_PIN_NO(250) | 3) +#define MT6797_GPIO250__FUNC_TP_URXD1_AO (MTK_PIN_NO(250) | 6) +#define MT6797_GPIO250__FUNC_DROP_ZONE (MTK_PIN_NO(250) | 7) + +#define MT6797_GPIO251__FUNC_GPIO251 (MTK_PIN_NO(251) | 0) +#define MT6797_GPIO251__FUNC_SPI3_MO (MTK_PIN_NO(251) | 1) +#define MT6797_GPIO251__FUNC_SPI3_MI (MTK_PIN_NO(251) | 2) +#define MT6797_GPIO251__FUNC_CMFLASH (MTK_PIN_NO(251) | 3) +#define MT6797_GPIO251__FUNC_TP_UTXD1_AO (MTK_PIN_NO(251) | 6) +#define MT6797_GPIO251__FUNC_C2K_RTCK (MTK_PIN_NO(251) | 7) + +#define MT6797_GPIO252__FUNC_GPIO252 (MTK_PIN_NO(252) | 0) +#define MT6797_GPIO252__FUNC_SPI3_CLK (MTK_PIN_NO(252) | 1) +#define MT6797_GPIO252__FUNC_SCL0_4 (MTK_PIN_NO(252) | 2) +#define MT6797_GPIO252__FUNC_PWM_D (MTK_PIN_NO(252) | 3) +#define MT6797_GPIO252__FUNC_C2K_TMS (MTK_PIN_NO(252) | 7) + +#define MT6797_GPIO253__FUNC_GPIO253 (MTK_PIN_NO(253) | 0) +#define MT6797_GPIO253__FUNC_SPI3_CS (MTK_PIN_NO(253) | 1) +#define MT6797_GPIO253__FUNC_SDA0_4 (MTK_PIN_NO(253) | 2) +#define MT6797_GPIO253__FUNC_PWM_A (MTK_PIN_NO(253) | 3) +#define MT6797_GPIO253__FUNC_C2K_TCK (MTK_PIN_NO(253) | 7) + +#define MT6797_GPIO254__FUNC_GPIO254 (MTK_PIN_NO(254) | 0) +#define MT6797_GPIO254__FUNC_I2S1_MCK (MTK_PIN_NO(254) | 1) +#define MT6797_GPIO254__FUNC_I2S2_MCK (MTK_PIN_NO(254) | 2) +#define MT6797_GPIO254__FUNC_I2S0_MCK (MTK_PIN_NO(254) | 3) +#define MT6797_GPIO254__FUNC_I2S3_MCK (MTK_PIN_NO(254) | 4) +#define MT6797_GPIO254__FUNC_CLKM0 (MTK_PIN_NO(254) | 5) +#define MT6797_GPIO254__FUNC_C2K_TDI (MTK_PIN_NO(254) | 7) + +#define MT6797_GPIO255__FUNC_GPIO255 (MTK_PIN_NO(255) | 0) +#define MT6797_GPIO255__FUNC_CLKM1 (MTK_PIN_NO(255) | 1) +#define MT6797_GPIO255__FUNC_DISP_PWM (MTK_PIN_NO(255) | 2) +#define MT6797_GPIO255__FUNC_PWM_B (MTK_PIN_NO(255) | 3) +#define MT6797_GPIO255__FUNC_TP_GPIO1_AO (MTK_PIN_NO(255) | 6) +#define MT6797_GPIO255__FUNC_C2K_TDO (MTK_PIN_NO(255) | 7) + +#define MT6797_GPIO256__FUNC_GPIO256 (MTK_PIN_NO(256) | 0) +#define MT6797_GPIO256__FUNC_CLKM2 (MTK_PIN_NO(256) | 1) +#define MT6797_GPIO256__FUNC_IRTX_OUT (MTK_PIN_NO(256) | 2) +#define MT6797_GPIO256__FUNC_PWM_C (MTK_PIN_NO(256) | 3) +#define MT6797_GPIO256__FUNC_TP_GPIO0_AO (MTK_PIN_NO(256) | 6) +#define MT6797_GPIO256__FUNC_C2K_NTRST (MTK_PIN_NO(256) | 7) + +#define MT6797_GPIO257__FUNC_GPIO257 (MTK_PIN_NO(257) | 0) +#define MT6797_GPIO257__FUNC_IO_JTAG_TMS (MTK_PIN_NO(257) | 1) +#define MT6797_GPIO257__FUNC_LTE_JTAG_TMS (MTK_PIN_NO(257) | 2) +#define MT6797_GPIO257__FUNC_DFD_TMS (MTK_PIN_NO(257) | 3) +#define MT6797_GPIO257__FUNC_DAP_SIB1_SWD (MTK_PIN_NO(257) | 4) +#define MT6797_GPIO257__FUNC_ANC_JTAG_TMS (MTK_PIN_NO(257) | 5) +#define MT6797_GPIO257__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(257) | 6) +#define MT6797_GPIO257__FUNC_C2K_DM_OTMS (MTK_PIN_NO(257) | 7) + +#define MT6797_GPIO258__FUNC_GPIO258 (MTK_PIN_NO(258) | 0) +#define MT6797_GPIO258__FUNC_IO_JTAG_TCK (MTK_PIN_NO(258) | 1) +#define MT6797_GPIO258__FUNC_LTE_JTAG_TCK (MTK_PIN_NO(258) | 2) +#define MT6797_GPIO258__FUNC_DFD_TCK_XI (MTK_PIN_NO(258) | 3) +#define MT6797_GPIO258__FUNC_DAP_SIB1_SWCK (MTK_PIN_NO(258) | 4) +#define MT6797_GPIO258__FUNC_ANC_JTAG_TCK (MTK_PIN_NO(258) | 5) +#define MT6797_GPIO258__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(258) | 6) +#define MT6797_GPIO258__FUNC_C2K_DM_OTCK (MTK_PIN_NO(258) | 7) + +#define MT6797_GPIO259__FUNC_GPIO259 (MTK_PIN_NO(259) | 0) +#define MT6797_GPIO259__FUNC_IO_JTAG_TDI (MTK_PIN_NO(259) | 1) +#define 
MT6797_GPIO259__FUNC_LTE_JTAG_TDI (MTK_PIN_NO(259) | 2) +#define MT6797_GPIO259__FUNC_DFD_TDI (MTK_PIN_NO(259) | 3) +#define MT6797_GPIO259__FUNC_ANC_JTAG_TDI (MTK_PIN_NO(259) | 5) +#define MT6797_GPIO259__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(259) | 6) +#define MT6797_GPIO259__FUNC_C2K_DM_OTDI (MTK_PIN_NO(259) | 7) + +#define MT6797_GPIO260__FUNC_GPIO260 (MTK_PIN_NO(260) | 0) +#define MT6797_GPIO260__FUNC_IO_JTAG_TDO (MTK_PIN_NO(260) | 1) +#define MT6797_GPIO260__FUNC_LTE_JTAG_TDO (MTK_PIN_NO(260) | 2) +#define MT6797_GPIO260__FUNC_DFD_TDO (MTK_PIN_NO(260) | 3) +#define MT6797_GPIO260__FUNC_ANC_JTAG_TDO (MTK_PIN_NO(260) | 5) +#define MT6797_GPIO260__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(260) | 6) +#define MT6797_GPIO260__FUNC_C2K_DM_OTDO (MTK_PIN_NO(260) | 7) + +#define MT6797_GPIO261__FUNC_GPIO261 (MTK_PIN_NO(261) | 0) +#define MT6797_GPIO261__FUNC_LTE_JTAG_TRSTN (MTK_PIN_NO(261) | 2) +#define MT6797_GPIO261__FUNC_DFD_NTRST (MTK_PIN_NO(261) | 3) +#define MT6797_GPIO261__FUNC_ANC_JTAG_TRSTN (MTK_PIN_NO(261) | 5) +#define MT6797_GPIO261__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(261) | 6) +#define MT6797_GPIO261__FUNC_C2K_DM_JTINTP (MTK_PIN_NO(261) | 7) + +#endif /* __DTS_MT6797_PINFUNC_H */ diff --git a/include/dt-bindings/pinctrl/pads-imx8qm.h b/include/dt-bindings/pinctrl/pads-imx8qm.h new file mode 100644 index 000000000000..ae7b2942da69 --- /dev/null +++ b/include/dt-bindings/pinctrl/pads-imx8qm.h @@ -0,0 +1,960 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017~2018 NXP + */ + +#ifndef _IMX8QM_PADS_H +#define _IMX8QM_PADS_H + +/* pin id */ +#define IMX8QM_SIM0_CLK 0 +#define IMX8QM_SIM0_RST 1 +#define IMX8QM_SIM0_IO 2 +#define IMX8QM_SIM0_PD 3 +#define IMX8QM_SIM0_POWER_EN 4 +#define IMX8QM_SIM0_GPIO0_00 5 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_SIM 6 +#define IMX8QM_M40_I2C0_SCL 7 +#define IMX8QM_M40_I2C0_SDA 8 +#define IMX8QM_M40_GPIO0_00 9 +#define IMX8QM_M40_GPIO0_01 10 +#define IMX8QM_M41_I2C0_SCL 11 +#define IMX8QM_M41_I2C0_SDA 12 +#define IMX8QM_M41_GPIO0_00 13 +#define IMX8QM_M41_GPIO0_01 14 +#define IMX8QM_GPT0_CLK 15 +#define IMX8QM_GPT0_CAPTURE 16 +#define IMX8QM_GPT0_COMPARE 17 +#define IMX8QM_GPT1_CLK 18 +#define IMX8QM_GPT1_CAPTURE 19 +#define IMX8QM_GPT1_COMPARE 20 +#define IMX8QM_UART0_RX 21 +#define IMX8QM_UART0_TX 22 +#define IMX8QM_UART0_RTS_B 23 +#define IMX8QM_UART0_CTS_B 24 +#define IMX8QM_UART1_TX 25 +#define IMX8QM_UART1_RX 26 +#define IMX8QM_UART1_RTS_B 27 +#define IMX8QM_UART1_CTS_B 28 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOLH 29 +#define IMX8QM_SCU_PMIC_MEMC_ON 30 +#define IMX8QM_SCU_WDOG_OUT 31 +#define IMX8QM_PMIC_I2C_SDA 32 +#define IMX8QM_PMIC_I2C_SCL 33 +#define IMX8QM_PMIC_EARLY_WARNING 34 +#define IMX8QM_PMIC_INT_B 35 +#define IMX8QM_SCU_GPIO0_00 36 +#define IMX8QM_SCU_GPIO0_01 37 +#define IMX8QM_SCU_GPIO0_02 38 +#define IMX8QM_SCU_GPIO0_03 39 +#define IMX8QM_SCU_GPIO0_04 40 +#define IMX8QM_SCU_GPIO0_05 41 +#define IMX8QM_SCU_GPIO0_06 42 +#define IMX8QM_SCU_GPIO0_07 43 +#define IMX8QM_SCU_BOOT_MODE0 44 +#define IMX8QM_SCU_BOOT_MODE1 45 +#define IMX8QM_SCU_BOOT_MODE2 46 +#define IMX8QM_SCU_BOOT_MODE3 47 +#define IMX8QM_SCU_BOOT_MODE4 48 +#define IMX8QM_SCU_BOOT_MODE5 49 +#define IMX8QM_LVDS0_GPIO00 50 +#define IMX8QM_LVDS0_GPIO01 51 +#define IMX8QM_LVDS0_I2C0_SCL 52 +#define IMX8QM_LVDS0_I2C0_SDA 53 +#define IMX8QM_LVDS0_I2C1_SCL 54 +#define IMX8QM_LVDS0_I2C1_SDA 55 +#define IMX8QM_LVDS1_GPIO00 56 +#define IMX8QM_LVDS1_GPIO01 57 +#define IMX8QM_LVDS1_I2C0_SCL 58 +#define IMX8QM_LVDS1_I2C0_SDA 59 +#define 
IMX8QM_LVDS1_I2C1_SCL 60 +#define IMX8QM_LVDS1_I2C1_SDA 61 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_LVDSGPIO 62 +#define IMX8QM_MIPI_DSI0_I2C0_SCL 63 +#define IMX8QM_MIPI_DSI0_I2C0_SDA 64 +#define IMX8QM_MIPI_DSI0_GPIO0_00 65 +#define IMX8QM_MIPI_DSI0_GPIO0_01 66 +#define IMX8QM_MIPI_DSI1_I2C0_SCL 67 +#define IMX8QM_MIPI_DSI1_I2C0_SDA 68 +#define IMX8QM_MIPI_DSI1_GPIO0_00 69 +#define IMX8QM_MIPI_DSI1_GPIO0_01 70 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_MIPIDSIGPIO 71 +#define IMX8QM_MIPI_CSI0_MCLK_OUT 72 +#define IMX8QM_MIPI_CSI0_I2C0_SCL 73 +#define IMX8QM_MIPI_CSI0_I2C0_SDA 74 +#define IMX8QM_MIPI_CSI0_GPIO0_00 75 +#define IMX8QM_MIPI_CSI0_GPIO0_01 76 +#define IMX8QM_MIPI_CSI1_MCLK_OUT 77 +#define IMX8QM_MIPI_CSI1_GPIO0_00 78 +#define IMX8QM_MIPI_CSI1_GPIO0_01 79 +#define IMX8QM_MIPI_CSI1_I2C0_SCL 80 +#define IMX8QM_MIPI_CSI1_I2C0_SDA 81 +#define IMX8QM_HDMI_TX0_TS_SCL 82 +#define IMX8QM_HDMI_TX0_TS_SDA 83 +#define IMX8QM_COMP_CTL_GPIO_3V3_HDMIGPIO 84 +#define IMX8QM_ESAI1_FSR 85 +#define IMX8QM_ESAI1_FST 86 +#define IMX8QM_ESAI1_SCKR 87 +#define IMX8QM_ESAI1_SCKT 88 +#define IMX8QM_ESAI1_TX0 89 +#define IMX8QM_ESAI1_TX1 90 +#define IMX8QM_ESAI1_TX2_RX3 91 +#define IMX8QM_ESAI1_TX3_RX2 92 +#define IMX8QM_ESAI1_TX4_RX1 93 +#define IMX8QM_ESAI1_TX5_RX0 94 +#define IMX8QM_SPDIF0_RX 95 +#define IMX8QM_SPDIF0_TX 96 +#define IMX8QM_SPDIF0_EXT_CLK 97 +#define IMX8QM_SPI3_SCK 98 +#define IMX8QM_SPI3_SDO 99 +#define IMX8QM_SPI3_SDI 100 +#define IMX8QM_SPI3_CS0 101 +#define IMX8QM_SPI3_CS1 102 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHB 103 +#define IMX8QM_ESAI0_FSR 104 +#define IMX8QM_ESAI0_FST 105 +#define IMX8QM_ESAI0_SCKR 106 +#define IMX8QM_ESAI0_SCKT 107 +#define IMX8QM_ESAI0_TX0 108 +#define IMX8QM_ESAI0_TX1 109 +#define IMX8QM_ESAI0_TX2_RX3 110 +#define IMX8QM_ESAI0_TX3_RX2 111 +#define IMX8QM_ESAI0_TX4_RX1 112 +#define IMX8QM_ESAI0_TX5_RX0 113 +#define IMX8QM_MCLK_IN0 114 +#define IMX8QM_MCLK_OUT0 115 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHC 116 +#define IMX8QM_SPI0_SCK 117 +#define IMX8QM_SPI0_SDO 118 +#define IMX8QM_SPI0_SDI 119 +#define IMX8QM_SPI0_CS0 120 +#define IMX8QM_SPI0_CS1 121 +#define IMX8QM_SPI2_SCK 122 +#define IMX8QM_SPI2_SDO 123 +#define IMX8QM_SPI2_SDI 124 +#define IMX8QM_SPI2_CS0 125 +#define IMX8QM_SPI2_CS1 126 +#define IMX8QM_SAI1_RXC 127 +#define IMX8QM_SAI1_RXD 128 +#define IMX8QM_SAI1_RXFS 129 +#define IMX8QM_SAI1_TXC 130 +#define IMX8QM_SAI1_TXD 131 +#define IMX8QM_SAI1_TXFS 132 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHT 133 +#define IMX8QM_ADC_IN7 134 +#define IMX8QM_ADC_IN6 135 +#define IMX8QM_ADC_IN5 136 +#define IMX8QM_ADC_IN4 137 +#define IMX8QM_ADC_IN3 138 +#define IMX8QM_ADC_IN2 139 +#define IMX8QM_ADC_IN1 140 +#define IMX8QM_ADC_IN0 141 +#define IMX8QM_MLB_SIG 142 +#define IMX8QM_MLB_CLK 143 +#define IMX8QM_MLB_DATA 144 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOLHT 145 +#define IMX8QM_FLEXCAN0_RX 146 +#define IMX8QM_FLEXCAN0_TX 147 +#define IMX8QM_FLEXCAN1_RX 148 +#define IMX8QM_FLEXCAN1_TX 149 +#define IMX8QM_FLEXCAN2_RX 150 +#define IMX8QM_FLEXCAN2_TX 151 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOTHR 152 +#define IMX8QM_USB_SS3_TC0 153 +#define IMX8QM_USB_SS3_TC1 154 +#define IMX8QM_USB_SS3_TC2 155 +#define IMX8QM_USB_SS3_TC3 156 +#define IMX8QM_COMP_CTL_GPIO_3V3_USB3IO 157 +#define IMX8QM_USDHC1_RESET_B 158 +#define IMX8QM_USDHC1_VSELECT 159 +#define IMX8QM_USDHC2_RESET_B 160 +#define IMX8QM_USDHC2_VSELECT 161 +#define IMX8QM_USDHC2_WP 162 +#define IMX8QM_USDHC2_CD_B 163 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSELSEP 164 +#define IMX8QM_ENET0_MDIO 165 
+#define IMX8QM_ENET0_MDC 166 +#define IMX8QM_ENET0_REFCLK_125M_25M 167 +#define IMX8QM_ENET1_REFCLK_125M_25M 168 +#define IMX8QM_ENET1_MDIO 169 +#define IMX8QM_ENET1_MDC 170 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOCT 171 +#define IMX8QM_QSPI1A_SS0_B 172 +#define IMX8QM_QSPI1A_SS1_B 173 +#define IMX8QM_QSPI1A_SCLK 174 +#define IMX8QM_QSPI1A_DQS 175 +#define IMX8QM_QSPI1A_DATA3 176 +#define IMX8QM_QSPI1A_DATA2 177 +#define IMX8QM_QSPI1A_DATA1 178 +#define IMX8QM_QSPI1A_DATA0 179 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_QSPI1 180 +#define IMX8QM_QSPI0A_DATA0 181 +#define IMX8QM_QSPI0A_DATA1 182 +#define IMX8QM_QSPI0A_DATA2 183 +#define IMX8QM_QSPI0A_DATA3 184 +#define IMX8QM_QSPI0A_DQS 185 +#define IMX8QM_QSPI0A_SS0_B 186 +#define IMX8QM_QSPI0A_SS1_B 187 +#define IMX8QM_QSPI0A_SCLK 188 +#define IMX8QM_QSPI0B_SCLK 189 +#define IMX8QM_QSPI0B_DATA0 190 +#define IMX8QM_QSPI0B_DATA1 191 +#define IMX8QM_QSPI0B_DATA2 192 +#define IMX8QM_QSPI0B_DATA3 193 +#define IMX8QM_QSPI0B_DQS 194 +#define IMX8QM_QSPI0B_SS0_B 195 +#define IMX8QM_QSPI0B_SS1_B 196 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_QSPI0 197 +#define IMX8QM_PCIE_CTRL0_CLKREQ_B 198 +#define IMX8QM_PCIE_CTRL0_WAKE_B 199 +#define IMX8QM_PCIE_CTRL0_PERST_B 200 +#define IMX8QM_PCIE_CTRL1_CLKREQ_B 201 +#define IMX8QM_PCIE_CTRL1_WAKE_B 202 +#define IMX8QM_PCIE_CTRL1_PERST_B 203 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_PCIESEP 204 +#define IMX8QM_USB_HSIC0_DATA 205 +#define IMX8QM_USB_HSIC0_STROBE 206 +#define IMX8QM_CALIBRATION_0_HSIC 207 +#define IMX8QM_CALIBRATION_1_HSIC 208 +#define IMX8QM_EMMC0_CLK 209 +#define IMX8QM_EMMC0_CMD 210 +#define IMX8QM_EMMC0_DATA0 211 +#define IMX8QM_EMMC0_DATA1 212 +#define IMX8QM_EMMC0_DATA2 213 +#define IMX8QM_EMMC0_DATA3 214 +#define IMX8QM_EMMC0_DATA4 215 +#define IMX8QM_EMMC0_DATA5 216 +#define IMX8QM_EMMC0_DATA6 217 +#define IMX8QM_EMMC0_DATA7 218 +#define IMX8QM_EMMC0_STROBE 219 +#define IMX8QM_EMMC0_RESET_B 220 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_SD1FIX 221 +#define IMX8QM_USDHC1_CLK 222 +#define IMX8QM_USDHC1_CMD 223 +#define IMX8QM_USDHC1_DATA0 224 +#define IMX8QM_USDHC1_DATA1 225 +#define IMX8QM_CTL_NAND_RE_P_N 226 +#define IMX8QM_USDHC1_DATA2 227 +#define IMX8QM_USDHC1_DATA3 228 +#define IMX8QM_CTL_NAND_DQS_P_N 229 +#define IMX8QM_USDHC1_DATA4 230 +#define IMX8QM_USDHC1_DATA5 231 +#define IMX8QM_USDHC1_DATA6 232 +#define IMX8QM_USDHC1_DATA7 233 +#define IMX8QM_USDHC1_STROBE 234 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSEL2 235 +#define IMX8QM_USDHC2_CLK 236 +#define IMX8QM_USDHC2_CMD 237 +#define IMX8QM_USDHC2_DATA0 238 +#define IMX8QM_USDHC2_DATA1 239 +#define IMX8QM_USDHC2_DATA2 240 +#define IMX8QM_USDHC2_DATA3 241 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSEL3 242 +#define IMX8QM_ENET0_RGMII_TXC 243 +#define IMX8QM_ENET0_RGMII_TX_CTL 244 +#define IMX8QM_ENET0_RGMII_TXD0 245 +#define IMX8QM_ENET0_RGMII_TXD1 246 +#define IMX8QM_ENET0_RGMII_TXD2 247 +#define IMX8QM_ENET0_RGMII_TXD3 248 +#define IMX8QM_ENET0_RGMII_RXC 249 +#define IMX8QM_ENET0_RGMII_RX_CTL 250 +#define IMX8QM_ENET0_RGMII_RXD0 251 +#define IMX8QM_ENET0_RGMII_RXD1 252 +#define IMX8QM_ENET0_RGMII_RXD2 253 +#define IMX8QM_ENET0_RGMII_RXD3 254 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB 255 +#define IMX8QM_ENET1_RGMII_TXC 256 +#define IMX8QM_ENET1_RGMII_TX_CTL 257 +#define IMX8QM_ENET1_RGMII_TXD0 258 +#define IMX8QM_ENET1_RGMII_TXD1 259 +#define IMX8QM_ENET1_RGMII_TXD2 260 +#define IMX8QM_ENET1_RGMII_TXD3 261 +#define IMX8QM_ENET1_RGMII_RXC 262 +#define IMX8QM_ENET1_RGMII_RX_CTL 263 +#define IMX8QM_ENET1_RGMII_RXD0 264 +#define 
IMX8QM_ENET1_RGMII_RXD1 265 +#define IMX8QM_ENET1_RGMII_RXD2 266 +#define IMX8QM_ENET1_RGMII_RXD3 267 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA 268 + +/* + * format: <pin_id mux_mode> + */ +#define IMX8QM_SIM0_CLK_DMA_SIM0_CLK IMX8QM_SIM0_CLK 0 +#define IMX8QM_SIM0_CLK_LSIO_GPIO0_IO00 IMX8QM_SIM0_CLK 3 +#define IMX8QM_SIM0_RST_DMA_SIM0_RST IMX8QM_SIM0_RST 0 +#define IMX8QM_SIM0_RST_LSIO_GPIO0_IO01 IMX8QM_SIM0_RST 3 +#define IMX8QM_SIM0_IO_DMA_SIM0_IO IMX8QM_SIM0_IO 0 +#define IMX8QM_SIM0_IO_LSIO_GPIO0_IO02 IMX8QM_SIM0_IO 3 +#define IMX8QM_SIM0_PD_DMA_SIM0_PD IMX8QM_SIM0_PD 0 +#define IMX8QM_SIM0_PD_DMA_I2C3_SCL IMX8QM_SIM0_PD 1 +#define IMX8QM_SIM0_PD_LSIO_GPIO0_IO03 IMX8QM_SIM0_PD 3 +#define IMX8QM_SIM0_POWER_EN_DMA_SIM0_POWER_EN IMX8QM_SIM0_POWER_EN 0 +#define IMX8QM_SIM0_POWER_EN_DMA_I2C3_SDA IMX8QM_SIM0_POWER_EN 1 +#define IMX8QM_SIM0_POWER_EN_LSIO_GPIO0_IO04 IMX8QM_SIM0_POWER_EN 3 +#define IMX8QM_SIM0_GPIO0_00_DMA_SIM0_POWER_EN IMX8QM_SIM0_GPIO0_00 0 +#define IMX8QM_SIM0_GPIO0_00_LSIO_GPIO0_IO05 IMX8QM_SIM0_GPIO0_00 3 +#define IMX8QM_M40_I2C0_SCL_M40_I2C0_SCL IMX8QM_M40_I2C0_SCL 0 +#define IMX8QM_M40_I2C0_SCL_M40_UART0_RX IMX8QM_M40_I2C0_SCL 1 +#define IMX8QM_M40_I2C0_SCL_M40_GPIO0_IO02 IMX8QM_M40_I2C0_SCL 2 +#define IMX8QM_M40_I2C0_SCL_LSIO_GPIO0_IO06 IMX8QM_M40_I2C0_SCL 3 +#define IMX8QM_M40_I2C0_SDA_M40_I2C0_SDA IMX8QM_M40_I2C0_SDA 0 +#define IMX8QM_M40_I2C0_SDA_M40_UART0_TX IMX8QM_M40_I2C0_SDA 1 +#define IMX8QM_M40_I2C0_SDA_M40_GPIO0_IO03 IMX8QM_M40_I2C0_SDA 2 +#define IMX8QM_M40_I2C0_SDA_LSIO_GPIO0_IO07 IMX8QM_M40_I2C0_SDA 3 +#define IMX8QM_M40_GPIO0_00_M40_GPIO0_IO00 IMX8QM_M40_GPIO0_00 0 +#define IMX8QM_M40_GPIO0_00_M40_TPM0_CH0 IMX8QM_M40_GPIO0_00 1 +#define IMX8QM_M40_GPIO0_00_DMA_UART4_RX IMX8QM_M40_GPIO0_00 2 +#define IMX8QM_M40_GPIO0_00_LSIO_GPIO0_IO08 IMX8QM_M40_GPIO0_00 3 +#define IMX8QM_M40_GPIO0_01_M40_GPIO0_IO01 IMX8QM_M40_GPIO0_01 0 +#define IMX8QM_M40_GPIO0_01_M40_TPM0_CH1 IMX8QM_M40_GPIO0_01 1 +#define IMX8QM_M40_GPIO0_01_DMA_UART4_TX IMX8QM_M40_GPIO0_01 2 +#define IMX8QM_M40_GPIO0_01_LSIO_GPIO0_IO09 IMX8QM_M40_GPIO0_01 3 +#define IMX8QM_M41_I2C0_SCL_M41_I2C0_SCL IMX8QM_M41_I2C0_SCL 0 +#define IMX8QM_M41_I2C0_SCL_M41_UART0_RX IMX8QM_M41_I2C0_SCL 1 +#define IMX8QM_M41_I2C0_SCL_M41_GPIO0_IO02 IMX8QM_M41_I2C0_SCL 2 +#define IMX8QM_M41_I2C0_SCL_LSIO_GPIO0_IO10 IMX8QM_M41_I2C0_SCL 3 +#define IMX8QM_M41_I2C0_SDA_M41_I2C0_SDA IMX8QM_M41_I2C0_SDA 0 +#define IMX8QM_M41_I2C0_SDA_M41_UART0_TX IMX8QM_M41_I2C0_SDA 1 +#define IMX8QM_M41_I2C0_SDA_M41_GPIO0_IO03 IMX8QM_M41_I2C0_SDA 2 +#define IMX8QM_M41_I2C0_SDA_LSIO_GPIO0_IO11 IMX8QM_M41_I2C0_SDA 3 +#define IMX8QM_M41_GPIO0_00_M41_GPIO0_IO00 IMX8QM_M41_GPIO0_00 0 +#define IMX8QM_M41_GPIO0_00_M41_TPM0_CH0 IMX8QM_M41_GPIO0_00 1 +#define IMX8QM_M41_GPIO0_00_DMA_UART3_RX IMX8QM_M41_GPIO0_00 2 +#define IMX8QM_M41_GPIO0_00_LSIO_GPIO0_IO12 IMX8QM_M41_GPIO0_00 3 +#define IMX8QM_M41_GPIO0_01_M41_GPIO0_IO01 IMX8QM_M41_GPIO0_01 0 +#define IMX8QM_M41_GPIO0_01_M41_TPM0_CH1 IMX8QM_M41_GPIO0_01 1 +#define IMX8QM_M41_GPIO0_01_DMA_UART3_TX IMX8QM_M41_GPIO0_01 2 +#define IMX8QM_M41_GPIO0_01_LSIO_GPIO0_IO13 IMX8QM_M41_GPIO0_01 3 +#define IMX8QM_GPT0_CLK_LSIO_GPT0_CLK IMX8QM_GPT0_CLK 0 +#define IMX8QM_GPT0_CLK_DMA_I2C1_SCL IMX8QM_GPT0_CLK 1 +#define IMX8QM_GPT0_CLK_LSIO_KPP0_COL4 IMX8QM_GPT0_CLK 2 +#define IMX8QM_GPT0_CLK_LSIO_GPIO0_IO14 IMX8QM_GPT0_CLK 3 +#define IMX8QM_GPT0_CAPTURE_LSIO_GPT0_CAPTURE IMX8QM_GPT0_CAPTURE 0 +#define IMX8QM_GPT0_CAPTURE_DMA_I2C1_SDA IMX8QM_GPT0_CAPTURE 1 +#define IMX8QM_GPT0_CAPTURE_LSIO_KPP0_COL5 
IMX8QM_GPT0_CAPTURE 2 +#define IMX8QM_GPT0_CAPTURE_LSIO_GPIO0_IO15 IMX8QM_GPT0_CAPTURE 3 +#define IMX8QM_GPT0_COMPARE_LSIO_GPT0_COMPARE IMX8QM_GPT0_COMPARE 0 +#define IMX8QM_GPT0_COMPARE_LSIO_PWM3_OUT IMX8QM_GPT0_COMPARE 1 +#define IMX8QM_GPT0_COMPARE_LSIO_KPP0_COL6 IMX8QM_GPT0_COMPARE 2 +#define IMX8QM_GPT0_COMPARE_LSIO_GPIO0_IO16 IMX8QM_GPT0_COMPARE 3 +#define IMX8QM_GPT1_CLK_LSIO_GPT1_CLK IMX8QM_GPT1_CLK 0 +#define IMX8QM_GPT1_CLK_DMA_I2C2_SCL IMX8QM_GPT1_CLK 1 +#define IMX8QM_GPT1_CLK_LSIO_KPP0_COL7 IMX8QM_GPT1_CLK 2 +#define IMX8QM_GPT1_CLK_LSIO_GPIO0_IO17 IMX8QM_GPT1_CLK 3 +#define IMX8QM_GPT1_CAPTURE_LSIO_GPT1_CAPTURE IMX8QM_GPT1_CAPTURE 0 +#define IMX8QM_GPT1_CAPTURE_DMA_I2C2_SDA IMX8QM_GPT1_CAPTURE 1 +#define IMX8QM_GPT1_CAPTURE_LSIO_KPP0_ROW4 IMX8QM_GPT1_CAPTURE 2 +#define IMX8QM_GPT1_CAPTURE_LSIO_GPIO0_IO18 IMX8QM_GPT1_CAPTURE 3 +#define IMX8QM_GPT1_COMPARE_LSIO_GPT1_COMPARE IMX8QM_GPT1_COMPARE 0 +#define IMX8QM_GPT1_COMPARE_LSIO_PWM2_OUT IMX8QM_GPT1_COMPARE 1 +#define IMX8QM_GPT1_COMPARE_LSIO_KPP0_ROW5 IMX8QM_GPT1_COMPARE 2 +#define IMX8QM_GPT1_COMPARE_LSIO_GPIO0_IO19 IMX8QM_GPT1_COMPARE 3 +#define IMX8QM_UART0_RX_DMA_UART0_RX IMX8QM_UART0_RX 0 +#define IMX8QM_UART0_RX_SCU_UART0_RX IMX8QM_UART0_RX 1 +#define IMX8QM_UART0_RX_LSIO_GPIO0_IO20 IMX8QM_UART0_RX 3 +#define IMX8QM_UART0_TX_DMA_UART0_TX IMX8QM_UART0_TX 0 +#define IMX8QM_UART0_TX_SCU_UART0_TX IMX8QM_UART0_TX 1 +#define IMX8QM_UART0_TX_LSIO_GPIO0_IO21 IMX8QM_UART0_TX 3 +#define IMX8QM_UART0_RTS_B_DMA_UART0_RTS_B IMX8QM_UART0_RTS_B 0 +#define IMX8QM_UART0_RTS_B_LSIO_PWM0_OUT IMX8QM_UART0_RTS_B 1 +#define IMX8QM_UART0_RTS_B_DMA_UART2_RX IMX8QM_UART0_RTS_B 2 +#define IMX8QM_UART0_RTS_B_LSIO_GPIO0_IO22 IMX8QM_UART0_RTS_B 3 +#define IMX8QM_UART0_CTS_B_DMA_UART0_CTS_B IMX8QM_UART0_CTS_B 0 +#define IMX8QM_UART0_CTS_B_LSIO_PWM1_OUT IMX8QM_UART0_CTS_B 1 +#define IMX8QM_UART0_CTS_B_DMA_UART2_TX IMX8QM_UART0_CTS_B 2 +#define IMX8QM_UART0_CTS_B_LSIO_GPIO0_IO23 IMX8QM_UART0_CTS_B 3 +#define IMX8QM_UART1_TX_DMA_UART1_TX IMX8QM_UART1_TX 0 +#define IMX8QM_UART1_TX_DMA_SPI3_SCK IMX8QM_UART1_TX 1 +#define IMX8QM_UART1_TX_LSIO_GPIO0_IO24 IMX8QM_UART1_TX 3 +#define IMX8QM_UART1_RX_DMA_UART1_RX IMX8QM_UART1_RX 0 +#define IMX8QM_UART1_RX_DMA_SPI3_SDO IMX8QM_UART1_RX 1 +#define IMX8QM_UART1_RX_LSIO_GPIO0_IO25 IMX8QM_UART1_RX 3 +#define IMX8QM_UART1_RTS_B_DMA_UART1_RTS_B IMX8QM_UART1_RTS_B 0 +#define IMX8QM_UART1_RTS_B_DMA_SPI3_SDI IMX8QM_UART1_RTS_B 1 +#define IMX8QM_UART1_RTS_B_DMA_UART1_CTS_B IMX8QM_UART1_RTS_B 2 +#define IMX8QM_UART1_RTS_B_LSIO_GPIO0_IO26 IMX8QM_UART1_RTS_B 3 +#define IMX8QM_UART1_CTS_B_DMA_UART1_CTS_B IMX8QM_UART1_CTS_B 0 +#define IMX8QM_UART1_CTS_B_DMA_SPI3_CS0 IMX8QM_UART1_CTS_B 1 +#define IMX8QM_UART1_CTS_B_DMA_UART1_RTS_B IMX8QM_UART1_CTS_B 2 +#define IMX8QM_UART1_CTS_B_LSIO_GPIO0_IO27 IMX8QM_UART1_CTS_B 3 +#define IMX8QM_SCU_PMIC_MEMC_ON_SCU_GPIO0_IOXX_PMIC_MEMC_ON IMX8QM_SCU_PMIC_MEMC_ON 0 +#define IMX8QM_SCU_WDOG_OUT_SCU_WDOG0_WDOG_OUT IMX8QM_SCU_WDOG_OUT 0 +#define IMX8QM_PMIC_I2C_SDA_SCU_PMIC_I2C_SDA IMX8QM_PMIC_I2C_SDA 0 +#define IMX8QM_PMIC_I2C_SCL_SCU_PMIC_I2C_SCL IMX8QM_PMIC_I2C_SCL 0 +#define IMX8QM_PMIC_EARLY_WARNING_SCU_PMIC_EARLY_WARNING IMX8QM_PMIC_EARLY_WARNING 0 +#define IMX8QM_PMIC_INT_B_SCU_DIMX8QMMIC_INT_B IMX8QM_PMIC_INT_B 0 +#define IMX8QM_SCU_GPIO0_00_SCU_GPIO0_IO00 IMX8QM_SCU_GPIO0_00 0 +#define IMX8QM_SCU_GPIO0_00_SCU_UART0_RX IMX8QM_SCU_GPIO0_00 1 +#define IMX8QM_SCU_GPIO0_00_LSIO_GPIO0_IO28 IMX8QM_SCU_GPIO0_00 3 +#define IMX8QM_SCU_GPIO0_01_SCU_GPIO0_IO01 IMX8QM_SCU_GPIO0_01 0 +#define 
IMX8QM_SCU_GPIO0_01_SCU_UART0_TX IMX8QM_SCU_GPIO0_01 1 +#define IMX8QM_SCU_GPIO0_01_LSIO_GPIO0_IO29 IMX8QM_SCU_GPIO0_01 3 +#define IMX8QM_SCU_GPIO0_02_SCU_GPIO0_IO02 IMX8QM_SCU_GPIO0_02 0 +#define IMX8QM_SCU_GPIO0_02_SCU_GPIO0_IOXX_PMIC_GPU0_ON IMX8QM_SCU_GPIO0_02 1 +#define IMX8QM_SCU_GPIO0_02_LSIO_GPIO0_IO30 IMX8QM_SCU_GPIO0_02 3 +#define IMX8QM_SCU_GPIO0_03_SCU_GPIO0_IO03 IMX8QM_SCU_GPIO0_03 0 +#define IMX8QM_SCU_GPIO0_03_SCU_GPIO0_IOXX_PMIC_GPU1_ON IMX8QM_SCU_GPIO0_03 1 +#define IMX8QM_SCU_GPIO0_03_LSIO_GPIO0_IO31 IMX8QM_SCU_GPIO0_03 3 +#define IMX8QM_SCU_GPIO0_04_SCU_GPIO0_IO04 IMX8QM_SCU_GPIO0_04 0 +#define IMX8QM_SCU_GPIO0_04_SCU_GPIO0_IOXX_PMIC_A72_ON IMX8QM_SCU_GPIO0_04 1 +#define IMX8QM_SCU_GPIO0_04_LSIO_GPIO1_IO00 IMX8QM_SCU_GPIO0_04 3 +#define IMX8QM_SCU_GPIO0_05_SCU_GPIO0_IO05 IMX8QM_SCU_GPIO0_05 0 +#define IMX8QM_SCU_GPIO0_05_SCU_GPIO0_IOXX_PMIC_A53_ON IMX8QM_SCU_GPIO0_05 1 +#define IMX8QM_SCU_GPIO0_05_LSIO_GPIO1_IO01 IMX8QM_SCU_GPIO0_05 3 +#define IMX8QM_SCU_GPIO0_06_SCU_GPIO0_IO06 IMX8QM_SCU_GPIO0_06 0 +#define IMX8QM_SCU_GPIO0_06_SCU_TPM0_CH0 IMX8QM_SCU_GPIO0_06 1 +#define IMX8QM_SCU_GPIO0_06_LSIO_GPIO1_IO02 IMX8QM_SCU_GPIO0_06 3 +#define IMX8QM_SCU_GPIO0_07_SCU_GPIO0_IO07 IMX8QM_SCU_GPIO0_07 0 +#define IMX8QM_SCU_GPIO0_07_SCU_TPM0_CH1 IMX8QM_SCU_GPIO0_07 1 +#define IMX8QM_SCU_GPIO0_07_SCU_DSC_RTC_CLOCK_OUTPUT_32K IMX8QM_SCU_GPIO0_07 2 +#define IMX8QM_SCU_GPIO0_07_LSIO_GPIO1_IO03 IMX8QM_SCU_GPIO0_07 3 +#define IMX8QM_SCU_BOOT_MODE0_SCU_DSC_BOOT_MODE0 IMX8QM_SCU_BOOT_MODE0 0 +#define IMX8QM_SCU_BOOT_MODE1_SCU_DSC_BOOT_MODE1 IMX8QM_SCU_BOOT_MODE1 0 +#define IMX8QM_SCU_BOOT_MODE2_SCU_DSC_BOOT_MODE2 IMX8QM_SCU_BOOT_MODE2 0 +#define IMX8QM_SCU_BOOT_MODE3_SCU_DSC_BOOT_MODE3 IMX8QM_SCU_BOOT_MODE3 0 +#define IMX8QM_SCU_BOOT_MODE4_SCU_DSC_BOOT_MODE4 IMX8QM_SCU_BOOT_MODE4 0 +#define IMX8QM_SCU_BOOT_MODE4_SCU_PMIC_I2C_SCL IMX8QM_SCU_BOOT_MODE4 1 +#define IMX8QM_SCU_BOOT_MODE5_SCU_DSC_BOOT_MODE5 IMX8QM_SCU_BOOT_MODE5 0 +#define IMX8QM_SCU_BOOT_MODE5_SCU_PMIC_I2C_SDA IMX8QM_SCU_BOOT_MODE5 1 +#define IMX8QM_LVDS0_GPIO00_LVDS0_GPIO0_IO00 IMX8QM_LVDS0_GPIO00 0 +#define IMX8QM_LVDS0_GPIO00_LVDS0_PWM0_OUT IMX8QM_LVDS0_GPIO00 1 +#define IMX8QM_LVDS0_GPIO00_LSIO_GPIO1_IO04 IMX8QM_LVDS0_GPIO00 3 +#define IMX8QM_LVDS0_GPIO01_LVDS0_GPIO0_IO01 IMX8QM_LVDS0_GPIO01 0 +#define IMX8QM_LVDS0_GPIO01_LSIO_GPIO1_IO05 IMX8QM_LVDS0_GPIO01 3 +#define IMX8QM_LVDS0_I2C0_SCL_LVDS0_I2C0_SCL IMX8QM_LVDS0_I2C0_SCL 0 +#define IMX8QM_LVDS0_I2C0_SCL_LVDS0_GPIO0_IO02 IMX8QM_LVDS0_I2C0_SCL 1 +#define IMX8QM_LVDS0_I2C0_SCL_LSIO_GPIO1_IO06 IMX8QM_LVDS0_I2C0_SCL 3 +#define IMX8QM_LVDS0_I2C0_SDA_LVDS0_I2C0_SDA IMX8QM_LVDS0_I2C0_SDA 0 +#define IMX8QM_LVDS0_I2C0_SDA_LVDS0_GPIO0_IO03 IMX8QM_LVDS0_I2C0_SDA 1 +#define IMX8QM_LVDS0_I2C0_SDA_LSIO_GPIO1_IO07 IMX8QM_LVDS0_I2C0_SDA 3 +#define IMX8QM_LVDS0_I2C1_SCL_LVDS0_I2C1_SCL IMX8QM_LVDS0_I2C1_SCL 0 +#define IMX8QM_LVDS0_I2C1_SCL_DMA_UART2_TX IMX8QM_LVDS0_I2C1_SCL 1 +#define IMX8QM_LVDS0_I2C1_SCL_LSIO_GPIO1_IO08 IMX8QM_LVDS0_I2C1_SCL 3 +#define IMX8QM_LVDS0_I2C1_SDA_LVDS0_I2C1_SDA IMX8QM_LVDS0_I2C1_SDA 0 +#define IMX8QM_LVDS0_I2C1_SDA_DMA_UART2_RX IMX8QM_LVDS0_I2C1_SDA 1 +#define IMX8QM_LVDS0_I2C1_SDA_LSIO_GPIO1_IO09 IMX8QM_LVDS0_I2C1_SDA 3 +#define IMX8QM_LVDS1_GPIO00_LVDS1_GPIO0_IO00 IMX8QM_LVDS1_GPIO00 0 +#define IMX8QM_LVDS1_GPIO00_LVDS1_PWM0_OUT IMX8QM_LVDS1_GPIO00 1 +#define IMX8QM_LVDS1_GPIO00_LSIO_GPIO1_IO10 IMX8QM_LVDS1_GPIO00 3 +#define IMX8QM_LVDS1_GPIO01_LVDS1_GPIO0_IO01 IMX8QM_LVDS1_GPIO01 0 +#define IMX8QM_LVDS1_GPIO01_LSIO_GPIO1_IO11 IMX8QM_LVDS1_GPIO01 3 +#define 
IMX8QM_LVDS1_I2C0_SCL_LVDS1_I2C0_SCL IMX8QM_LVDS1_I2C0_SCL 0 +#define IMX8QM_LVDS1_I2C0_SCL_LVDS1_GPIO0_IO02 IMX8QM_LVDS1_I2C0_SCL 1 +#define IMX8QM_LVDS1_I2C0_SCL_LSIO_GPIO1_IO12 IMX8QM_LVDS1_I2C0_SCL 3 +#define IMX8QM_LVDS1_I2C0_SDA_LVDS1_I2C0_SDA IMX8QM_LVDS1_I2C0_SDA 0 +#define IMX8QM_LVDS1_I2C0_SDA_LVDS1_GPIO0_IO03 IMX8QM_LVDS1_I2C0_SDA 1 +#define IMX8QM_LVDS1_I2C0_SDA_LSIO_GPIO1_IO13 IMX8QM_LVDS1_I2C0_SDA 3 +#define IMX8QM_LVDS1_I2C1_SCL_LVDS1_I2C1_SCL IMX8QM_LVDS1_I2C1_SCL 0 +#define IMX8QM_LVDS1_I2C1_SCL_DMA_UART3_TX IMX8QM_LVDS1_I2C1_SCL 1 +#define IMX8QM_LVDS1_I2C1_SCL_LSIO_GPIO1_IO14 IMX8QM_LVDS1_I2C1_SCL 3 +#define IMX8QM_LVDS1_I2C1_SDA_LVDS1_I2C1_SDA IMX8QM_LVDS1_I2C1_SDA 0 +#define IMX8QM_LVDS1_I2C1_SDA_DMA_UART3_RX IMX8QM_LVDS1_I2C1_SDA 1 +#define IMX8QM_LVDS1_I2C1_SDA_LSIO_GPIO1_IO15 IMX8QM_LVDS1_I2C1_SDA 3 +#define IMX8QM_MIPI_DSI0_I2C0_SCL_MIPI_DSI0_I2C0_SCL IMX8QM_MIPI_DSI0_I2C0_SCL 0 +#define IMX8QM_MIPI_DSI0_I2C0_SCL_LSIO_GPIO1_IO16 IMX8QM_MIPI_DSI0_I2C0_SCL 3 +#define IMX8QM_MIPI_DSI0_I2C0_SDA_MIPI_DSI0_I2C0_SDA IMX8QM_MIPI_DSI0_I2C0_SDA 0 +#define IMX8QM_MIPI_DSI0_I2C0_SDA_LSIO_GPIO1_IO17 IMX8QM_MIPI_DSI0_I2C0_SDA 3 +#define IMX8QM_MIPI_DSI0_GPIO0_00_MIPI_DSI0_GPIO0_IO00 IMX8QM_MIPI_DSI0_GPIO0_00 0 +#define IMX8QM_MIPI_DSI0_GPIO0_00_MIPI_DSI0_PWM0_OUT IMX8QM_MIPI_DSI0_GPIO0_00 1 +#define IMX8QM_MIPI_DSI0_GPIO0_00_LSIO_GPIO1_IO18 IMX8QM_MIPI_DSI0_GPIO0_00 3 +#define IMX8QM_MIPI_DSI0_GPIO0_01_MIPI_DSI0_GPIO0_IO01 IMX8QM_MIPI_DSI0_GPIO0_01 0 +#define IMX8QM_MIPI_DSI0_GPIO0_01_LSIO_GPIO1_IO19 IMX8QM_MIPI_DSI0_GPIO0_01 3 +#define IMX8QM_MIPI_DSI1_I2C0_SCL_MIPI_DSI1_I2C0_SCL IMX8QM_MIPI_DSI1_I2C0_SCL 0 +#define IMX8QM_MIPI_DSI1_I2C0_SCL_LSIO_GPIO1_IO20 IMX8QM_MIPI_DSI1_I2C0_SCL 3 +#define IMX8QM_MIPI_DSI1_I2C0_SDA_MIPI_DSI1_I2C0_SDA IMX8QM_MIPI_DSI1_I2C0_SDA 0 +#define IMX8QM_MIPI_DSI1_I2C0_SDA_LSIO_GPIO1_IO21 IMX8QM_MIPI_DSI1_I2C0_SDA 3 +#define IMX8QM_MIPI_DSI1_GPIO0_00_MIPI_DSI1_GPIO0_IO00 IMX8QM_MIPI_DSI1_GPIO0_00 0 +#define IMX8QM_MIPI_DSI1_GPIO0_00_MIPI_DSI1_PWM0_OUT IMX8QM_MIPI_DSI1_GPIO0_00 1 +#define IMX8QM_MIPI_DSI1_GPIO0_00_LSIO_GPIO1_IO22 IMX8QM_MIPI_DSI1_GPIO0_00 3 +#define IMX8QM_MIPI_DSI1_GPIO0_01_MIPI_DSI1_GPIO0_IO01 IMX8QM_MIPI_DSI1_GPIO0_01 0 +#define IMX8QM_MIPI_DSI1_GPIO0_01_LSIO_GPIO1_IO23 IMX8QM_MIPI_DSI1_GPIO0_01 3 +#define IMX8QM_MIPI_CSI0_MCLK_OUT_MIPI_CSI0_ACM_MCLK_OUT IMX8QM_MIPI_CSI0_MCLK_OUT 0 +#define IMX8QM_MIPI_CSI0_MCLK_OUT_LSIO_GPIO1_IO24 IMX8QM_MIPI_CSI0_MCLK_OUT 3 +#define IMX8QM_MIPI_CSI0_I2C0_SCL_MIPI_CSI0_I2C0_SCL IMX8QM_MIPI_CSI0_I2C0_SCL 0 +#define IMX8QM_MIPI_CSI0_I2C0_SCL_LSIO_GPIO1_IO25 IMX8QM_MIPI_CSI0_I2C0_SCL 3 +#define IMX8QM_MIPI_CSI0_I2C0_SDA_MIPI_CSI0_I2C0_SDA IMX8QM_MIPI_CSI0_I2C0_SDA 0 +#define IMX8QM_MIPI_CSI0_I2C0_SDA_LSIO_GPIO1_IO26 IMX8QM_MIPI_CSI0_I2C0_SDA 3 +#define IMX8QM_MIPI_CSI0_GPIO0_00_MIPI_CSI0_GPIO0_IO00 IMX8QM_MIPI_CSI0_GPIO0_00 0 +#define IMX8QM_MIPI_CSI0_GPIO0_00_DMA_I2C0_SCL IMX8QM_MIPI_CSI0_GPIO0_00 1 +#define IMX8QM_MIPI_CSI0_GPIO0_00_MIPI_CSI1_I2C0_SCL IMX8QM_MIPI_CSI0_GPIO0_00 2 +#define IMX8QM_MIPI_CSI0_GPIO0_00_LSIO_GPIO1_IO27 IMX8QM_MIPI_CSI0_GPIO0_00 3 +#define IMX8QM_MIPI_CSI0_GPIO0_01_MIPI_CSI0_GPIO0_IO01 IMX8QM_MIPI_CSI0_GPIO0_01 0 +#define IMX8QM_MIPI_CSI0_GPIO0_01_DMA_I2C0_SDA IMX8QM_MIPI_CSI0_GPIO0_01 1 +#define IMX8QM_MIPI_CSI0_GPIO0_01_MIPI_CSI1_I2C0_SDA IMX8QM_MIPI_CSI0_GPIO0_01 2 +#define IMX8QM_MIPI_CSI0_GPIO0_01_LSIO_GPIO1_IO28 IMX8QM_MIPI_CSI0_GPIO0_01 3 +#define IMX8QM_MIPI_CSI1_MCLK_OUT_MIPI_CSI1_ACM_MCLK_OUT IMX8QM_MIPI_CSI1_MCLK_OUT 0 +#define 
IMX8QM_MIPI_CSI1_MCLK_OUT_LSIO_GPIO1_IO29 IMX8QM_MIPI_CSI1_MCLK_OUT 3 +#define IMX8QM_MIPI_CSI1_GPIO0_00_MIPI_CSI1_GPIO0_IO00 IMX8QM_MIPI_CSI1_GPIO0_00 0 +#define IMX8QM_MIPI_CSI1_GPIO0_00_DMA_UART4_RX IMX8QM_MIPI_CSI1_GPIO0_00 1 +#define IMX8QM_MIPI_CSI1_GPIO0_00_LSIO_GPIO1_IO30 IMX8QM_MIPI_CSI1_GPIO0_00 3 +#define IMX8QM_MIPI_CSI1_GPIO0_01_MIPI_CSI1_GPIO0_IO01 IMX8QM_MIPI_CSI1_GPIO0_01 0 +#define IMX8QM_MIPI_CSI1_GPIO0_01_DMA_UART4_TX IMX8QM_MIPI_CSI1_GPIO0_01 1 +#define IMX8QM_MIPI_CSI1_GPIO0_01_LSIO_GPIO1_IO31 IMX8QM_MIPI_CSI1_GPIO0_01 3 +#define IMX8QM_MIPI_CSI1_I2C0_SCL_MIPI_CSI1_I2C0_SCL IMX8QM_MIPI_CSI1_I2C0_SCL 0 +#define IMX8QM_MIPI_CSI1_I2C0_SCL_LSIO_GPIO2_IO00 IMX8QM_MIPI_CSI1_I2C0_SCL 3 +#define IMX8QM_MIPI_CSI1_I2C0_SDA_MIPI_CSI1_I2C0_SDA IMX8QM_MIPI_CSI1_I2C0_SDA 0 +#define IMX8QM_MIPI_CSI1_I2C0_SDA_LSIO_GPIO2_IO01 IMX8QM_MIPI_CSI1_I2C0_SDA 3 +#define IMX8QM_HDMI_TX0_TS_SCL_HDMI_TX0_I2C0_SCL IMX8QM_HDMI_TX0_TS_SCL 0 +#define IMX8QM_HDMI_TX0_TS_SCL_DMA_I2C0_SCL IMX8QM_HDMI_TX0_TS_SCL 1 +#define IMX8QM_HDMI_TX0_TS_SCL_LSIO_GPIO2_IO02 IMX8QM_HDMI_TX0_TS_SCL 3 +#define IMX8QM_HDMI_TX0_TS_SDA_HDMI_TX0_I2C0_SDA IMX8QM_HDMI_TX0_TS_SDA 0 +#define IMX8QM_HDMI_TX0_TS_SDA_DMA_I2C0_SDA IMX8QM_HDMI_TX0_TS_SDA 1 +#define IMX8QM_HDMI_TX0_TS_SDA_LSIO_GPIO2_IO03 IMX8QM_HDMI_TX0_TS_SDA 3 +#define IMX8QM_ESAI1_FSR_AUD_ESAI1_FSR IMX8QM_ESAI1_FSR 0 +#define IMX8QM_ESAI1_FSR_LSIO_GPIO2_IO04 IMX8QM_ESAI1_FSR 3 +#define IMX8QM_ESAI1_FST_AUD_ESAI1_FST IMX8QM_ESAI1_FST 0 +#define IMX8QM_ESAI1_FST_AUD_SPDIF0_EXT_CLK IMX8QM_ESAI1_FST 1 +#define IMX8QM_ESAI1_FST_LSIO_GPIO2_IO05 IMX8QM_ESAI1_FST 3 +#define IMX8QM_ESAI1_SCKR_AUD_ESAI1_SCKR IMX8QM_ESAI1_SCKR 0 +#define IMX8QM_ESAI1_SCKR_LSIO_GPIO2_IO06 IMX8QM_ESAI1_SCKR 3 +#define IMX8QM_ESAI1_SCKT_AUD_ESAI1_SCKT IMX8QM_ESAI1_SCKT 0 +#define IMX8QM_ESAI1_SCKT_AUD_SAI2_RXC IMX8QM_ESAI1_SCKT 1 +#define IMX8QM_ESAI1_SCKT_AUD_SPDIF0_EXT_CLK IMX8QM_ESAI1_SCKT 2 +#define IMX8QM_ESAI1_SCKT_LSIO_GPIO2_IO07 IMX8QM_ESAI1_SCKT 3 +#define IMX8QM_ESAI1_TX0_AUD_ESAI1_TX0 IMX8QM_ESAI1_TX0 0 +#define IMX8QM_ESAI1_TX0_AUD_SAI2_RXD IMX8QM_ESAI1_TX0 1 +#define IMX8QM_ESAI1_TX0_AUD_SPDIF0_RX IMX8QM_ESAI1_TX0 2 +#define IMX8QM_ESAI1_TX0_LSIO_GPIO2_IO08 IMX8QM_ESAI1_TX0 3 +#define IMX8QM_ESAI1_TX1_AUD_ESAI1_TX1 IMX8QM_ESAI1_TX1 0 +#define IMX8QM_ESAI1_TX1_AUD_SAI2_RXFS IMX8QM_ESAI1_TX1 1 +#define IMX8QM_ESAI1_TX1_AUD_SPDIF0_TX IMX8QM_ESAI1_TX1 2 +#define IMX8QM_ESAI1_TX1_LSIO_GPIO2_IO09 IMX8QM_ESAI1_TX1 3 +#define IMX8QM_ESAI1_TX2_RX3_AUD_ESAI1_TX2_RX3 IMX8QM_ESAI1_TX2_RX3 0 +#define IMX8QM_ESAI1_TX2_RX3_AUD_SPDIF0_RX IMX8QM_ESAI1_TX2_RX3 1 +#define IMX8QM_ESAI1_TX2_RX3_LSIO_GPIO2_IO10 IMX8QM_ESAI1_TX2_RX3 3 +#define IMX8QM_ESAI1_TX3_RX2_AUD_ESAI1_TX3_RX2 IMX8QM_ESAI1_TX3_RX2 0 +#define IMX8QM_ESAI1_TX3_RX2_AUD_SPDIF0_TX IMX8QM_ESAI1_TX3_RX2 1 +#define IMX8QM_ESAI1_TX3_RX2_LSIO_GPIO2_IO11 IMX8QM_ESAI1_TX3_RX2 3 +#define IMX8QM_ESAI1_TX4_RX1_AUD_ESAI1_TX4_RX1 IMX8QM_ESAI1_TX4_RX1 0 +#define IMX8QM_ESAI1_TX4_RX1_LSIO_GPIO2_IO12 IMX8QM_ESAI1_TX4_RX1 3 +#define IMX8QM_ESAI1_TX5_RX0_AUD_ESAI1_TX5_RX0 IMX8QM_ESAI1_TX5_RX0 0 +#define IMX8QM_ESAI1_TX5_RX0_LSIO_GPIO2_IO13 IMX8QM_ESAI1_TX5_RX0 3 +#define IMX8QM_SPDIF0_RX_AUD_SPDIF0_RX IMX8QM_SPDIF0_RX 0 +#define IMX8QM_SPDIF0_RX_AUD_MQS_R IMX8QM_SPDIF0_RX 1 +#define IMX8QM_SPDIF0_RX_AUD_ACM_MCLK_IN1 IMX8QM_SPDIF0_RX 2 +#define IMX8QM_SPDIF0_RX_LSIO_GPIO2_IO14 IMX8QM_SPDIF0_RX 3 +#define IMX8QM_SPDIF0_TX_AUD_SPDIF0_TX IMX8QM_SPDIF0_TX 0 +#define IMX8QM_SPDIF0_TX_AUD_MQS_L IMX8QM_SPDIF0_TX 1 +#define IMX8QM_SPDIF0_TX_AUD_ACM_MCLK_OUT1 
IMX8QM_SPDIF0_TX 2 +#define IMX8QM_SPDIF0_TX_LSIO_GPIO2_IO15 IMX8QM_SPDIF0_TX 3 +#define IMX8QM_SPDIF0_EXT_CLK_AUD_SPDIF0_EXT_CLK IMX8QM_SPDIF0_EXT_CLK 0 +#define IMX8QM_SPDIF0_EXT_CLK_DMA_DMA0_REQ_IN0 IMX8QM_SPDIF0_EXT_CLK 1 +#define IMX8QM_SPDIF0_EXT_CLK_LSIO_GPIO2_IO16 IMX8QM_SPDIF0_EXT_CLK 3 +#define IMX8QM_SPI3_SCK_DMA_SPI3_SCK IMX8QM_SPI3_SCK 0 +#define IMX8QM_SPI3_SCK_LSIO_GPIO2_IO17 IMX8QM_SPI3_SCK 3 +#define IMX8QM_SPI3_SDO_DMA_SPI3_SDO IMX8QM_SPI3_SDO 0 +#define IMX8QM_SPI3_SDO_DMA_FTM_CH0 IMX8QM_SPI3_SDO 1 +#define IMX8QM_SPI3_SDO_LSIO_GPIO2_IO18 IMX8QM_SPI3_SDO 3 +#define IMX8QM_SPI3_SDI_DMA_SPI3_SDI IMX8QM_SPI3_SDI 0 +#define IMX8QM_SPI3_SDI_DMA_FTM_CH1 IMX8QM_SPI3_SDI 1 +#define IMX8QM_SPI3_SDI_LSIO_GPIO2_IO19 IMX8QM_SPI3_SDI 3 +#define IMX8QM_SPI3_CS0_DMA_SPI3_CS0 IMX8QM_SPI3_CS0 0 +#define IMX8QM_SPI3_CS0_DMA_FTM_CH2 IMX8QM_SPI3_CS0 1 +#define IMX8QM_SPI3_CS0_LSIO_GPIO2_IO20 IMX8QM_SPI3_CS0 3 +#define IMX8QM_SPI3_CS1_DMA_SPI3_CS1 IMX8QM_SPI3_CS1 0 +#define IMX8QM_SPI3_CS1_LSIO_GPIO2_IO21 IMX8QM_SPI3_CS1 3 +#define IMX8QM_ESAI0_FSR_AUD_ESAI0_FSR IMX8QM_ESAI0_FSR 0 +#define IMX8QM_ESAI0_FSR_LSIO_GPIO2_IO22 IMX8QM_ESAI0_FSR 3 +#define IMX8QM_ESAI0_FST_AUD_ESAI0_FST IMX8QM_ESAI0_FST 0 +#define IMX8QM_ESAI0_FST_LSIO_GPIO2_IO23 IMX8QM_ESAI0_FST 3 +#define IMX8QM_ESAI0_SCKR_AUD_ESAI0_SCKR IMX8QM_ESAI0_SCKR 0 +#define IMX8QM_ESAI0_SCKR_LSIO_GPIO2_IO24 IMX8QM_ESAI0_SCKR 3 +#define IMX8QM_ESAI0_SCKT_AUD_ESAI0_SCKT IMX8QM_ESAI0_SCKT 0 +#define IMX8QM_ESAI0_SCKT_LSIO_GPIO2_IO25 IMX8QM_ESAI0_SCKT 3 +#define IMX8QM_ESAI0_TX0_AUD_ESAI0_TX0 IMX8QM_ESAI0_TX0 0 +#define IMX8QM_ESAI0_TX0_LSIO_GPIO2_IO26 IMX8QM_ESAI0_TX0 3 +#define IMX8QM_ESAI0_TX1_AUD_ESAI0_TX1 IMX8QM_ESAI0_TX1 0 +#define IMX8QM_ESAI0_TX1_LSIO_GPIO2_IO27 IMX8QM_ESAI0_TX1 3 +#define IMX8QM_ESAI0_TX2_RX3_AUD_ESAI0_TX2_RX3 IMX8QM_ESAI0_TX2_RX3 0 +#define IMX8QM_ESAI0_TX2_RX3_LSIO_GPIO2_IO28 IMX8QM_ESAI0_TX2_RX3 3 +#define IMX8QM_ESAI0_TX3_RX2_AUD_ESAI0_TX3_RX2 IMX8QM_ESAI0_TX3_RX2 0 +#define IMX8QM_ESAI0_TX3_RX2_LSIO_GPIO2_IO29 IMX8QM_ESAI0_TX3_RX2 3 +#define IMX8QM_ESAI0_TX4_RX1_AUD_ESAI0_TX4_RX1 IMX8QM_ESAI0_TX4_RX1 0 +#define IMX8QM_ESAI0_TX4_RX1_LSIO_GPIO2_IO30 IMX8QM_ESAI0_TX4_RX1 3 +#define IMX8QM_ESAI0_TX5_RX0_AUD_ESAI0_TX5_RX0 IMX8QM_ESAI0_TX5_RX0 0 +#define IMX8QM_ESAI0_TX5_RX0_LSIO_GPIO2_IO31 IMX8QM_ESAI0_TX5_RX0 3 +#define IMX8QM_MCLK_IN0_AUD_ACM_MCLK_IN0 IMX8QM_MCLK_IN0 0 +#define IMX8QM_MCLK_IN0_AUD_ESAI0_RX_HF_CLK IMX8QM_MCLK_IN0 1 +#define IMX8QM_MCLK_IN0_AUD_ESAI1_RX_HF_CLK IMX8QM_MCLK_IN0 2 +#define IMX8QM_MCLK_IN0_LSIO_GPIO3_IO00 IMX8QM_MCLK_IN0 3 +#define IMX8QM_MCLK_OUT0_AUD_ACM_MCLK_OUT0 IMX8QM_MCLK_OUT0 0 +#define IMX8QM_MCLK_OUT0_AUD_ESAI0_TX_HF_CLK IMX8QM_MCLK_OUT0 1 +#define IMX8QM_MCLK_OUT0_AUD_ESAI1_TX_HF_CLK IMX8QM_MCLK_OUT0 2 +#define IMX8QM_MCLK_OUT0_LSIO_GPIO3_IO01 IMX8QM_MCLK_OUT0 3 +#define IMX8QM_SPI0_SCK_DMA_SPI0_SCK IMX8QM_SPI0_SCK 0 +#define IMX8QM_SPI0_SCK_AUD_SAI0_RXC IMX8QM_SPI0_SCK 1 +#define IMX8QM_SPI0_SCK_LSIO_GPIO3_IO02 IMX8QM_SPI0_SCK 3 +#define IMX8QM_SPI0_SDO_DMA_SPI0_SDO IMX8QM_SPI0_SDO 0 +#define IMX8QM_SPI0_SDO_AUD_SAI0_TXD IMX8QM_SPI0_SDO 1 +#define IMX8QM_SPI0_SDO_LSIO_GPIO3_IO03 IMX8QM_SPI0_SDO 3 +#define IMX8QM_SPI0_SDI_DMA_SPI0_SDI IMX8QM_SPI0_SDI 0 +#define IMX8QM_SPI0_SDI_AUD_SAI0_RXD IMX8QM_SPI0_SDI 1 +#define IMX8QM_SPI0_SDI_LSIO_GPIO3_IO04 IMX8QM_SPI0_SDI 3 +#define IMX8QM_SPI0_CS0_DMA_SPI0_CS0 IMX8QM_SPI0_CS0 0 +#define IMX8QM_SPI0_CS0_AUD_SAI0_RXFS IMX8QM_SPI0_CS0 1 +#define IMX8QM_SPI0_CS0_LSIO_GPIO3_IO05 IMX8QM_SPI0_CS0 3 +#define IMX8QM_SPI0_CS1_DMA_SPI0_CS1 
IMX8QM_SPI0_CS1 0 +#define IMX8QM_SPI0_CS1_AUD_SAI0_TXC IMX8QM_SPI0_CS1 1 +#define IMX8QM_SPI0_CS1_LSIO_GPIO3_IO06 IMX8QM_SPI0_CS1 3 +#define IMX8QM_SPI2_SCK_DMA_SPI2_SCK IMX8QM_SPI2_SCK 0 +#define IMX8QM_SPI2_SCK_LSIO_GPIO3_IO07 IMX8QM_SPI2_SCK 3 +#define IMX8QM_SPI2_SDO_DMA_SPI2_SDO IMX8QM_SPI2_SDO 0 +#define IMX8QM_SPI2_SDO_LSIO_GPIO3_IO08 IMX8QM_SPI2_SDO 3 +#define IMX8QM_SPI2_SDI_DMA_SPI2_SDI IMX8QM_SPI2_SDI 0 +#define IMX8QM_SPI2_SDI_LSIO_GPIO3_IO09 IMX8QM_SPI2_SDI 3 +#define IMX8QM_SPI2_CS0_DMA_SPI2_CS0 IMX8QM_SPI2_CS0 0 +#define IMX8QM_SPI2_CS0_LSIO_GPIO3_IO10 IMX8QM_SPI2_CS0 3 +#define IMX8QM_SPI2_CS1_DMA_SPI2_CS1 IMX8QM_SPI2_CS1 0 +#define IMX8QM_SPI2_CS1_AUD_SAI0_TXFS IMX8QM_SPI2_CS1 1 +#define IMX8QM_SPI2_CS1_LSIO_GPIO3_IO11 IMX8QM_SPI2_CS1 3 +#define IMX8QM_SAI1_RXC_AUD_SAI1_RXC IMX8QM_SAI1_RXC 0 +#define IMX8QM_SAI1_RXC_AUD_SAI0_TXD IMX8QM_SAI1_RXC 1 +#define IMX8QM_SAI1_RXC_LSIO_GPIO3_IO12 IMX8QM_SAI1_RXC 3 +#define IMX8QM_SAI1_RXD_AUD_SAI1_RXD IMX8QM_SAI1_RXD 0 +#define IMX8QM_SAI1_RXD_AUD_SAI0_TXFS IMX8QM_SAI1_RXD 1 +#define IMX8QM_SAI1_RXD_LSIO_GPIO3_IO13 IMX8QM_SAI1_RXD 3 +#define IMX8QM_SAI1_RXFS_AUD_SAI1_RXFS IMX8QM_SAI1_RXFS 0 +#define IMX8QM_SAI1_RXFS_AUD_SAI0_RXD IMX8QM_SAI1_RXFS 1 +#define IMX8QM_SAI1_RXFS_LSIO_GPIO3_IO14 IMX8QM_SAI1_RXFS 3 +#define IMX8QM_SAI1_TXC_AUD_SAI1_TXC IMX8QM_SAI1_TXC 0 +#define IMX8QM_SAI1_TXC_AUD_SAI0_TXC IMX8QM_SAI1_TXC 1 +#define IMX8QM_SAI1_TXC_LSIO_GPIO3_IO15 IMX8QM_SAI1_TXC 3 +#define IMX8QM_SAI1_TXD_AUD_SAI1_TXD IMX8QM_SAI1_TXD 0 +#define IMX8QM_SAI1_TXD_AUD_SAI1_RXC IMX8QM_SAI1_TXD 1 +#define IMX8QM_SAI1_TXD_LSIO_GPIO3_IO16 IMX8QM_SAI1_TXD 3 +#define IMX8QM_SAI1_TXFS_AUD_SAI1_TXFS IMX8QM_SAI1_TXFS 0 +#define IMX8QM_SAI1_TXFS_AUD_SAI1_RXFS IMX8QM_SAI1_TXFS 1 +#define IMX8QM_SAI1_TXFS_LSIO_GPIO3_IO17 IMX8QM_SAI1_TXFS 3 +#define IMX8QM_ADC_IN7_DMA_ADC1_IN3 IMX8QM_ADC_IN7 0 +#define IMX8QM_ADC_IN7_DMA_SPI1_CS1 IMX8QM_ADC_IN7 1 +#define IMX8QM_ADC_IN7_LSIO_KPP0_ROW3 IMX8QM_ADC_IN7 2 +#define IMX8QM_ADC_IN7_LSIO_GPIO3_IO25 IMX8QM_ADC_IN7 3 +#define IMX8QM_ADC_IN6_DMA_ADC1_IN2 IMX8QM_ADC_IN6 0 +#define IMX8QM_ADC_IN6_DMA_SPI1_CS0 IMX8QM_ADC_IN6 1 +#define IMX8QM_ADC_IN6_LSIO_KPP0_ROW2 IMX8QM_ADC_IN6 2 +#define IMX8QM_ADC_IN6_LSIO_GPIO3_IO24 IMX8QM_ADC_IN6 3 +#define IMX8QM_ADC_IN5_DMA_ADC1_IN1 IMX8QM_ADC_IN5 0 +#define IMX8QM_ADC_IN5_DMA_SPI1_SDI IMX8QM_ADC_IN5 1 +#define IMX8QM_ADC_IN5_LSIO_KPP0_ROW1 IMX8QM_ADC_IN5 2 +#define IMX8QM_ADC_IN5_LSIO_GPIO3_IO23 IMX8QM_ADC_IN5 3 +#define IMX8QM_ADC_IN4_DMA_ADC1_IN0 IMX8QM_ADC_IN4 0 +#define IMX8QM_ADC_IN4_DMA_SPI1_SDO IMX8QM_ADC_IN4 1 +#define IMX8QM_ADC_IN4_LSIO_KPP0_ROW0 IMX8QM_ADC_IN4 2 +#define IMX8QM_ADC_IN4_LSIO_GPIO3_IO22 IMX8QM_ADC_IN4 3 +#define IMX8QM_ADC_IN3_DMA_ADC0_IN3 IMX8QM_ADC_IN3 0 +#define IMX8QM_ADC_IN3_DMA_SPI1_SCK IMX8QM_ADC_IN3 1 +#define IMX8QM_ADC_IN3_LSIO_KPP0_COL3 IMX8QM_ADC_IN3 2 +#define IMX8QM_ADC_IN3_LSIO_GPIO3_IO21 IMX8QM_ADC_IN3 3 +#define IMX8QM_ADC_IN2_DMA_ADC0_IN2 IMX8QM_ADC_IN2 0 +#define IMX8QM_ADC_IN2_LSIO_KPP0_COL2 IMX8QM_ADC_IN2 2 +#define IMX8QM_ADC_IN2_LSIO_GPIO3_IO20 IMX8QM_ADC_IN2 3 +#define IMX8QM_ADC_IN1_DMA_ADC0_IN1 IMX8QM_ADC_IN1 0 +#define IMX8QM_ADC_IN1_LSIO_KPP0_COL1 IMX8QM_ADC_IN1 2 +#define IMX8QM_ADC_IN1_LSIO_GPIO3_IO19 IMX8QM_ADC_IN1 3 +#define IMX8QM_ADC_IN0_DMA_ADC0_IN0 IMX8QM_ADC_IN0 0 +#define IMX8QM_ADC_IN0_LSIO_KPP0_COL0 IMX8QM_ADC_IN0 2 +#define IMX8QM_ADC_IN0_LSIO_GPIO3_IO18 IMX8QM_ADC_IN0 3 +#define IMX8QM_MLB_SIG_CONN_MLB_SIG IMX8QM_MLB_SIG 0 +#define IMX8QM_MLB_SIG_AUD_SAI3_RXC IMX8QM_MLB_SIG 1 +#define 
IMX8QM_MLB_SIG_LSIO_GPIO3_IO26 IMX8QM_MLB_SIG 3 +#define IMX8QM_MLB_CLK_CONN_MLB_CLK IMX8QM_MLB_CLK 0 +#define IMX8QM_MLB_CLK_AUD_SAI3_RXFS IMX8QM_MLB_CLK 1 +#define IMX8QM_MLB_CLK_LSIO_GPIO3_IO27 IMX8QM_MLB_CLK 3 +#define IMX8QM_MLB_DATA_CONN_MLB_DATA IMX8QM_MLB_DATA 0 +#define IMX8QM_MLB_DATA_AUD_SAI3_RXD IMX8QM_MLB_DATA 1 +#define IMX8QM_MLB_DATA_LSIO_GPIO3_IO28 IMX8QM_MLB_DATA 3 +#define IMX8QM_FLEXCAN0_RX_DMA_FLEXCAN0_RX IMX8QM_FLEXCAN0_RX 0 +#define IMX8QM_FLEXCAN0_RX_LSIO_GPIO3_IO29 IMX8QM_FLEXCAN0_RX 3 +#define IMX8QM_FLEXCAN0_TX_DMA_FLEXCAN0_TX IMX8QM_FLEXCAN0_TX 0 +#define IMX8QM_FLEXCAN0_TX_LSIO_GPIO3_IO30 IMX8QM_FLEXCAN0_TX 3 +#define IMX8QM_FLEXCAN1_RX_DMA_FLEXCAN1_RX IMX8QM_FLEXCAN1_RX 0 +#define IMX8QM_FLEXCAN1_RX_LSIO_GPIO3_IO31 IMX8QM_FLEXCAN1_RX 3 +#define IMX8QM_FLEXCAN1_TX_DMA_FLEXCAN1_TX IMX8QM_FLEXCAN1_TX 0 +#define IMX8QM_FLEXCAN1_TX_LSIO_GPIO4_IO00 IMX8QM_FLEXCAN1_TX 3 +#define IMX8QM_FLEXCAN2_RX_DMA_FLEXCAN2_RX IMX8QM_FLEXCAN2_RX 0 +#define IMX8QM_FLEXCAN2_RX_LSIO_GPIO4_IO01 IMX8QM_FLEXCAN2_RX 3 +#define IMX8QM_FLEXCAN2_TX_DMA_FLEXCAN2_TX IMX8QM_FLEXCAN2_TX 0 +#define IMX8QM_FLEXCAN2_TX_LSIO_GPIO4_IO02 IMX8QM_FLEXCAN2_TX 3 +#define IMX8QM_USB_SS3_TC0_DMA_I2C1_SCL IMX8QM_USB_SS3_TC0 0 +#define IMX8QM_USB_SS3_TC0_CONN_USB_OTG1_PWR IMX8QM_USB_SS3_TC0 1 +#define IMX8QM_USB_SS3_TC0_LSIO_GPIO4_IO03 IMX8QM_USB_SS3_TC0 3 +#define IMX8QM_USB_SS3_TC1_DMA_I2C1_SCL IMX8QM_USB_SS3_TC1 0 +#define IMX8QM_USB_SS3_TC1_CONN_USB_OTG2_PWR IMX8QM_USB_SS3_TC1 1 +#define IMX8QM_USB_SS3_TC1_LSIO_GPIO4_IO04 IMX8QM_USB_SS3_TC1 3 +#define IMX8QM_USB_SS3_TC2_DMA_I2C1_SDA IMX8QM_USB_SS3_TC2 0 +#define IMX8QM_USB_SS3_TC2_CONN_USB_OTG1_OC IMX8QM_USB_SS3_TC2 1 +#define IMX8QM_USB_SS3_TC2_LSIO_GPIO4_IO05 IMX8QM_USB_SS3_TC2 3 +#define IMX8QM_USB_SS3_TC3_DMA_I2C1_SDA IMX8QM_USB_SS3_TC3 0 +#define IMX8QM_USB_SS3_TC3_CONN_USB_OTG2_OC IMX8QM_USB_SS3_TC3 1 +#define IMX8QM_USB_SS3_TC3_LSIO_GPIO4_IO06 IMX8QM_USB_SS3_TC3 3 +#define IMX8QM_USDHC1_RESET_B_CONN_USDHC1_RESET_B IMX8QM_USDHC1_RESET_B 0 +#define IMX8QM_USDHC1_RESET_B_LSIO_GPIO4_IO07 IMX8QM_USDHC1_RESET_B 3 +#define IMX8QM_USDHC1_VSELECT_CONN_USDHC1_VSELECT IMX8QM_USDHC1_VSELECT 0 +#define IMX8QM_USDHC1_VSELECT_LSIO_GPIO4_IO08 IMX8QM_USDHC1_VSELECT 3 +#define IMX8QM_USDHC2_RESET_B_CONN_USDHC2_RESET_B IMX8QM_USDHC2_RESET_B 0 +#define IMX8QM_USDHC2_RESET_B_LSIO_GPIO4_IO09 IMX8QM_USDHC2_RESET_B 3 +#define IMX8QM_USDHC2_VSELECT_CONN_USDHC2_VSELECT IMX8QM_USDHC2_VSELECT 0 +#define IMX8QM_USDHC2_VSELECT_LSIO_GPIO4_IO10 IMX8QM_USDHC2_VSELECT 3 +#define IMX8QM_USDHC2_WP_CONN_USDHC2_WP IMX8QM_USDHC2_WP 0 +#define IMX8QM_USDHC2_WP_LSIO_GPIO4_IO11 IMX8QM_USDHC2_WP 3 +#define IMX8QM_USDHC2_CD_B_CONN_USDHC2_CD_B IMX8QM_USDHC2_CD_B 0 +#define IMX8QM_USDHC2_CD_B_LSIO_GPIO4_IO12 IMX8QM_USDHC2_CD_B 3 +#define IMX8QM_ENET0_MDIO_CONN_ENET0_MDIO IMX8QM_ENET0_MDIO 0 +#define IMX8QM_ENET0_MDIO_DMA_I2C4_SDA IMX8QM_ENET0_MDIO 1 +#define IMX8QM_ENET0_MDIO_LSIO_GPIO4_IO13 IMX8QM_ENET0_MDIO 3 +#define IMX8QM_ENET0_MDC_CONN_ENET0_MDC IMX8QM_ENET0_MDC 0 +#define IMX8QM_ENET0_MDC_DMA_I2C4_SCL IMX8QM_ENET0_MDC 1 +#define IMX8QM_ENET0_MDC_LSIO_GPIO4_IO14 IMX8QM_ENET0_MDC 3 +#define IMX8QM_ENET0_REFCLK_125M_25M_CONN_ENET0_REFCLK_125M_25M IMX8QM_ENET0_REFCLK_125M_25M 0 +#define IMX8QM_ENET0_REFCLK_125M_25M_CONN_ENET0_PPS IMX8QM_ENET0_REFCLK_125M_25M 1 +#define IMX8QM_ENET0_REFCLK_125M_25M_LSIO_GPIO4_IO15 IMX8QM_ENET0_REFCLK_125M_25M 3 +#define IMX8QM_ENET1_REFCLK_125M_25M_CONN_ENET1_REFCLK_125M_25M IMX8QM_ENET1_REFCLK_125M_25M 0 +#define IMX8QM_ENET1_REFCLK_125M_25M_CONN_ENET1_PPS 
IMX8QM_ENET1_REFCLK_125M_25M 1 +#define IMX8QM_ENET1_REFCLK_125M_25M_LSIO_GPIO4_IO16 IMX8QM_ENET1_REFCLK_125M_25M 3 +#define IMX8QM_ENET1_MDIO_CONN_ENET1_MDIO IMX8QM_ENET1_MDIO 0 +#define IMX8QM_ENET1_MDIO_DMA_I2C4_SDA IMX8QM_ENET1_MDIO 1 +#define IMX8QM_ENET1_MDIO_LSIO_GPIO4_IO17 IMX8QM_ENET1_MDIO 3 +#define IMX8QM_ENET1_MDC_CONN_ENET1_MDC IMX8QM_ENET1_MDC 0 +#define IMX8QM_ENET1_MDC_DMA_I2C4_SCL IMX8QM_ENET1_MDC 1 +#define IMX8QM_ENET1_MDC_LSIO_GPIO4_IO18 IMX8QM_ENET1_MDC 3 +#define IMX8QM_QSPI1A_SS0_B_LSIO_QSPI1A_SS0_B IMX8QM_QSPI1A_SS0_B 0 +#define IMX8QM_QSPI1A_SS0_B_LSIO_GPIO4_IO19 IMX8QM_QSPI1A_SS0_B 3 +#define IMX8QM_QSPI1A_SS1_B_LSIO_QSPI1A_SS1_B IMX8QM_QSPI1A_SS1_B 0 +#define IMX8QM_QSPI1A_SS1_B_LSIO_QSPI1A_SCLK2 IMX8QM_QSPI1A_SS1_B 1 +#define IMX8QM_QSPI1A_SS1_B_LSIO_GPIO4_IO20 IMX8QM_QSPI1A_SS1_B 3 +#define IMX8QM_QSPI1A_SCLK_LSIO_QSPI1A_SCLK IMX8QM_QSPI1A_SCLK 0 +#define IMX8QM_QSPI1A_SCLK_LSIO_GPIO4_IO21 IMX8QM_QSPI1A_SCLK 3 +#define IMX8QM_QSPI1A_DQS_LSIO_QSPI1A_DQS IMX8QM_QSPI1A_DQS 0 +#define IMX8QM_QSPI1A_DQS_LSIO_GPIO4_IO22 IMX8QM_QSPI1A_DQS 3 +#define IMX8QM_QSPI1A_DATA3_LSIO_QSPI1A_DATA3 IMX8QM_QSPI1A_DATA3 0 +#define IMX8QM_QSPI1A_DATA3_DMA_I2C1_SDA IMX8QM_QSPI1A_DATA3 1 +#define IMX8QM_QSPI1A_DATA3_CONN_USB_OTG1_OC IMX8QM_QSPI1A_DATA3 2 +#define IMX8QM_QSPI1A_DATA3_LSIO_GPIO4_IO23 IMX8QM_QSPI1A_DATA3 3 +#define IMX8QM_QSPI1A_DATA2_LSIO_QSPI1A_DATA2 IMX8QM_QSPI1A_DATA2 0 +#define IMX8QM_QSPI1A_DATA2_DMA_I2C1_SCL IMX8QM_QSPI1A_DATA2 1 +#define IMX8QM_QSPI1A_DATA2_CONN_USB_OTG2_PWR IMX8QM_QSPI1A_DATA2 2 +#define IMX8QM_QSPI1A_DATA2_LSIO_GPIO4_IO24 IMX8QM_QSPI1A_DATA2 3 +#define IMX8QM_QSPI1A_DATA1_LSIO_QSPI1A_DATA1 IMX8QM_QSPI1A_DATA1 0 +#define IMX8QM_QSPI1A_DATA1_DMA_I2C1_SDA IMX8QM_QSPI1A_DATA1 1 +#define IMX8QM_QSPI1A_DATA1_CONN_USB_OTG2_OC IMX8QM_QSPI1A_DATA1 2 +#define IMX8QM_QSPI1A_DATA1_LSIO_GPIO4_IO25 IMX8QM_QSPI1A_DATA1 3 +#define IMX8QM_QSPI1A_DATA0_LSIO_QSPI1A_DATA0 IMX8QM_QSPI1A_DATA0 0 +#define IMX8QM_QSPI1A_DATA0_LSIO_GPIO4_IO26 IMX8QM_QSPI1A_DATA0 3 +#define IMX8QM_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 IMX8QM_QSPI0A_DATA0 0 +#define IMX8QM_QSPI0A_DATA1_LSIO_QSPI0A_DATA1 IMX8QM_QSPI0A_DATA1 0 +#define IMX8QM_QSPI0A_DATA2_LSIO_QSPI0A_DATA2 IMX8QM_QSPI0A_DATA2 0 +#define IMX8QM_QSPI0A_DATA3_LSIO_QSPI0A_DATA3 IMX8QM_QSPI0A_DATA3 0 +#define IMX8QM_QSPI0A_DQS_LSIO_QSPI0A_DQS IMX8QM_QSPI0A_DQS 0 +#define IMX8QM_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B IMX8QM_QSPI0A_SS0_B 0 +#define IMX8QM_QSPI0A_SS1_B_LSIO_QSPI0A_SS1_B IMX8QM_QSPI0A_SS1_B 0 +#define IMX8QM_QSPI0A_SS1_B_LSIO_QSPI0A_SCLK2 IMX8QM_QSPI0A_SS1_B 1 +#define IMX8QM_QSPI0A_SCLK_LSIO_QSPI0A_SCLK IMX8QM_QSPI0A_SCLK 0 +#define IMX8QM_QSPI0B_SCLK_LSIO_QSPI0B_SCLK IMX8QM_QSPI0B_SCLK 0 +#define IMX8QM_QSPI0B_DATA0_LSIO_QSPI0B_DATA0 IMX8QM_QSPI0B_DATA0 0 +#define IMX8QM_QSPI0B_DATA1_LSIO_QSPI0B_DATA1 IMX8QM_QSPI0B_DATA1 0 +#define IMX8QM_QSPI0B_DATA2_LSIO_QSPI0B_DATA2 IMX8QM_QSPI0B_DATA2 0 +#define IMX8QM_QSPI0B_DATA3_LSIO_QSPI0B_DATA3 IMX8QM_QSPI0B_DATA3 0 +#define IMX8QM_QSPI0B_DQS_LSIO_QSPI0B_DQS IMX8QM_QSPI0B_DQS 0 +#define IMX8QM_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B IMX8QM_QSPI0B_SS0_B 0 +#define IMX8QM_QSPI0B_SS1_B_LSIO_QSPI0B_SS1_B IMX8QM_QSPI0B_SS1_B 0 +#define IMX8QM_QSPI0B_SS1_B_LSIO_QSPI0B_SCLK2 IMX8QM_QSPI0B_SS1_B 1 +#define IMX8QM_PCIE_CTRL0_CLKREQ_B_HSIO_PCIE0_CLKREQ_B IMX8QM_PCIE_CTRL0_CLKREQ_B 0 +#define IMX8QM_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO27 IMX8QM_PCIE_CTRL0_CLKREQ_B 3 +#define IMX8QM_PCIE_CTRL0_WAKE_B_HSIO_PCIE0_WAKE_B IMX8QM_PCIE_CTRL0_WAKE_B 0 +#define IMX8QM_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO28 
IMX8QM_PCIE_CTRL0_WAKE_B 3 +#define IMX8QM_PCIE_CTRL0_PERST_B_HSIO_PCIE0_PERST_B IMX8QM_PCIE_CTRL0_PERST_B 0 +#define IMX8QM_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO29 IMX8QM_PCIE_CTRL0_PERST_B 3 +#define IMX8QM_PCIE_CTRL1_CLKREQ_B_HSIO_PCIE1_CLKREQ_B IMX8QM_PCIE_CTRL1_CLKREQ_B 0 +#define IMX8QM_PCIE_CTRL1_CLKREQ_B_DMA_I2C1_SDA IMX8QM_PCIE_CTRL1_CLKREQ_B 1 +#define IMX8QM_PCIE_CTRL1_CLKREQ_B_CONN_USB_OTG2_OC IMX8QM_PCIE_CTRL1_CLKREQ_B 2 +#define IMX8QM_PCIE_CTRL1_CLKREQ_B_LSIO_GPIO4_IO30 IMX8QM_PCIE_CTRL1_CLKREQ_B 3 +#define IMX8QM_PCIE_CTRL1_WAKE_B_HSIO_PCIE1_WAKE_B IMX8QM_PCIE_CTRL1_WAKE_B 0 +#define IMX8QM_PCIE_CTRL1_WAKE_B_DMA_I2C1_SCL IMX8QM_PCIE_CTRL1_WAKE_B 1 +#define IMX8QM_PCIE_CTRL1_WAKE_B_CONN_USB_OTG2_PWR IMX8QM_PCIE_CTRL1_WAKE_B 2 +#define IMX8QM_PCIE_CTRL1_WAKE_B_LSIO_GPIO4_IO31 IMX8QM_PCIE_CTRL1_WAKE_B 3 +#define IMX8QM_PCIE_CTRL1_PERST_B_HSIO_PCIE1_PERST_B IMX8QM_PCIE_CTRL1_PERST_B 0 +#define IMX8QM_PCIE_CTRL1_PERST_B_DMA_I2C1_SCL IMX8QM_PCIE_CTRL1_PERST_B 1 +#define IMX8QM_PCIE_CTRL1_PERST_B_CONN_USB_OTG1_PWR IMX8QM_PCIE_CTRL1_PERST_B 2 +#define IMX8QM_PCIE_CTRL1_PERST_B_LSIO_GPIO5_IO00 IMX8QM_PCIE_CTRL1_PERST_B 3 +#define IMX8QM_USB_HSIC0_DATA_CONN_USB_HSIC0_DATA IMX8QM_USB_HSIC0_DATA 0 +#define IMX8QM_USB_HSIC0_DATA_DMA_I2C1_SDA IMX8QM_USB_HSIC0_DATA 1 +#define IMX8QM_USB_HSIC0_DATA_LSIO_GPIO5_IO01 IMX8QM_USB_HSIC0_DATA 3 +#define IMX8QM_USB_HSIC0_STROBE_CONN_USB_HSIC0_STROBE IMX8QM_USB_HSIC0_STROBE 0 +#define IMX8QM_USB_HSIC0_STROBE_DMA_I2C1_SCL IMX8QM_USB_HSIC0_STROBE 1 +#define IMX8QM_USB_HSIC0_STROBE_LSIO_GPIO5_IO02 IMX8QM_USB_HSIC0_STROBE 3 +#define IMX8QM_EMMC0_CLK_CONN_EMMC0_CLK IMX8QM_EMMC0_CLK 0 +#define IMX8QM_EMMC0_CLK_CONN_NAND_READY_B IMX8QM_EMMC0_CLK 1 +#define IMX8QM_EMMC0_CMD_CONN_EMMC0_CMD IMX8QM_EMMC0_CMD 0 +#define IMX8QM_EMMC0_CMD_CONN_NAND_DQS IMX8QM_EMMC0_CMD 1 +#define IMX8QM_EMMC0_CMD_AUD_MQS_R IMX8QM_EMMC0_CMD 2 +#define IMX8QM_EMMC0_CMD_LSIO_GPIO5_IO03 IMX8QM_EMMC0_CMD 3 +#define IMX8QM_EMMC0_DATA0_CONN_EMMC0_DATA0 IMX8QM_EMMC0_DATA0 0 +#define IMX8QM_EMMC0_DATA0_CONN_NAND_DATA00 IMX8QM_EMMC0_DATA0 1 +#define IMX8QM_EMMC0_DATA0_LSIO_GPIO5_IO04 IMX8QM_EMMC0_DATA0 3 +#define IMX8QM_EMMC0_DATA1_CONN_EMMC0_DATA1 IMX8QM_EMMC0_DATA1 0 +#define IMX8QM_EMMC0_DATA1_CONN_NAND_DATA01 IMX8QM_EMMC0_DATA1 1 +#define IMX8QM_EMMC0_DATA1_LSIO_GPIO5_IO05 IMX8QM_EMMC0_DATA1 3 +#define IMX8QM_EMMC0_DATA2_CONN_EMMC0_DATA2 IMX8QM_EMMC0_DATA2 0 +#define IMX8QM_EMMC0_DATA2_CONN_NAND_DATA02 IMX8QM_EMMC0_DATA2 1 +#define IMX8QM_EMMC0_DATA2_LSIO_GPIO5_IO06 IMX8QM_EMMC0_DATA2 3 +#define IMX8QM_EMMC0_DATA3_CONN_EMMC0_DATA3 IMX8QM_EMMC0_DATA3 0 +#define IMX8QM_EMMC0_DATA3_CONN_NAND_DATA03 IMX8QM_EMMC0_DATA3 1 +#define IMX8QM_EMMC0_DATA3_LSIO_GPIO5_IO07 IMX8QM_EMMC0_DATA3 3 +#define IMX8QM_EMMC0_DATA4_CONN_EMMC0_DATA4 IMX8QM_EMMC0_DATA4 0 +#define IMX8QM_EMMC0_DATA4_CONN_NAND_DATA04 IMX8QM_EMMC0_DATA4 1 +#define IMX8QM_EMMC0_DATA4_LSIO_GPIO5_IO08 IMX8QM_EMMC0_DATA4 3 +#define IMX8QM_EMMC0_DATA5_CONN_EMMC0_DATA5 IMX8QM_EMMC0_DATA5 0 +#define IMX8QM_EMMC0_DATA5_CONN_NAND_DATA05 IMX8QM_EMMC0_DATA5 1 +#define IMX8QM_EMMC0_DATA5_LSIO_GPIO5_IO09 IMX8QM_EMMC0_DATA5 3 +#define IMX8QM_EMMC0_DATA6_CONN_EMMC0_DATA6 IMX8QM_EMMC0_DATA6 0 +#define IMX8QM_EMMC0_DATA6_CONN_NAND_DATA06 IMX8QM_EMMC0_DATA6 1 +#define IMX8QM_EMMC0_DATA6_LSIO_GPIO5_IO10 IMX8QM_EMMC0_DATA6 3 +#define IMX8QM_EMMC0_DATA7_CONN_EMMC0_DATA7 IMX8QM_EMMC0_DATA7 0 +#define IMX8QM_EMMC0_DATA7_CONN_NAND_DATA07 IMX8QM_EMMC0_DATA7 1 +#define IMX8QM_EMMC0_DATA7_LSIO_GPIO5_IO11 IMX8QM_EMMC0_DATA7 3 +#define 
IMX8QM_EMMC0_STROBE_CONN_EMMC0_STROBE IMX8QM_EMMC0_STROBE 0 +#define IMX8QM_EMMC0_STROBE_CONN_NAND_CLE IMX8QM_EMMC0_STROBE 1 +#define IMX8QM_EMMC0_STROBE_LSIO_GPIO5_IO12 IMX8QM_EMMC0_STROBE 3 +#define IMX8QM_EMMC0_RESET_B_CONN_EMMC0_RESET_B IMX8QM_EMMC0_RESET_B 0 +#define IMX8QM_EMMC0_RESET_B_CONN_NAND_WP_B IMX8QM_EMMC0_RESET_B 1 +#define IMX8QM_EMMC0_RESET_B_CONN_USDHC1_VSELECT IMX8QM_EMMC0_RESET_B 2 +#define IMX8QM_EMMC0_RESET_B_LSIO_GPIO5_IO13 IMX8QM_EMMC0_RESET_B 3 +#define IMX8QM_USDHC1_CLK_CONN_USDHC1_CLK IMX8QM_USDHC1_CLK 0 +#define IMX8QM_USDHC1_CLK_AUD_MQS_R IMX8QM_USDHC1_CLK 1 +#define IMX8QM_USDHC1_CMD_CONN_USDHC1_CMD IMX8QM_USDHC1_CMD 0 +#define IMX8QM_USDHC1_CMD_AUD_MQS_L IMX8QM_USDHC1_CMD 1 +#define IMX8QM_USDHC1_CMD_LSIO_GPIO5_IO14 IMX8QM_USDHC1_CMD 3 +#define IMX8QM_USDHC1_DATA0_CONN_USDHC1_DATA0 IMX8QM_USDHC1_DATA0 0 +#define IMX8QM_USDHC1_DATA0_CONN_NAND_RE_N IMX8QM_USDHC1_DATA0 1 +#define IMX8QM_USDHC1_DATA0_LSIO_GPIO5_IO15 IMX8QM_USDHC1_DATA0 3 +#define IMX8QM_USDHC1_DATA1_CONN_USDHC1_DATA1 IMX8QM_USDHC1_DATA1 0 +#define IMX8QM_USDHC1_DATA1_CONN_NAND_RE_P IMX8QM_USDHC1_DATA1 1 +#define IMX8QM_USDHC1_DATA1_LSIO_GPIO5_IO16 IMX8QM_USDHC1_DATA1 3 +#define IMX8QM_USDHC1_DATA2_CONN_USDHC1_DATA2 IMX8QM_USDHC1_DATA2 0 +#define IMX8QM_USDHC1_DATA2_CONN_NAND_DQS_N IMX8QM_USDHC1_DATA2 1 +#define IMX8QM_USDHC1_DATA2_LSIO_GPIO5_IO17 IMX8QM_USDHC1_DATA2 3 +#define IMX8QM_USDHC1_DATA3_CONN_USDHC1_DATA3 IMX8QM_USDHC1_DATA3 0 +#define IMX8QM_USDHC1_DATA3_CONN_NAND_DQS_P IMX8QM_USDHC1_DATA3 1 +#define IMX8QM_USDHC1_DATA3_LSIO_GPIO5_IO18 IMX8QM_USDHC1_DATA3 3 +#define IMX8QM_USDHC1_DATA4_CONN_USDHC1_DATA4 IMX8QM_USDHC1_DATA4 0 +#define IMX8QM_USDHC1_DATA4_CONN_NAND_CE0_B IMX8QM_USDHC1_DATA4 1 +#define IMX8QM_USDHC1_DATA4_AUD_MQS_R IMX8QM_USDHC1_DATA4 2 +#define IMX8QM_USDHC1_DATA4_LSIO_GPIO5_IO19 IMX8QM_USDHC1_DATA4 3 +#define IMX8QM_USDHC1_DATA5_CONN_USDHC1_DATA5 IMX8QM_USDHC1_DATA5 0 +#define IMX8QM_USDHC1_DATA5_CONN_NAND_RE_B IMX8QM_USDHC1_DATA5 1 +#define IMX8QM_USDHC1_DATA5_AUD_MQS_L IMX8QM_USDHC1_DATA5 2 +#define IMX8QM_USDHC1_DATA5_LSIO_GPIO5_IO20 IMX8QM_USDHC1_DATA5 3 +#define IMX8QM_USDHC1_DATA6_CONN_USDHC1_DATA6 IMX8QM_USDHC1_DATA6 0 +#define IMX8QM_USDHC1_DATA6_CONN_NAND_WE_B IMX8QM_USDHC1_DATA6 1 +#define IMX8QM_USDHC1_DATA6_CONN_USDHC1_WP IMX8QM_USDHC1_DATA6 2 +#define IMX8QM_USDHC1_DATA6_LSIO_GPIO5_IO21 IMX8QM_USDHC1_DATA6 3 +#define IMX8QM_USDHC1_DATA7_CONN_USDHC1_DATA7 IMX8QM_USDHC1_DATA7 0 +#define IMX8QM_USDHC1_DATA7_CONN_NAND_ALE IMX8QM_USDHC1_DATA7 1 +#define IMX8QM_USDHC1_DATA7_CONN_USDHC1_CD_B IMX8QM_USDHC1_DATA7 2 +#define IMX8QM_USDHC1_DATA7_LSIO_GPIO5_IO22 IMX8QM_USDHC1_DATA7 3 +#define IMX8QM_USDHC1_STROBE_CONN_USDHC1_STROBE IMX8QM_USDHC1_STROBE 0 +#define IMX8QM_USDHC1_STROBE_CONN_NAND_CE1_B IMX8QM_USDHC1_STROBE 1 +#define IMX8QM_USDHC1_STROBE_CONN_USDHC1_RESET_B IMX8QM_USDHC1_STROBE 2 +#define IMX8QM_USDHC1_STROBE_LSIO_GPIO5_IO23 IMX8QM_USDHC1_STROBE 3 +#define IMX8QM_USDHC2_CLK_CONN_USDHC2_CLK IMX8QM_USDHC2_CLK 0 +#define IMX8QM_USDHC2_CLK_AUD_MQS_R IMX8QM_USDHC2_CLK 1 +#define IMX8QM_USDHC2_CLK_LSIO_GPIO5_IO24 IMX8QM_USDHC2_CLK 3 +#define IMX8QM_USDHC2_CMD_CONN_USDHC2_CMD IMX8QM_USDHC2_CMD 0 +#define IMX8QM_USDHC2_CMD_AUD_MQS_L IMX8QM_USDHC2_CMD 1 +#define IMX8QM_USDHC2_CMD_LSIO_GPIO5_IO25 IMX8QM_USDHC2_CMD 3 +#define IMX8QM_USDHC2_DATA0_CONN_USDHC2_DATA0 IMX8QM_USDHC2_DATA0 0 +#define IMX8QM_USDHC2_DATA0_DMA_UART4_RX IMX8QM_USDHC2_DATA0 1 +#define IMX8QM_USDHC2_DATA0_LSIO_GPIO5_IO26 IMX8QM_USDHC2_DATA0 3 +#define IMX8QM_USDHC2_DATA1_CONN_USDHC2_DATA1 
IMX8QM_USDHC2_DATA1 0 +#define IMX8QM_USDHC2_DATA1_DMA_UART4_TX IMX8QM_USDHC2_DATA1 1 +#define IMX8QM_USDHC2_DATA1_LSIO_GPIO5_IO27 IMX8QM_USDHC2_DATA1 3 +#define IMX8QM_USDHC2_DATA2_CONN_USDHC2_DATA2 IMX8QM_USDHC2_DATA2 0 +#define IMX8QM_USDHC2_DATA2_DMA_UART4_CTS_B IMX8QM_USDHC2_DATA2 1 +#define IMX8QM_USDHC2_DATA2_LSIO_GPIO5_IO28 IMX8QM_USDHC2_DATA2 3 +#define IMX8QM_USDHC2_DATA3_CONN_USDHC2_DATA3 IMX8QM_USDHC2_DATA3 0 +#define IMX8QM_USDHC2_DATA3_DMA_UART4_RTS_B IMX8QM_USDHC2_DATA3 1 +#define IMX8QM_USDHC2_DATA3_LSIO_GPIO5_IO29 IMX8QM_USDHC2_DATA3 3 +#define IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC IMX8QM_ENET0_RGMII_TXC 0 +#define IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_OUT IMX8QM_ENET0_RGMII_TXC 1 +#define IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_IN IMX8QM_ENET0_RGMII_TXC 2 +#define IMX8QM_ENET0_RGMII_TXC_LSIO_GPIO5_IO30 IMX8QM_ENET0_RGMII_TXC 3 +#define IMX8QM_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL IMX8QM_ENET0_RGMII_TX_CTL 0 +#define IMX8QM_ENET0_RGMII_TX_CTL_LSIO_GPIO5_IO31 IMX8QM_ENET0_RGMII_TX_CTL 3 +#define IMX8QM_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 IMX8QM_ENET0_RGMII_TXD0 0 +#define IMX8QM_ENET0_RGMII_TXD0_LSIO_GPIO6_IO00 IMX8QM_ENET0_RGMII_TXD0 3 +#define IMX8QM_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 IMX8QM_ENET0_RGMII_TXD1 0 +#define IMX8QM_ENET0_RGMII_TXD1_LSIO_GPIO6_IO01 IMX8QM_ENET0_RGMII_TXD1 3 +#define IMX8QM_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 IMX8QM_ENET0_RGMII_TXD2 0 +#define IMX8QM_ENET0_RGMII_TXD2_DMA_UART3_TX IMX8QM_ENET0_RGMII_TXD2 1 +#define IMX8QM_ENET0_RGMII_TXD2_VPU_TSI_S1_VID IMX8QM_ENET0_RGMII_TXD2 2 +#define IMX8QM_ENET0_RGMII_TXD2_LSIO_GPIO6_IO02 IMX8QM_ENET0_RGMII_TXD2 3 +#define IMX8QM_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 IMX8QM_ENET0_RGMII_TXD3 0 +#define IMX8QM_ENET0_RGMII_TXD3_DMA_UART3_RTS_B IMX8QM_ENET0_RGMII_TXD3 1 +#define IMX8QM_ENET0_RGMII_TXD3_VPU_TSI_S1_SYNC IMX8QM_ENET0_RGMII_TXD3 2 +#define IMX8QM_ENET0_RGMII_TXD3_LSIO_GPIO6_IO03 IMX8QM_ENET0_RGMII_TXD3 3 +#define IMX8QM_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC IMX8QM_ENET0_RGMII_RXC 0 +#define IMX8QM_ENET0_RGMII_RXC_DMA_UART3_CTS_B IMX8QM_ENET0_RGMII_RXC 1 +#define IMX8QM_ENET0_RGMII_RXC_VPU_TSI_S1_DATA IMX8QM_ENET0_RGMII_RXC 2 +#define IMX8QM_ENET0_RGMII_RXC_LSIO_GPIO6_IO04 IMX8QM_ENET0_RGMII_RXC 3 +#define IMX8QM_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL IMX8QM_ENET0_RGMII_RX_CTL 0 +#define IMX8QM_ENET0_RGMII_RX_CTL_VPU_TSI_S0_VID IMX8QM_ENET0_RGMII_RX_CTL 2 +#define IMX8QM_ENET0_RGMII_RX_CTL_LSIO_GPIO6_IO05 IMX8QM_ENET0_RGMII_RX_CTL 3 +#define IMX8QM_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 IMX8QM_ENET0_RGMII_RXD0 0 +#define IMX8QM_ENET0_RGMII_RXD0_VPU_TSI_S0_SYNC IMX8QM_ENET0_RGMII_RXD0 2 +#define IMX8QM_ENET0_RGMII_RXD0_LSIO_GPIO6_IO06 IMX8QM_ENET0_RGMII_RXD0 3 +#define IMX8QM_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 IMX8QM_ENET0_RGMII_RXD1 0 +#define IMX8QM_ENET0_RGMII_RXD1_VPU_TSI_S0_DATA IMX8QM_ENET0_RGMII_RXD1 2 +#define IMX8QM_ENET0_RGMII_RXD1_LSIO_GPIO6_IO07 IMX8QM_ENET0_RGMII_RXD1 3 +#define IMX8QM_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 IMX8QM_ENET0_RGMII_RXD2 0 +#define IMX8QM_ENET0_RGMII_RXD2_CONN_ENET0_RMII_RX_ER IMX8QM_ENET0_RGMII_RXD2 1 +#define IMX8QM_ENET0_RGMII_RXD2_VPU_TSI_S0_CLK IMX8QM_ENET0_RGMII_RXD2 2 +#define IMX8QM_ENET0_RGMII_RXD2_LSIO_GPIO6_IO08 IMX8QM_ENET0_RGMII_RXD2 3 +#define IMX8QM_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 IMX8QM_ENET0_RGMII_RXD3 0 +#define IMX8QM_ENET0_RGMII_RXD3_DMA_UART3_RX IMX8QM_ENET0_RGMII_RXD3 1 +#define IMX8QM_ENET0_RGMII_RXD3_VPU_TSI_S1_CLK IMX8QM_ENET0_RGMII_RXD3 2 +#define IMX8QM_ENET0_RGMII_RXD3_LSIO_GPIO6_IO09 
IMX8QM_ENET0_RGMII_RXD3 3 +#define IMX8QM_ENET1_RGMII_TXC_CONN_ENET1_RGMII_TXC IMX8QM_ENET1_RGMII_TXC 0 +#define IMX8QM_ENET1_RGMII_TXC_CONN_ENET1_RCLK50M_OUT IMX8QM_ENET1_RGMII_TXC 1 +#define IMX8QM_ENET1_RGMII_TXC_CONN_ENET1_RCLK50M_IN IMX8QM_ENET1_RGMII_TXC 2 +#define IMX8QM_ENET1_RGMII_TXC_LSIO_GPIO6_IO10 IMX8QM_ENET1_RGMII_TXC 3 +#define IMX8QM_ENET1_RGMII_TX_CTL_CONN_ENET1_RGMII_TX_CTL IMX8QM_ENET1_RGMII_TX_CTL 0 +#define IMX8QM_ENET1_RGMII_TX_CTL_LSIO_GPIO6_IO11 IMX8QM_ENET1_RGMII_TX_CTL 3 +#define IMX8QM_ENET1_RGMII_TXD0_CONN_ENET1_RGMII_TXD0 IMX8QM_ENET1_RGMII_TXD0 0 +#define IMX8QM_ENET1_RGMII_TXD0_LSIO_GPIO6_IO12 IMX8QM_ENET1_RGMII_TXD0 3 +#define IMX8QM_ENET1_RGMII_TXD1_CONN_ENET1_RGMII_TXD1 IMX8QM_ENET1_RGMII_TXD1 0 +#define IMX8QM_ENET1_RGMII_TXD1_LSIO_GPIO6_IO13 IMX8QM_ENET1_RGMII_TXD1 3 +#define IMX8QM_ENET1_RGMII_TXD2_CONN_ENET1_RGMII_TXD2 IMX8QM_ENET1_RGMII_TXD2 0 +#define IMX8QM_ENET1_RGMII_TXD2_DMA_UART3_TX IMX8QM_ENET1_RGMII_TXD2 1 +#define IMX8QM_ENET1_RGMII_TXD2_VPU_TSI_S1_VID IMX8QM_ENET1_RGMII_TXD2 2 +#define IMX8QM_ENET1_RGMII_TXD2_LSIO_GPIO6_IO14 IMX8QM_ENET1_RGMII_TXD2 3 +#define IMX8QM_ENET1_RGMII_TXD3_CONN_ENET1_RGMII_TXD3 IMX8QM_ENET1_RGMII_TXD3 0 +#define IMX8QM_ENET1_RGMII_TXD3_DMA_UART3_RTS_B IMX8QM_ENET1_RGMII_TXD3 1 +#define IMX8QM_ENET1_RGMII_TXD3_VPU_TSI_S1_SYNC IMX8QM_ENET1_RGMII_TXD3 2 +#define IMX8QM_ENET1_RGMII_TXD3_LSIO_GPIO6_IO15 IMX8QM_ENET1_RGMII_TXD3 3 +#define IMX8QM_ENET1_RGMII_RXC_CONN_ENET1_RGMII_RXC IMX8QM_ENET1_RGMII_RXC 0 +#define IMX8QM_ENET1_RGMII_RXC_DMA_UART3_CTS_B IMX8QM_ENET1_RGMII_RXC 1 +#define IMX8QM_ENET1_RGMII_RXC_VPU_TSI_S1_DATA IMX8QM_ENET1_RGMII_RXC 2 +#define IMX8QM_ENET1_RGMII_RXC_LSIO_GPIO6_IO16 IMX8QM_ENET1_RGMII_RXC 3 +#define IMX8QM_ENET1_RGMII_RX_CTL_CONN_ENET1_RGMII_RX_CTL IMX8QM_ENET1_RGMII_RX_CTL 0 +#define IMX8QM_ENET1_RGMII_RX_CTL_VPU_TSI_S0_VID IMX8QM_ENET1_RGMII_RX_CTL 2 +#define IMX8QM_ENET1_RGMII_RX_CTL_LSIO_GPIO6_IO17 IMX8QM_ENET1_RGMII_RX_CTL 3 +#define IMX8QM_ENET1_RGMII_RXD0_CONN_ENET1_RGMII_RXD0 IMX8QM_ENET1_RGMII_RXD0 0 +#define IMX8QM_ENET1_RGMII_RXD0_VPU_TSI_S0_SYNC IMX8QM_ENET1_RGMII_RXD0 2 +#define IMX8QM_ENET1_RGMII_RXD0_LSIO_GPIO6_IO18 IMX8QM_ENET1_RGMII_RXD0 3 +#define IMX8QM_ENET1_RGMII_RXD1_CONN_ENET1_RGMII_RXD1 IMX8QM_ENET1_RGMII_RXD1 0 +#define IMX8QM_ENET1_RGMII_RXD1_VPU_TSI_S0_DATA IMX8QM_ENET1_RGMII_RXD1 2 +#define IMX8QM_ENET1_RGMII_RXD1_LSIO_GPIO6_IO19 IMX8QM_ENET1_RGMII_RXD1 3 +#define IMX8QM_ENET1_RGMII_RXD2_CONN_ENET1_RGMII_RXD2 IMX8QM_ENET1_RGMII_RXD2 0 +#define IMX8QM_ENET1_RGMII_RXD2_CONN_ENET1_RMII_RX_ER IMX8QM_ENET1_RGMII_RXD2 1 +#define IMX8QM_ENET1_RGMII_RXD2_VPU_TSI_S0_CLK IMX8QM_ENET1_RGMII_RXD2 2 +#define IMX8QM_ENET1_RGMII_RXD2_LSIO_GPIO6_IO20 IMX8QM_ENET1_RGMII_RXD2 3 +#define IMX8QM_ENET1_RGMII_RXD3_CONN_ENET1_RGMII_RXD3 IMX8QM_ENET1_RGMII_RXD3 0 +#define IMX8QM_ENET1_RGMII_RXD3_DMA_UART3_RX IMX8QM_ENET1_RGMII_RXD3 1 +#define IMX8QM_ENET1_RGMII_RXD3_VPU_TSI_S1_CLK IMX8QM_ENET1_RGMII_RXD3 2 +#define IMX8QM_ENET1_RGMII_RXD3_LSIO_GPIO6_IO21 IMX8QM_ENET1_RGMII_RXD3 3 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB_PAD IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB 0 +#define IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA_PAD IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA 0 + +#endif /* _IMX8QM_PADS_H */ diff --git a/include/dt-bindings/pinctrl/pads-imx8qxp.h b/include/dt-bindings/pinctrl/pads-imx8qxp.h new file mode 100644 index 000000000000..fbfee7ecf844 --- /dev/null +++ b/include/dt-bindings/pinctrl/pads-imx8qxp.h @@ -0,0 +1,751 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 
(C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017~2018 NXP + */ + +#ifndef _IMX8QXP_PADS_H +#define _IMX8QXP_PADS_H + +/* pin id */ +#define IMX8QXP_PCIE_CTRL0_PERST_B 0 +#define IMX8QXP_PCIE_CTRL0_CLKREQ_B 1 +#define IMX8QXP_PCIE_CTRL0_WAKE_B 2 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_PCIESEP 3 +#define IMX8QXP_USB_SS3_TC0 4 +#define IMX8QXP_USB_SS3_TC1 5 +#define IMX8QXP_USB_SS3_TC2 6 +#define IMX8QXP_USB_SS3_TC3 7 +#define IMX8QXP_COMP_CTL_GPIO_3V3_USB3IO 8 +#define IMX8QXP_EMMC0_CLK 9 +#define IMX8QXP_EMMC0_CMD 10 +#define IMX8QXP_EMMC0_DATA0 11 +#define IMX8QXP_EMMC0_DATA1 12 +#define IMX8QXP_EMMC0_DATA2 13 +#define IMX8QXP_EMMC0_DATA3 14 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_SD1FIX0 15 +#define IMX8QXP_EMMC0_DATA4 16 +#define IMX8QXP_EMMC0_DATA5 17 +#define IMX8QXP_EMMC0_DATA6 18 +#define IMX8QXP_EMMC0_DATA7 19 +#define IMX8QXP_EMMC0_STROBE 20 +#define IMX8QXP_EMMC0_RESET_B 21 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_SD1FIX1 22 +#define IMX8QXP_USDHC1_RESET_B 23 +#define IMX8QXP_USDHC1_VSELECT 24 +#define IMX8QXP_CTL_NAND_RE_P_N 25 +#define IMX8QXP_USDHC1_WP 26 +#define IMX8QXP_USDHC1_CD_B 27 +#define IMX8QXP_CTL_NAND_DQS_P_N 28 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_VSELSEP 29 +#define IMX8QXP_USDHC1_CLK 30 +#define IMX8QXP_USDHC1_CMD 31 +#define IMX8QXP_USDHC1_DATA0 32 +#define IMX8QXP_USDHC1_DATA1 33 +#define IMX8QXP_USDHC1_DATA2 34 +#define IMX8QXP_USDHC1_DATA3 35 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_VSEL3 36 +#define IMX8QXP_ENET0_RGMII_TXC 37 +#define IMX8QXP_ENET0_RGMII_TX_CTL 38 +#define IMX8QXP_ENET0_RGMII_TXD0 39 +#define IMX8QXP_ENET0_RGMII_TXD1 40 +#define IMX8QXP_ENET0_RGMII_TXD2 41 +#define IMX8QXP_ENET0_RGMII_TXD3 42 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0 43 +#define IMX8QXP_ENET0_RGMII_RXC 44 +#define IMX8QXP_ENET0_RGMII_RX_CTL 45 +#define IMX8QXP_ENET0_RGMII_RXD0 46 +#define IMX8QXP_ENET0_RGMII_RXD1 47 +#define IMX8QXP_ENET0_RGMII_RXD2 48 +#define IMX8QXP_ENET0_RGMII_RXD3 49 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1 50 +#define IMX8QXP_ENET0_REFCLK_125M_25M 51 +#define IMX8QXP_ENET0_MDIO 52 +#define IMX8QXP_ENET0_MDC 53 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIOCT 54 +#define IMX8QXP_ESAI0_FSR 55 +#define IMX8QXP_ESAI0_FST 56 +#define IMX8QXP_ESAI0_SCKR 57 +#define IMX8QXP_ESAI0_SCKT 58 +#define IMX8QXP_ESAI0_TX0 59 +#define IMX8QXP_ESAI0_TX1 60 +#define IMX8QXP_ESAI0_TX2_RX3 61 +#define IMX8QXP_ESAI0_TX3_RX2 62 +#define IMX8QXP_ESAI0_TX4_RX1 63 +#define IMX8QXP_ESAI0_TX5_RX0 64 +#define IMX8QXP_SPDIF0_RX 65 +#define IMX8QXP_SPDIF0_TX 66 +#define IMX8QXP_SPDIF0_EXT_CLK 67 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHB 68 +#define IMX8QXP_SPI3_SCK 69 +#define IMX8QXP_SPI3_SDO 70 +#define IMX8QXP_SPI3_SDI 71 +#define IMX8QXP_SPI3_CS0 72 +#define IMX8QXP_SPI3_CS1 73 +#define IMX8QXP_MCLK_IN1 74 +#define IMX8QXP_MCLK_IN0 75 +#define IMX8QXP_MCLK_OUT0 76 +#define IMX8QXP_UART1_TX 77 +#define IMX8QXP_UART1_RX 78 +#define IMX8QXP_UART1_RTS_B 79 +#define IMX8QXP_UART1_CTS_B 80 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHK 81 +#define IMX8QXP_SAI0_TXD 82 +#define IMX8QXP_SAI0_TXC 83 +#define IMX8QXP_SAI0_RXD 84 +#define IMX8QXP_SAI0_TXFS 85 +#define IMX8QXP_SAI1_RXD 86 +#define IMX8QXP_SAI1_RXC 87 +#define IMX8QXP_SAI1_RXFS 88 +#define IMX8QXP_SPI2_CS0 89 +#define IMX8QXP_SPI2_SDO 90 +#define IMX8QXP_SPI2_SDI 91 +#define IMX8QXP_SPI2_SCK 92 +#define IMX8QXP_SPI0_SCK 93 +#define IMX8QXP_SPI0_SDI 94 +#define IMX8QXP_SPI0_SDO 95 +#define IMX8QXP_SPI0_CS1 96 +#define IMX8QXP_SPI0_CS0 97 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHT 98 +#define 
IMX8QXP_ADC_IN1 99 +#define IMX8QXP_ADC_IN0 100 +#define IMX8QXP_ADC_IN3 101 +#define IMX8QXP_ADC_IN2 102 +#define IMX8QXP_ADC_IN5 103 +#define IMX8QXP_ADC_IN4 104 +#define IMX8QXP_FLEXCAN0_RX 105 +#define IMX8QXP_FLEXCAN0_TX 106 +#define IMX8QXP_FLEXCAN1_RX 107 +#define IMX8QXP_FLEXCAN1_TX 108 +#define IMX8QXP_FLEXCAN2_RX 109 +#define IMX8QXP_FLEXCAN2_TX 110 +#define IMX8QXP_UART0_RX 111 +#define IMX8QXP_UART0_TX 112 +#define IMX8QXP_UART2_TX 113 +#define IMX8QXP_UART2_RX 114 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIOLH 115 +#define IMX8QXP_MIPI_DSI0_I2C0_SCL 116 +#define IMX8QXP_MIPI_DSI0_I2C0_SDA 117 +#define IMX8QXP_MIPI_DSI0_GPIO0_00 118 +#define IMX8QXP_MIPI_DSI0_GPIO0_01 119 +#define IMX8QXP_MIPI_DSI1_I2C0_SCL 120 +#define IMX8QXP_MIPI_DSI1_I2C0_SDA 121 +#define IMX8QXP_MIPI_DSI1_GPIO0_00 122 +#define IMX8QXP_MIPI_DSI1_GPIO0_01 123 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_MIPIDSIGPIO 124 +#define IMX8QXP_JTAG_TRST_B 125 +#define IMX8QXP_PMIC_I2C_SCL 126 +#define IMX8QXP_PMIC_I2C_SDA 127 +#define IMX8QXP_PMIC_INT_B 128 +#define IMX8QXP_SCU_GPIO0_00 129 +#define IMX8QXP_SCU_GPIO0_01 130 +#define IMX8QXP_SCU_PMIC_STANDBY 131 +#define IMX8QXP_SCU_BOOT_MODE0 132 +#define IMX8QXP_SCU_BOOT_MODE1 133 +#define IMX8QXP_SCU_BOOT_MODE2 134 +#define IMX8QXP_SCU_BOOT_MODE3 135 +#define IMX8QXP_CSI_D00 136 +#define IMX8QXP_CSI_D01 137 +#define IMX8QXP_CSI_D02 138 +#define IMX8QXP_CSI_D03 139 +#define IMX8QXP_CSI_D04 140 +#define IMX8QXP_CSI_D05 141 +#define IMX8QXP_CSI_D06 142 +#define IMX8QXP_CSI_D07 143 +#define IMX8QXP_CSI_HSYNC 144 +#define IMX8QXP_CSI_VSYNC 145 +#define IMX8QXP_CSI_PCLK 146 +#define IMX8QXP_CSI_MCLK 147 +#define IMX8QXP_CSI_EN 148 +#define IMX8QXP_CSI_RESET 149 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_GPIORHD 150 +#define IMX8QXP_MIPI_CSI0_MCLK_OUT 151 +#define IMX8QXP_MIPI_CSI0_I2C0_SCL 152 +#define IMX8QXP_MIPI_CSI0_I2C0_SDA 153 +#define IMX8QXP_MIPI_CSI0_GPIO0_01 154 +#define IMX8QXP_MIPI_CSI0_GPIO0_00 155 +#define IMX8QXP_QSPI0A_DATA0 156 +#define IMX8QXP_QSPI0A_DATA1 157 +#define IMX8QXP_QSPI0A_DATA2 158 +#define IMX8QXP_QSPI0A_DATA3 159 +#define IMX8QXP_QSPI0A_DQS 160 +#define IMX8QXP_QSPI0A_SS0_B 161 +#define IMX8QXP_QSPI0A_SS1_B 162 +#define IMX8QXP_QSPI0A_SCLK 163 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_QSPI0A 164 +#define IMX8QXP_QSPI0B_SCLK 165 +#define IMX8QXP_QSPI0B_DATA0 166 +#define IMX8QXP_QSPI0B_DATA1 167 +#define IMX8QXP_QSPI0B_DATA2 168 +#define IMX8QXP_QSPI0B_DATA3 169 +#define IMX8QXP_QSPI0B_DQS 170 +#define IMX8QXP_QSPI0B_SS0_B 171 +#define IMX8QXP_QSPI0B_SS1_B 172 +#define IMX8QXP_COMP_CTL_GPIO_1V8_3V3_QSPI0B 173 + +/* + * format: <pin_id mux_mode> + */ +#define IMX8QXP_PCIE_CTRL0_PERST_B_HSIO_PCIE0_PERST_B IMX8QXP_PCIE_CTRL0_PERST_B 0 +#define IMX8QXP_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO00 IMX8QXP_PCIE_CTRL0_PERST_B 4 +#define IMX8QXP_PCIE_CTRL0_CLKREQ_B_HSIO_PCIE0_CLKREQ_B IMX8QXP_PCIE_CTRL0_CLKREQ_B 0 +#define IMX8QXP_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO01 IMX8QXP_PCIE_CTRL0_CLKREQ_B 4 +#define IMX8QXP_PCIE_CTRL0_WAKE_B_HSIO_PCIE0_WAKE_B IMX8QXP_PCIE_CTRL0_WAKE_B 0 +#define IMX8QXP_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO02 IMX8QXP_PCIE_CTRL0_WAKE_B 4 +#define IMX8QXP_USB_SS3_TC0_ADMA_I2C1_SCL IMX8QXP_USB_SS3_TC0 0 +#define IMX8QXP_USB_SS3_TC0_CONN_USB_OTG1_PWR IMX8QXP_USB_SS3_TC0 1 +#define IMX8QXP_USB_SS3_TC0_CONN_USB_OTG2_PWR IMX8QXP_USB_SS3_TC0 2 +#define IMX8QXP_USB_SS3_TC0_LSIO_GPIO4_IO03 IMX8QXP_USB_SS3_TC0 4 +#define IMX8QXP_USB_SS3_TC1_ADMA_I2C1_SCL IMX8QXP_USB_SS3_TC1 0 +#define IMX8QXP_USB_SS3_TC1_CONN_USB_OTG2_PWR IMX8QXP_USB_SS3_TC1 1 +#define 
IMX8QXP_USB_SS3_TC1_LSIO_GPIO4_IO04 IMX8QXP_USB_SS3_TC1 4 +#define IMX8QXP_USB_SS3_TC2_ADMA_I2C1_SDA IMX8QXP_USB_SS3_TC2 0 +#define IMX8QXP_USB_SS3_TC2_CONN_USB_OTG1_OC IMX8QXP_USB_SS3_TC2 1 +#define IMX8QXP_USB_SS3_TC2_CONN_USB_OTG2_OC IMX8QXP_USB_SS3_TC2 2 +#define IMX8QXP_USB_SS3_TC2_LSIO_GPIO4_IO05 IMX8QXP_USB_SS3_TC2 4 +#define IMX8QXP_USB_SS3_TC3_ADMA_I2C1_SDA IMX8QXP_USB_SS3_TC3 0 +#define IMX8QXP_USB_SS3_TC3_CONN_USB_OTG2_OC IMX8QXP_USB_SS3_TC3 1 +#define IMX8QXP_USB_SS3_TC3_LSIO_GPIO4_IO06 IMX8QXP_USB_SS3_TC3 4 +#define IMX8QXP_EMMC0_CLK_CONN_EMMC0_CLK IMX8QXP_EMMC0_CLK 0 +#define IMX8QXP_EMMC0_CLK_CONN_NAND_READY_B IMX8QXP_EMMC0_CLK 1 +#define IMX8QXP_EMMC0_CLK_LSIO_GPIO4_IO07 IMX8QXP_EMMC0_CLK 4 +#define IMX8QXP_EMMC0_CMD_CONN_EMMC0_CMD IMX8QXP_EMMC0_CMD 0 +#define IMX8QXP_EMMC0_CMD_CONN_NAND_DQS IMX8QXP_EMMC0_CMD 1 +#define IMX8QXP_EMMC0_CMD_LSIO_GPIO4_IO08 IMX8QXP_EMMC0_CMD 4 +#define IMX8QXP_EMMC0_DATA0_CONN_EMMC0_DATA0 IMX8QXP_EMMC0_DATA0 0 +#define IMX8QXP_EMMC0_DATA0_CONN_NAND_DATA00 IMX8QXP_EMMC0_DATA0 1 +#define IMX8QXP_EMMC0_DATA0_LSIO_GPIO4_IO09 IMX8QXP_EMMC0_DATA0 4 +#define IMX8QXP_EMMC0_DATA1_CONN_EMMC0_DATA1 IMX8QXP_EMMC0_DATA1 0 +#define IMX8QXP_EMMC0_DATA1_CONN_NAND_DATA01 IMX8QXP_EMMC0_DATA1 1 +#define IMX8QXP_EMMC0_DATA1_LSIO_GPIO4_IO10 IMX8QXP_EMMC0_DATA1 4 +#define IMX8QXP_EMMC0_DATA2_CONN_EMMC0_DATA2 IMX8QXP_EMMC0_DATA2 0 +#define IMX8QXP_EMMC0_DATA2_CONN_NAND_DATA02 IMX8QXP_EMMC0_DATA2 1 +#define IMX8QXP_EMMC0_DATA2_LSIO_GPIO4_IO11 IMX8QXP_EMMC0_DATA2 4 +#define IMX8QXP_EMMC0_DATA3_CONN_EMMC0_DATA3 IMX8QXP_EMMC0_DATA3 0 +#define IMX8QXP_EMMC0_DATA3_CONN_NAND_DATA03 IMX8QXP_EMMC0_DATA3 1 +#define IMX8QXP_EMMC0_DATA3_LSIO_GPIO4_IO12 IMX8QXP_EMMC0_DATA3 4 +#define IMX8QXP_EMMC0_DATA4_CONN_EMMC0_DATA4 IMX8QXP_EMMC0_DATA4 0 +#define IMX8QXP_EMMC0_DATA4_CONN_NAND_DATA04 IMX8QXP_EMMC0_DATA4 1 +#define IMX8QXP_EMMC0_DATA4_CONN_EMMC0_WP IMX8QXP_EMMC0_DATA4 3 +#define IMX8QXP_EMMC0_DATA4_LSIO_GPIO4_IO13 IMX8QXP_EMMC0_DATA4 4 +#define IMX8QXP_EMMC0_DATA5_CONN_EMMC0_DATA5 IMX8QXP_EMMC0_DATA5 0 +#define IMX8QXP_EMMC0_DATA5_CONN_NAND_DATA05 IMX8QXP_EMMC0_DATA5 1 +#define IMX8QXP_EMMC0_DATA5_CONN_EMMC0_VSELECT IMX8QXP_EMMC0_DATA5 3 +#define IMX8QXP_EMMC0_DATA5_LSIO_GPIO4_IO14 IMX8QXP_EMMC0_DATA5 4 +#define IMX8QXP_EMMC0_DATA6_CONN_EMMC0_DATA6 IMX8QXP_EMMC0_DATA6 0 +#define IMX8QXP_EMMC0_DATA6_CONN_NAND_DATA06 IMX8QXP_EMMC0_DATA6 1 +#define IMX8QXP_EMMC0_DATA6_CONN_MLB_CLK IMX8QXP_EMMC0_DATA6 3 +#define IMX8QXP_EMMC0_DATA6_LSIO_GPIO4_IO15 IMX8QXP_EMMC0_DATA6 4 +#define IMX8QXP_EMMC0_DATA7_CONN_EMMC0_DATA7 IMX8QXP_EMMC0_DATA7 0 +#define IMX8QXP_EMMC0_DATA7_CONN_NAND_DATA07 IMX8QXP_EMMC0_DATA7 1 +#define IMX8QXP_EMMC0_DATA7_CONN_MLB_SIG IMX8QXP_EMMC0_DATA7 3 +#define IMX8QXP_EMMC0_DATA7_LSIO_GPIO4_IO16 IMX8QXP_EMMC0_DATA7 4 +#define IMX8QXP_EMMC0_STROBE_CONN_EMMC0_STROBE IMX8QXP_EMMC0_STROBE 0 +#define IMX8QXP_EMMC0_STROBE_CONN_NAND_CLE IMX8QXP_EMMC0_STROBE 1 +#define IMX8QXP_EMMC0_STROBE_CONN_MLB_DATA IMX8QXP_EMMC0_STROBE 3 +#define IMX8QXP_EMMC0_STROBE_LSIO_GPIO4_IO17 IMX8QXP_EMMC0_STROBE 4 +#define IMX8QXP_EMMC0_RESET_B_CONN_EMMC0_RESET_B IMX8QXP_EMMC0_RESET_B 0 +#define IMX8QXP_EMMC0_RESET_B_CONN_NAND_WP_B IMX8QXP_EMMC0_RESET_B 1 +#define IMX8QXP_EMMC0_RESET_B_LSIO_GPIO4_IO18 IMX8QXP_EMMC0_RESET_B 4 +#define IMX8QXP_USDHC1_RESET_B_CONN_USDHC1_RESET_B IMX8QXP_USDHC1_RESET_B 0 +#define IMX8QXP_USDHC1_RESET_B_CONN_NAND_RE_N IMX8QXP_USDHC1_RESET_B 1 +#define IMX8QXP_USDHC1_RESET_B_ADMA_SPI2_SCK IMX8QXP_USDHC1_RESET_B 2 +#define IMX8QXP_USDHC1_RESET_B_LSIO_GPIO4_IO19 
IMX8QXP_USDHC1_RESET_B 4 +#define IMX8QXP_USDHC1_VSELECT_CONN_USDHC1_VSELECT IMX8QXP_USDHC1_VSELECT 0 +#define IMX8QXP_USDHC1_VSELECT_CONN_NAND_RE_P IMX8QXP_USDHC1_VSELECT 1 +#define IMX8QXP_USDHC1_VSELECT_ADMA_SPI2_SDO IMX8QXP_USDHC1_VSELECT 2 +#define IMX8QXP_USDHC1_VSELECT_CONN_NAND_RE_B IMX8QXP_USDHC1_VSELECT 3 +#define IMX8QXP_USDHC1_VSELECT_LSIO_GPIO4_IO20 IMX8QXP_USDHC1_VSELECT 4 +#define IMX8QXP_USDHC1_WP_CONN_USDHC1_WP IMX8QXP_USDHC1_WP 0 +#define IMX8QXP_USDHC1_WP_CONN_NAND_DQS_N IMX8QXP_USDHC1_WP 1 +#define IMX8QXP_USDHC1_WP_ADMA_SPI2_SDI IMX8QXP_USDHC1_WP 2 +#define IMX8QXP_USDHC1_WP_LSIO_GPIO4_IO21 IMX8QXP_USDHC1_WP 4 +#define IMX8QXP_USDHC1_CD_B_CONN_USDHC1_CD_B IMX8QXP_USDHC1_CD_B 0 +#define IMX8QXP_USDHC1_CD_B_CONN_NAND_DQS_P IMX8QXP_USDHC1_CD_B 1 +#define IMX8QXP_USDHC1_CD_B_ADMA_SPI2_CS0 IMX8QXP_USDHC1_CD_B 2 +#define IMX8QXP_USDHC1_CD_B_CONN_NAND_DQS IMX8QXP_USDHC1_CD_B 3 +#define IMX8QXP_USDHC1_CD_B_LSIO_GPIO4_IO22 IMX8QXP_USDHC1_CD_B 4 +#define IMX8QXP_USDHC1_CLK_CONN_USDHC1_CLK IMX8QXP_USDHC1_CLK 0 +#define IMX8QXP_USDHC1_CLK_ADMA_UART3_RX IMX8QXP_USDHC1_CLK 2 +#define IMX8QXP_USDHC1_CLK_LSIO_GPIO4_IO23 IMX8QXP_USDHC1_CLK 4 +#define IMX8QXP_USDHC1_CMD_CONN_USDHC1_CMD IMX8QXP_USDHC1_CMD 0 +#define IMX8QXP_USDHC1_CMD_CONN_NAND_CE0_B IMX8QXP_USDHC1_CMD 1 +#define IMX8QXP_USDHC1_CMD_ADMA_MQS_R IMX8QXP_USDHC1_CMD 2 +#define IMX8QXP_USDHC1_CMD_LSIO_GPIO4_IO24 IMX8QXP_USDHC1_CMD 4 +#define IMX8QXP_USDHC1_DATA0_CONN_USDHC1_DATA0 IMX8QXP_USDHC1_DATA0 0 +#define IMX8QXP_USDHC1_DATA0_CONN_NAND_CE1_B IMX8QXP_USDHC1_DATA0 1 +#define IMX8QXP_USDHC1_DATA0_ADMA_MQS_L IMX8QXP_USDHC1_DATA0 2 +#define IMX8QXP_USDHC1_DATA0_LSIO_GPIO4_IO25 IMX8QXP_USDHC1_DATA0 4 +#define IMX8QXP_USDHC1_DATA1_CONN_USDHC1_DATA1 IMX8QXP_USDHC1_DATA1 0 +#define IMX8QXP_USDHC1_DATA1_CONN_NAND_RE_B IMX8QXP_USDHC1_DATA1 1 +#define IMX8QXP_USDHC1_DATA1_ADMA_UART3_TX IMX8QXP_USDHC1_DATA1 2 +#define IMX8QXP_USDHC1_DATA1_LSIO_GPIO4_IO26 IMX8QXP_USDHC1_DATA1 4 +#define IMX8QXP_USDHC1_DATA2_CONN_USDHC1_DATA2 IMX8QXP_USDHC1_DATA2 0 +#define IMX8QXP_USDHC1_DATA2_CONN_NAND_WE_B IMX8QXP_USDHC1_DATA2 1 +#define IMX8QXP_USDHC1_DATA2_ADMA_UART3_CTS_B IMX8QXP_USDHC1_DATA2 2 +#define IMX8QXP_USDHC1_DATA2_LSIO_GPIO4_IO27 IMX8QXP_USDHC1_DATA2 4 +#define IMX8QXP_USDHC1_DATA3_CONN_USDHC1_DATA3 IMX8QXP_USDHC1_DATA3 0 +#define IMX8QXP_USDHC1_DATA3_CONN_NAND_ALE IMX8QXP_USDHC1_DATA3 1 +#define IMX8QXP_USDHC1_DATA3_ADMA_UART3_RTS_B IMX8QXP_USDHC1_DATA3 2 +#define IMX8QXP_USDHC1_DATA3_LSIO_GPIO4_IO28 IMX8QXP_USDHC1_DATA3 4 +#define IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC IMX8QXP_ENET0_RGMII_TXC 0 +#define IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_OUT IMX8QXP_ENET0_RGMII_TXC 1 +#define IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_IN IMX8QXP_ENET0_RGMII_TXC 2 +#define IMX8QXP_ENET0_RGMII_TXC_CONN_NAND_CE1_B IMX8QXP_ENET0_RGMII_TXC 3 +#define IMX8QXP_ENET0_RGMII_TXC_LSIO_GPIO4_IO29 IMX8QXP_ENET0_RGMII_TXC 4 +#define IMX8QXP_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL IMX8QXP_ENET0_RGMII_TX_CTL 0 +#define IMX8QXP_ENET0_RGMII_TX_CTL_CONN_USDHC1_RESET_B IMX8QXP_ENET0_RGMII_TX_CTL 3 +#define IMX8QXP_ENET0_RGMII_TX_CTL_LSIO_GPIO4_IO30 IMX8QXP_ENET0_RGMII_TX_CTL 4 +#define IMX8QXP_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 IMX8QXP_ENET0_RGMII_TXD0 0 +#define IMX8QXP_ENET0_RGMII_TXD0_CONN_USDHC1_VSELECT IMX8QXP_ENET0_RGMII_TXD0 3 +#define IMX8QXP_ENET0_RGMII_TXD0_LSIO_GPIO4_IO31 IMX8QXP_ENET0_RGMII_TXD0 4 +#define IMX8QXP_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 IMX8QXP_ENET0_RGMII_TXD1 0 +#define IMX8QXP_ENET0_RGMII_TXD1_CONN_USDHC1_WP 
IMX8QXP_ENET0_RGMII_TXD1 3 +#define IMX8QXP_ENET0_RGMII_TXD1_LSIO_GPIO5_IO00 IMX8QXP_ENET0_RGMII_TXD1 4 +#define IMX8QXP_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 IMX8QXP_ENET0_RGMII_TXD2 0 +#define IMX8QXP_ENET0_RGMII_TXD2_CONN_MLB_CLK IMX8QXP_ENET0_RGMII_TXD2 1 +#define IMX8QXP_ENET0_RGMII_TXD2_CONN_NAND_CE0_B IMX8QXP_ENET0_RGMII_TXD2 2 +#define IMX8QXP_ENET0_RGMII_TXD2_CONN_USDHC1_CD_B IMX8QXP_ENET0_RGMII_TXD2 3 +#define IMX8QXP_ENET0_RGMII_TXD2_LSIO_GPIO5_IO01 IMX8QXP_ENET0_RGMII_TXD2 4 +#define IMX8QXP_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 IMX8QXP_ENET0_RGMII_TXD3 0 +#define IMX8QXP_ENET0_RGMII_TXD3_CONN_MLB_SIG IMX8QXP_ENET0_RGMII_TXD3 1 +#define IMX8QXP_ENET0_RGMII_TXD3_CONN_NAND_RE_B IMX8QXP_ENET0_RGMII_TXD3 2 +#define IMX8QXP_ENET0_RGMII_TXD3_LSIO_GPIO5_IO02 IMX8QXP_ENET0_RGMII_TXD3 4 +#define IMX8QXP_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC IMX8QXP_ENET0_RGMII_RXC 0 +#define IMX8QXP_ENET0_RGMII_RXC_CONN_MLB_DATA IMX8QXP_ENET0_RGMII_RXC 1 +#define IMX8QXP_ENET0_RGMII_RXC_CONN_NAND_WE_B IMX8QXP_ENET0_RGMII_RXC 2 +#define IMX8QXP_ENET0_RGMII_RXC_CONN_USDHC1_CLK IMX8QXP_ENET0_RGMII_RXC 3 +#define IMX8QXP_ENET0_RGMII_RXC_LSIO_GPIO5_IO03 IMX8QXP_ENET0_RGMII_RXC 4 +#define IMX8QXP_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL IMX8QXP_ENET0_RGMII_RX_CTL 0 +#define IMX8QXP_ENET0_RGMII_RX_CTL_CONN_USDHC1_CMD IMX8QXP_ENET0_RGMII_RX_CTL 3 +#define IMX8QXP_ENET0_RGMII_RX_CTL_LSIO_GPIO5_IO04 IMX8QXP_ENET0_RGMII_RX_CTL 4 +#define IMX8QXP_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 IMX8QXP_ENET0_RGMII_RXD0 0 +#define IMX8QXP_ENET0_RGMII_RXD0_CONN_USDHC1_DATA0 IMX8QXP_ENET0_RGMII_RXD0 3 +#define IMX8QXP_ENET0_RGMII_RXD0_LSIO_GPIO5_IO05 IMX8QXP_ENET0_RGMII_RXD0 4 +#define IMX8QXP_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 IMX8QXP_ENET0_RGMII_RXD1 0 +#define IMX8QXP_ENET0_RGMII_RXD1_CONN_USDHC1_DATA1 IMX8QXP_ENET0_RGMII_RXD1 3 +#define IMX8QXP_ENET0_RGMII_RXD1_LSIO_GPIO5_IO06 IMX8QXP_ENET0_RGMII_RXD1 4 +#define IMX8QXP_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 IMX8QXP_ENET0_RGMII_RXD2 0 +#define IMX8QXP_ENET0_RGMII_RXD2_CONN_ENET0_RMII_RX_ER IMX8QXP_ENET0_RGMII_RXD2 1 +#define IMX8QXP_ENET0_RGMII_RXD2_CONN_USDHC1_DATA2 IMX8QXP_ENET0_RGMII_RXD2 3 +#define IMX8QXP_ENET0_RGMII_RXD2_LSIO_GPIO5_IO07 IMX8QXP_ENET0_RGMII_RXD2 4 +#define IMX8QXP_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 IMX8QXP_ENET0_RGMII_RXD3 0 +#define IMX8QXP_ENET0_RGMII_RXD3_CONN_NAND_ALE IMX8QXP_ENET0_RGMII_RXD3 2 +#define IMX8QXP_ENET0_RGMII_RXD3_CONN_USDHC1_DATA3 IMX8QXP_ENET0_RGMII_RXD3 3 +#define IMX8QXP_ENET0_RGMII_RXD3_LSIO_GPIO5_IO08 IMX8QXP_ENET0_RGMII_RXD3 4 +#define IMX8QXP_ENET0_REFCLK_125M_25M_CONN_ENET0_REFCLK_125M_25M IMX8QXP_ENET0_REFCLK_125M_25M 0 +#define IMX8QXP_ENET0_REFCLK_125M_25M_CONN_ENET0_PPS IMX8QXP_ENET0_REFCLK_125M_25M 1 +#define IMX8QXP_ENET0_REFCLK_125M_25M_CONN_ENET1_PPS IMX8QXP_ENET0_REFCLK_125M_25M 2 +#define IMX8QXP_ENET0_REFCLK_125M_25M_LSIO_GPIO5_IO09 IMX8QXP_ENET0_REFCLK_125M_25M 4 +#define IMX8QXP_ENET0_MDIO_CONN_ENET0_MDIO IMX8QXP_ENET0_MDIO 0 +#define IMX8QXP_ENET0_MDIO_ADMA_I2C3_SDA IMX8QXP_ENET0_MDIO 1 +#define IMX8QXP_ENET0_MDIO_CONN_ENET1_MDIO IMX8QXP_ENET0_MDIO 2 +#define IMX8QXP_ENET0_MDIO_LSIO_GPIO5_IO10 IMX8QXP_ENET0_MDIO 4 +#define IMX8QXP_ENET0_MDC_CONN_ENET0_MDC IMX8QXP_ENET0_MDC 0 +#define IMX8QXP_ENET0_MDC_ADMA_I2C3_SCL IMX8QXP_ENET0_MDC 1 +#define IMX8QXP_ENET0_MDC_CONN_ENET1_MDC IMX8QXP_ENET0_MDC 2 +#define IMX8QXP_ENET0_MDC_LSIO_GPIO5_IO11 IMX8QXP_ENET0_MDC 4 +#define IMX8QXP_ESAI0_FSR_ADMA_ESAI0_FSR IMX8QXP_ESAI0_FSR 0 +#define IMX8QXP_ESAI0_FSR_CONN_ENET1_RCLK50M_OUT IMX8QXP_ESAI0_FSR 1 +#define 
IMX8QXP_ESAI0_FSR_ADMA_LCDIF_D00 IMX8QXP_ESAI0_FSR 2 +#define IMX8QXP_ESAI0_FSR_CONN_ENET1_RGMII_TXC IMX8QXP_ESAI0_FSR 3 +#define IMX8QXP_ESAI0_FSR_CONN_ENET1_RCLK50M_IN IMX8QXP_ESAI0_FSR 4 +#define IMX8QXP_ESAI0_FST_ADMA_ESAI0_FST IMX8QXP_ESAI0_FST 0 +#define IMX8QXP_ESAI0_FST_CONN_MLB_CLK IMX8QXP_ESAI0_FST 1 +#define IMX8QXP_ESAI0_FST_ADMA_LCDIF_D01 IMX8QXP_ESAI0_FST 2 +#define IMX8QXP_ESAI0_FST_CONN_ENET1_RGMII_TXD2 IMX8QXP_ESAI0_FST 3 +#define IMX8QXP_ESAI0_FST_LSIO_GPIO0_IO01 IMX8QXP_ESAI0_FST 4 +#define IMX8QXP_ESAI0_SCKR_ADMA_ESAI0_SCKR IMX8QXP_ESAI0_SCKR 0 +#define IMX8QXP_ESAI0_SCKR_ADMA_LCDIF_D02 IMX8QXP_ESAI0_SCKR 2 +#define IMX8QXP_ESAI0_SCKR_CONN_ENET1_RGMII_TX_CTL IMX8QXP_ESAI0_SCKR 3 +#define IMX8QXP_ESAI0_SCKR_LSIO_GPIO0_IO02 IMX8QXP_ESAI0_SCKR 4 +#define IMX8QXP_ESAI0_SCKT_ADMA_ESAI0_SCKT IMX8QXP_ESAI0_SCKT 0 +#define IMX8QXP_ESAI0_SCKT_CONN_MLB_SIG IMX8QXP_ESAI0_SCKT 1 +#define IMX8QXP_ESAI0_SCKT_ADMA_LCDIF_D03 IMX8QXP_ESAI0_SCKT 2 +#define IMX8QXP_ESAI0_SCKT_CONN_ENET1_RGMII_TXD3 IMX8QXP_ESAI0_SCKT 3 +#define IMX8QXP_ESAI0_SCKT_LSIO_GPIO0_IO03 IMX8QXP_ESAI0_SCKT 4 +#define IMX8QXP_ESAI0_TX0_ADMA_ESAI0_TX0 IMX8QXP_ESAI0_TX0 0 +#define IMX8QXP_ESAI0_TX0_CONN_MLB_DATA IMX8QXP_ESAI0_TX0 1 +#define IMX8QXP_ESAI0_TX0_ADMA_LCDIF_D04 IMX8QXP_ESAI0_TX0 2 +#define IMX8QXP_ESAI0_TX0_CONN_ENET1_RGMII_RXC IMX8QXP_ESAI0_TX0 3 +#define IMX8QXP_ESAI0_TX0_LSIO_GPIO0_IO04 IMX8QXP_ESAI0_TX0 4 +#define IMX8QXP_ESAI0_TX1_ADMA_ESAI0_TX1 IMX8QXP_ESAI0_TX1 0 +#define IMX8QXP_ESAI0_TX1_ADMA_LCDIF_D05 IMX8QXP_ESAI0_TX1 2 +#define IMX8QXP_ESAI0_TX1_CONN_ENET1_RGMII_RXD3 IMX8QXP_ESAI0_TX1 3 +#define IMX8QXP_ESAI0_TX1_LSIO_GPIO0_IO05 IMX8QXP_ESAI0_TX1 4 +#define IMX8QXP_ESAI0_TX2_RX3_ADMA_ESAI0_TX2_RX3 IMX8QXP_ESAI0_TX2_RX3 0 +#define IMX8QXP_ESAI0_TX2_RX3_CONN_ENET1_RMII_RX_ER IMX8QXP_ESAI0_TX2_RX3 1 +#define IMX8QXP_ESAI0_TX2_RX3_ADMA_LCDIF_D06 IMX8QXP_ESAI0_TX2_RX3 2 +#define IMX8QXP_ESAI0_TX2_RX3_CONN_ENET1_RGMII_RXD2 IMX8QXP_ESAI0_TX2_RX3 3 +#define IMX8QXP_ESAI0_TX2_RX3_LSIO_GPIO0_IO06 IMX8QXP_ESAI0_TX2_RX3 4 +#define IMX8QXP_ESAI0_TX3_RX2_ADMA_ESAI0_TX3_RX2 IMX8QXP_ESAI0_TX3_RX2 0 +#define IMX8QXP_ESAI0_TX3_RX2_ADMA_LCDIF_D07 IMX8QXP_ESAI0_TX3_RX2 2 +#define IMX8QXP_ESAI0_TX3_RX2_CONN_ENET1_RGMII_RXD1 IMX8QXP_ESAI0_TX3_RX2 3 +#define IMX8QXP_ESAI0_TX3_RX2_LSIO_GPIO0_IO07 IMX8QXP_ESAI0_TX3_RX2 4 +#define IMX8QXP_ESAI0_TX4_RX1_ADMA_ESAI0_TX4_RX1 IMX8QXP_ESAI0_TX4_RX1 0 +#define IMX8QXP_ESAI0_TX4_RX1_ADMA_LCDIF_D08 IMX8QXP_ESAI0_TX4_RX1 2 +#define IMX8QXP_ESAI0_TX4_RX1_CONN_ENET1_RGMII_TXD0 IMX8QXP_ESAI0_TX4_RX1 3 +#define IMX8QXP_ESAI0_TX4_RX1_LSIO_GPIO0_IO08 IMX8QXP_ESAI0_TX4_RX1 4 +#define IMX8QXP_ESAI0_TX5_RX0_ADMA_ESAI0_TX5_RX0 IMX8QXP_ESAI0_TX5_RX0 0 +#define IMX8QXP_ESAI0_TX5_RX0_ADMA_LCDIF_D09 IMX8QXP_ESAI0_TX5_RX0 2 +#define IMX8QXP_ESAI0_TX5_RX0_CONN_ENET1_RGMII_TXD1 IMX8QXP_ESAI0_TX5_RX0 3 +#define IMX8QXP_ESAI0_TX5_RX0_LSIO_GPIO0_IO09 IMX8QXP_ESAI0_TX5_RX0 4 +#define IMX8QXP_SPDIF0_RX_ADMA_SPDIF0_RX IMX8QXP_SPDIF0_RX 0 +#define IMX8QXP_SPDIF0_RX_ADMA_MQS_R IMX8QXP_SPDIF0_RX 1 +#define IMX8QXP_SPDIF0_RX_ADMA_LCDIF_D10 IMX8QXP_SPDIF0_RX 2 +#define IMX8QXP_SPDIF0_RX_CONN_ENET1_RGMII_RXD0 IMX8QXP_SPDIF0_RX 3 +#define IMX8QXP_SPDIF0_RX_LSIO_GPIO0_IO10 IMX8QXP_SPDIF0_RX 4 +#define IMX8QXP_SPDIF0_TX_ADMA_SPDIF0_TX IMX8QXP_SPDIF0_TX 0 +#define IMX8QXP_SPDIF0_TX_ADMA_MQS_L IMX8QXP_SPDIF0_TX 1 +#define IMX8QXP_SPDIF0_TX_ADMA_LCDIF_D11 IMX8QXP_SPDIF0_TX 2 +#define IMX8QXP_SPDIF0_TX_CONN_ENET1_RGMII_RX_CTL IMX8QXP_SPDIF0_TX 3 +#define IMX8QXP_SPDIF0_TX_LSIO_GPIO0_IO11 IMX8QXP_SPDIF0_TX 4 
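As the in-file comment notes, each of these pad macros expands to the two cells <pin_id mux_mode>; a board device tree then appends a third cell carrying the pad-control value when it lists pins in an fsl,pins property of the SCU-based iomuxc node. A minimal sketch of that usage follows; the group label, node name and the 0x06000020 pad-control value are illustrative assumptions, not taken from this patch, while the two macros themselves are the CONN_ENET0_MDC/MDIO entries defined above.

    #include <dt-bindings/pinctrl/pads-imx8qxp.h>

    &iomuxc {
            /* hypothetical pin group; last cell is an example pad-control value */
            pinctrl_fec1: fec1grp {
                    fsl,pins = <
                            IMX8QXP_ENET0_MDC_CONN_ENET0_MDC        0x06000020
                            IMX8QXP_ENET0_MDIO_CONN_ENET0_MDIO      0x06000020
                    >;
            };
    };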
+#define IMX8QXP_SPDIF0_EXT_CLK_ADMA_SPDIF0_EXT_CLK IMX8QXP_SPDIF0_EXT_CLK 0 +#define IMX8QXP_SPDIF0_EXT_CLK_ADMA_LCDIF_D12 IMX8QXP_SPDIF0_EXT_CLK 2 +#define IMX8QXP_SPDIF0_EXT_CLK_CONN_ENET1_REFCLK_125M_25M IMX8QXP_SPDIF0_EXT_CLK 3 +#define IMX8QXP_SPDIF0_EXT_CLK_LSIO_GPIO0_IO12 IMX8QXP_SPDIF0_EXT_CLK 4 +#define IMX8QXP_SPI3_SCK_ADMA_SPI3_SCK IMX8QXP_SPI3_SCK 0 +#define IMX8QXP_SPI3_SCK_ADMA_LCDIF_D13 IMX8QXP_SPI3_SCK 2 +#define IMX8QXP_SPI3_SCK_LSIO_GPIO0_IO13 IMX8QXP_SPI3_SCK 4 +#define IMX8QXP_SPI3_SDO_ADMA_SPI3_SDO IMX8QXP_SPI3_SDO 0 +#define IMX8QXP_SPI3_SDO_ADMA_LCDIF_D14 IMX8QXP_SPI3_SDO 2 +#define IMX8QXP_SPI3_SDO_LSIO_GPIO0_IO14 IMX8QXP_SPI3_SDO 4 +#define IMX8QXP_SPI3_SDI_ADMA_SPI3_SDI IMX8QXP_SPI3_SDI 0 +#define IMX8QXP_SPI3_SDI_ADMA_LCDIF_D15 IMX8QXP_SPI3_SDI 2 +#define IMX8QXP_SPI3_SDI_LSIO_GPIO0_IO15 IMX8QXP_SPI3_SDI 4 +#define IMX8QXP_SPI3_CS0_ADMA_SPI3_CS0 IMX8QXP_SPI3_CS0 0 +#define IMX8QXP_SPI3_CS0_ADMA_ACM_MCLK_OUT1 IMX8QXP_SPI3_CS0 1 +#define IMX8QXP_SPI3_CS0_ADMA_LCDIF_HSYNC IMX8QXP_SPI3_CS0 2 +#define IMX8QXP_SPI3_CS0_LSIO_GPIO0_IO16 IMX8QXP_SPI3_CS0 4 +#define IMX8QXP_SPI3_CS1_ADMA_SPI3_CS1 IMX8QXP_SPI3_CS1 0 +#define IMX8QXP_SPI3_CS1_ADMA_I2C3_SCL IMX8QXP_SPI3_CS1 1 +#define IMX8QXP_SPI3_CS1_ADMA_LCDIF_RESET IMX8QXP_SPI3_CS1 2 +#define IMX8QXP_SPI3_CS1_ADMA_SPI2_CS0 IMX8QXP_SPI3_CS1 3 +#define IMX8QXP_SPI3_CS1_ADMA_LCDIF_D16 IMX8QXP_SPI3_CS1 4 +#define IMX8QXP_MCLK_IN1_ADMA_ACM_MCLK_IN1 IMX8QXP_MCLK_IN1 0 +#define IMX8QXP_MCLK_IN1_ADMA_I2C3_SDA IMX8QXP_MCLK_IN1 1 +#define IMX8QXP_MCLK_IN1_ADMA_LCDIF_EN IMX8QXP_MCLK_IN1 2 +#define IMX8QXP_MCLK_IN1_ADMA_SPI2_SCK IMX8QXP_MCLK_IN1 3 +#define IMX8QXP_MCLK_IN1_ADMA_LCDIF_D17 IMX8QXP_MCLK_IN1 4 +#define IMX8QXP_MCLK_IN0_ADMA_ACM_MCLK_IN0 IMX8QXP_MCLK_IN0 0 +#define IMX8QXP_MCLK_IN0_ADMA_ESAI0_RX_HF_CLK IMX8QXP_MCLK_IN0 1 +#define IMX8QXP_MCLK_IN0_ADMA_LCDIF_VSYNC IMX8QXP_MCLK_IN0 2 +#define IMX8QXP_MCLK_IN0_ADMA_SPI2_SDI IMX8QXP_MCLK_IN0 3 +#define IMX8QXP_MCLK_IN0_LSIO_GPIO0_IO19 IMX8QXP_MCLK_IN0 4 +#define IMX8QXP_MCLK_OUT0_ADMA_ACM_MCLK_OUT0 IMX8QXP_MCLK_OUT0 0 +#define IMX8QXP_MCLK_OUT0_ADMA_ESAI0_TX_HF_CLK IMX8QXP_MCLK_OUT0 1 +#define IMX8QXP_MCLK_OUT0_ADMA_LCDIF_CLK IMX8QXP_MCLK_OUT0 2 +#define IMX8QXP_MCLK_OUT0_ADMA_SPI2_SDO IMX8QXP_MCLK_OUT0 3 +#define IMX8QXP_MCLK_OUT0_LSIO_GPIO0_IO20 IMX8QXP_MCLK_OUT0 4 +#define IMX8QXP_UART1_TX_ADMA_UART1_TX IMX8QXP_UART1_TX 0 +#define IMX8QXP_UART1_TX_LSIO_PWM0_OUT IMX8QXP_UART1_TX 1 +#define IMX8QXP_UART1_TX_LSIO_GPT0_CAPTURE IMX8QXP_UART1_TX 2 +#define IMX8QXP_UART1_TX_LSIO_GPIO0_IO21 IMX8QXP_UART1_TX 4 +#define IMX8QXP_UART1_RX_ADMA_UART1_RX IMX8QXP_UART1_RX 0 +#define IMX8QXP_UART1_RX_LSIO_PWM1_OUT IMX8QXP_UART1_RX 1 +#define IMX8QXP_UART1_RX_LSIO_GPT0_COMPARE IMX8QXP_UART1_RX 2 +#define IMX8QXP_UART1_RX_LSIO_GPT1_CLK IMX8QXP_UART1_RX 3 +#define IMX8QXP_UART1_RX_LSIO_GPIO0_IO22 IMX8QXP_UART1_RX 4 +#define IMX8QXP_UART1_RTS_B_ADMA_UART1_RTS_B IMX8QXP_UART1_RTS_B 0 +#define IMX8QXP_UART1_RTS_B_LSIO_PWM2_OUT IMX8QXP_UART1_RTS_B 1 +#define IMX8QXP_UART1_RTS_B_ADMA_LCDIF_D16 IMX8QXP_UART1_RTS_B 2 +#define IMX8QXP_UART1_RTS_B_LSIO_GPT1_CAPTURE IMX8QXP_UART1_RTS_B 3 +#define IMX8QXP_UART1_RTS_B_LSIO_GPT0_CLK IMX8QXP_UART1_RTS_B 4 +#define IMX8QXP_UART1_CTS_B_ADMA_UART1_CTS_B IMX8QXP_UART1_CTS_B 0 +#define IMX8QXP_UART1_CTS_B_LSIO_PWM3_OUT IMX8QXP_UART1_CTS_B 1 +#define IMX8QXP_UART1_CTS_B_ADMA_LCDIF_D17 IMX8QXP_UART1_CTS_B 2 +#define IMX8QXP_UART1_CTS_B_LSIO_GPT1_COMPARE IMX8QXP_UART1_CTS_B 3 +#define IMX8QXP_UART1_CTS_B_LSIO_GPIO0_IO24 IMX8QXP_UART1_CTS_B 4 +#define 
IMX8QXP_SAI0_TXD_ADMA_SAI0_TXD IMX8QXP_SAI0_TXD 0 +#define IMX8QXP_SAI0_TXD_ADMA_SAI1_RXC IMX8QXP_SAI0_TXD 1 +#define IMX8QXP_SAI0_TXD_ADMA_SPI1_SDO IMX8QXP_SAI0_TXD 2 +#define IMX8QXP_SAI0_TXD_ADMA_LCDIF_D18 IMX8QXP_SAI0_TXD 3 +#define IMX8QXP_SAI0_TXD_LSIO_GPIO0_IO25 IMX8QXP_SAI0_TXD 4 +#define IMX8QXP_SAI0_TXC_ADMA_SAI0_TXC IMX8QXP_SAI0_TXC 0 +#define IMX8QXP_SAI0_TXC_ADMA_SAI1_TXD IMX8QXP_SAI0_TXC 1 +#define IMX8QXP_SAI0_TXC_ADMA_SPI1_SDI IMX8QXP_SAI0_TXC 2 +#define IMX8QXP_SAI0_TXC_ADMA_LCDIF_D19 IMX8QXP_SAI0_TXC 3 +#define IMX8QXP_SAI0_TXC_LSIO_GPIO0_IO26 IMX8QXP_SAI0_TXC 4 +#define IMX8QXP_SAI0_RXD_ADMA_SAI0_RXD IMX8QXP_SAI0_RXD 0 +#define IMX8QXP_SAI0_RXD_ADMA_SAI1_RXFS IMX8QXP_SAI0_RXD 1 +#define IMX8QXP_SAI0_RXD_ADMA_SPI1_CS0 IMX8QXP_SAI0_RXD 2 +#define IMX8QXP_SAI0_RXD_ADMA_LCDIF_D20 IMX8QXP_SAI0_RXD 3 +#define IMX8QXP_SAI0_RXD_LSIO_GPIO0_IO27 IMX8QXP_SAI0_RXD 4 +#define IMX8QXP_SAI0_TXFS_ADMA_SAI0_TXFS IMX8QXP_SAI0_TXFS 0 +#define IMX8QXP_SAI0_TXFS_ADMA_SPI2_CS1 IMX8QXP_SAI0_TXFS 1 +#define IMX8QXP_SAI0_TXFS_ADMA_SPI1_SCK IMX8QXP_SAI0_TXFS 2 +#define IMX8QXP_SAI0_TXFS_LSIO_GPIO0_IO28 IMX8QXP_SAI0_TXFS 4 +#define IMX8QXP_SAI1_RXD_ADMA_SAI1_RXD IMX8QXP_SAI1_RXD 0 +#define IMX8QXP_SAI1_RXD_ADMA_SAI0_RXFS IMX8QXP_SAI1_RXD 1 +#define IMX8QXP_SAI1_RXD_ADMA_SPI1_CS1 IMX8QXP_SAI1_RXD 2 +#define IMX8QXP_SAI1_RXD_ADMA_LCDIF_D21 IMX8QXP_SAI1_RXD 3 +#define IMX8QXP_SAI1_RXD_LSIO_GPIO0_IO29 IMX8QXP_SAI1_RXD 4 +#define IMX8QXP_SAI1_RXC_ADMA_SAI1_RXC IMX8QXP_SAI1_RXC 0 +#define IMX8QXP_SAI1_RXC_ADMA_SAI1_TXC IMX8QXP_SAI1_RXC 1 +#define IMX8QXP_SAI1_RXC_ADMA_LCDIF_D22 IMX8QXP_SAI1_RXC 3 +#define IMX8QXP_SAI1_RXC_LSIO_GPIO0_IO30 IMX8QXP_SAI1_RXC 4 +#define IMX8QXP_SAI1_RXFS_ADMA_SAI1_RXFS IMX8QXP_SAI1_RXFS 0 +#define IMX8QXP_SAI1_RXFS_ADMA_SAI1_TXFS IMX8QXP_SAI1_RXFS 1 +#define IMX8QXP_SAI1_RXFS_ADMA_LCDIF_D23 IMX8QXP_SAI1_RXFS 3 +#define IMX8QXP_SAI1_RXFS_LSIO_GPIO0_IO31 IMX8QXP_SAI1_RXFS 4 +#define IMX8QXP_SPI2_CS0_ADMA_SPI2_CS0 IMX8QXP_SPI2_CS0 0 +#define IMX8QXP_SPI2_CS0_LSIO_GPIO1_IO00 IMX8QXP_SPI2_CS0 4 +#define IMX8QXP_SPI2_SDO_ADMA_SPI2_SDO IMX8QXP_SPI2_SDO 0 +#define IMX8QXP_SPI2_SDO_LSIO_GPIO1_IO01 IMX8QXP_SPI2_SDO 4 +#define IMX8QXP_SPI2_SDI_ADMA_SPI2_SDI IMX8QXP_SPI2_SDI 0 +#define IMX8QXP_SPI2_SDI_LSIO_GPIO1_IO02 IMX8QXP_SPI2_SDI 4 +#define IMX8QXP_SPI2_SCK_ADMA_SPI2_SCK IMX8QXP_SPI2_SCK 0 +#define IMX8QXP_SPI2_SCK_LSIO_GPIO1_IO03 IMX8QXP_SPI2_SCK 4 +#define IMX8QXP_SPI0_SCK_ADMA_SPI0_SCK IMX8QXP_SPI0_SCK 0 +#define IMX8QXP_SPI0_SCK_ADMA_SAI0_TXC IMX8QXP_SPI0_SCK 1 +#define IMX8QXP_SPI0_SCK_M40_I2C0_SCL IMX8QXP_SPI0_SCK 2 +#define IMX8QXP_SPI0_SCK_M40_GPIO0_IO00 IMX8QXP_SPI0_SCK 3 +#define IMX8QXP_SPI0_SCK_LSIO_GPIO1_IO04 IMX8QXP_SPI0_SCK 4 +#define IMX8QXP_SPI0_SDI_ADMA_SPI0_SDI IMX8QXP_SPI0_SDI 0 +#define IMX8QXP_SPI0_SDI_ADMA_SAI0_TXD IMX8QXP_SPI0_SDI 1 +#define IMX8QXP_SPI0_SDI_M40_TPM0_CH0 IMX8QXP_SPI0_SDI 2 +#define IMX8QXP_SPI0_SDI_M40_GPIO0_IO02 IMX8QXP_SPI0_SDI 3 +#define IMX8QXP_SPI0_SDI_LSIO_GPIO1_IO05 IMX8QXP_SPI0_SDI 4 +#define IMX8QXP_SPI0_SDO_ADMA_SPI0_SDO IMX8QXP_SPI0_SDO 0 +#define IMX8QXP_SPI0_SDO_ADMA_SAI0_TXFS IMX8QXP_SPI0_SDO 1 +#define IMX8QXP_SPI0_SDO_M40_I2C0_SDA IMX8QXP_SPI0_SDO 2 +#define IMX8QXP_SPI0_SDO_M40_GPIO0_IO01 IMX8QXP_SPI0_SDO 3 +#define IMX8QXP_SPI0_SDO_LSIO_GPIO1_IO06 IMX8QXP_SPI0_SDO 4 +#define IMX8QXP_SPI0_CS1_ADMA_SPI0_CS1 IMX8QXP_SPI0_CS1 0 +#define IMX8QXP_SPI0_CS1_ADMA_SAI0_RXC IMX8QXP_SPI0_CS1 1 +#define IMX8QXP_SPI0_CS1_ADMA_SAI1_TXD IMX8QXP_SPI0_CS1 2 +#define IMX8QXP_SPI0_CS1_ADMA_LCD_PWM0_OUT IMX8QXP_SPI0_CS1 3 +#define 
IMX8QXP_SPI0_CS1_LSIO_GPIO1_IO07 IMX8QXP_SPI0_CS1 4 +#define IMX8QXP_SPI0_CS0_ADMA_SPI0_CS0 IMX8QXP_SPI0_CS0 0 +#define IMX8QXP_SPI0_CS0_ADMA_SAI0_RXD IMX8QXP_SPI0_CS0 1 +#define IMX8QXP_SPI0_CS0_M40_TPM0_CH1 IMX8QXP_SPI0_CS0 2 +#define IMX8QXP_SPI0_CS0_M40_GPIO0_IO03 IMX8QXP_SPI0_CS0 3 +#define IMX8QXP_SPI0_CS0_LSIO_GPIO1_IO08 IMX8QXP_SPI0_CS0 4 +#define IMX8QXP_ADC_IN1_ADMA_ADC_IN1 IMX8QXP_ADC_IN1 0 +#define IMX8QXP_ADC_IN1_M40_I2C0_SDA IMX8QXP_ADC_IN1 1 +#define IMX8QXP_ADC_IN1_M40_GPIO0_IO01 IMX8QXP_ADC_IN1 2 +#define IMX8QXP_ADC_IN1_LSIO_GPIO1_IO09 IMX8QXP_ADC_IN1 4 +#define IMX8QXP_ADC_IN0_ADMA_ADC_IN0 IMX8QXP_ADC_IN0 0 +#define IMX8QXP_ADC_IN0_M40_I2C0_SCL IMX8QXP_ADC_IN0 1 +#define IMX8QXP_ADC_IN0_M40_GPIO0_IO00 IMX8QXP_ADC_IN0 2 +#define IMX8QXP_ADC_IN0_LSIO_GPIO1_IO10 IMX8QXP_ADC_IN0 4 +#define IMX8QXP_ADC_IN3_ADMA_ADC_IN3 IMX8QXP_ADC_IN3 0 +#define IMX8QXP_ADC_IN3_M40_UART0_TX IMX8QXP_ADC_IN3 1 +#define IMX8QXP_ADC_IN3_M40_GPIO0_IO03 IMX8QXP_ADC_IN3 2 +#define IMX8QXP_ADC_IN3_ADMA_ACM_MCLK_OUT0 IMX8QXP_ADC_IN3 3 +#define IMX8QXP_ADC_IN3_LSIO_GPIO1_IO11 IMX8QXP_ADC_IN3 4 +#define IMX8QXP_ADC_IN2_ADMA_ADC_IN2 IMX8QXP_ADC_IN2 0 +#define IMX8QXP_ADC_IN2_M40_UART0_RX IMX8QXP_ADC_IN2 1 +#define IMX8QXP_ADC_IN2_M40_GPIO0_IO02 IMX8QXP_ADC_IN2 2 +#define IMX8QXP_ADC_IN2_ADMA_ACM_MCLK_IN0 IMX8QXP_ADC_IN2 3 +#define IMX8QXP_ADC_IN2_LSIO_GPIO1_IO12 IMX8QXP_ADC_IN2 4 +#define IMX8QXP_ADC_IN5_ADMA_ADC_IN5 IMX8QXP_ADC_IN5 0 +#define IMX8QXP_ADC_IN5_M40_TPM0_CH1 IMX8QXP_ADC_IN5 1 +#define IMX8QXP_ADC_IN5_M40_GPIO0_IO05 IMX8QXP_ADC_IN5 2 +#define IMX8QXP_ADC_IN5_LSIO_GPIO1_IO13 IMX8QXP_ADC_IN5 4 +#define IMX8QXP_ADC_IN4_ADMA_ADC_IN4 IMX8QXP_ADC_IN4 0 +#define IMX8QXP_ADC_IN4_M40_TPM0_CH0 IMX8QXP_ADC_IN4 1 +#define IMX8QXP_ADC_IN4_M40_GPIO0_IO04 IMX8QXP_ADC_IN4 2 +#define IMX8QXP_ADC_IN4_LSIO_GPIO1_IO14 IMX8QXP_ADC_IN4 4 +#define IMX8QXP_FLEXCAN0_RX_ADMA_FLEXCAN0_RX IMX8QXP_FLEXCAN0_RX 0 +#define IMX8QXP_FLEXCAN0_RX_ADMA_SAI2_RXC IMX8QXP_FLEXCAN0_RX 1 +#define IMX8QXP_FLEXCAN0_RX_ADMA_UART0_RTS_B IMX8QXP_FLEXCAN0_RX 2 +#define IMX8QXP_FLEXCAN0_RX_ADMA_SAI1_TXC IMX8QXP_FLEXCAN0_RX 3 +#define IMX8QXP_FLEXCAN0_RX_LSIO_GPIO1_IO15 IMX8QXP_FLEXCAN0_RX 4 +#define IMX8QXP_FLEXCAN0_TX_ADMA_FLEXCAN0_TX IMX8QXP_FLEXCAN0_TX 0 +#define IMX8QXP_FLEXCAN0_TX_ADMA_SAI2_RXD IMX8QXP_FLEXCAN0_TX 1 +#define IMX8QXP_FLEXCAN0_TX_ADMA_UART0_CTS_B IMX8QXP_FLEXCAN0_TX 2 +#define IMX8QXP_FLEXCAN0_TX_ADMA_SAI1_TXFS IMX8QXP_FLEXCAN0_TX 3 +#define IMX8QXP_FLEXCAN0_TX_LSIO_GPIO1_IO16 IMX8QXP_FLEXCAN0_TX 4 +#define IMX8QXP_FLEXCAN1_RX_ADMA_FLEXCAN1_RX IMX8QXP_FLEXCAN1_RX 0 +#define IMX8QXP_FLEXCAN1_RX_ADMA_SAI2_RXFS IMX8QXP_FLEXCAN1_RX 1 +#define IMX8QXP_FLEXCAN1_RX_ADMA_FTM_CH2 IMX8QXP_FLEXCAN1_RX 2 +#define IMX8QXP_FLEXCAN1_RX_ADMA_SAI1_TXD IMX8QXP_FLEXCAN1_RX 3 +#define IMX8QXP_FLEXCAN1_RX_LSIO_GPIO1_IO17 IMX8QXP_FLEXCAN1_RX 4 +#define IMX8QXP_FLEXCAN1_TX_ADMA_FLEXCAN1_TX IMX8QXP_FLEXCAN1_TX 0 +#define IMX8QXP_FLEXCAN1_TX_ADMA_SAI3_RXC IMX8QXP_FLEXCAN1_TX 1 +#define IMX8QXP_FLEXCAN1_TX_ADMA_DMA0_REQ_IN0 IMX8QXP_FLEXCAN1_TX 2 +#define IMX8QXP_FLEXCAN1_TX_ADMA_SAI1_RXD IMX8QXP_FLEXCAN1_TX 3 +#define IMX8QXP_FLEXCAN1_TX_LSIO_GPIO1_IO18 IMX8QXP_FLEXCAN1_TX 4 +#define IMX8QXP_FLEXCAN2_RX_ADMA_FLEXCAN2_RX IMX8QXP_FLEXCAN2_RX 0 +#define IMX8QXP_FLEXCAN2_RX_ADMA_SAI3_RXD IMX8QXP_FLEXCAN2_RX 1 +#define IMX8QXP_FLEXCAN2_RX_ADMA_UART3_RX IMX8QXP_FLEXCAN2_RX 2 +#define IMX8QXP_FLEXCAN2_RX_ADMA_SAI1_RXFS IMX8QXP_FLEXCAN2_RX 3 +#define IMX8QXP_FLEXCAN2_RX_LSIO_GPIO1_IO19 IMX8QXP_FLEXCAN2_RX 4 +#define IMX8QXP_FLEXCAN2_TX_ADMA_FLEXCAN2_TX 
IMX8QXP_FLEXCAN2_TX 0 +#define IMX8QXP_FLEXCAN2_TX_ADMA_SAI3_RXFS IMX8QXP_FLEXCAN2_TX 1 +#define IMX8QXP_FLEXCAN2_TX_ADMA_UART3_TX IMX8QXP_FLEXCAN2_TX 2 +#define IMX8QXP_FLEXCAN2_TX_ADMA_SAI1_RXC IMX8QXP_FLEXCAN2_TX 3 +#define IMX8QXP_FLEXCAN2_TX_LSIO_GPIO1_IO20 IMX8QXP_FLEXCAN2_TX 4 +#define IMX8QXP_UART0_RX_ADMA_UART0_RX IMX8QXP_UART0_RX 0 +#define IMX8QXP_UART0_RX_ADMA_MQS_R IMX8QXP_UART0_RX 1 +#define IMX8QXP_UART0_RX_ADMA_FLEXCAN0_RX IMX8QXP_UART0_RX 2 +#define IMX8QXP_UART0_RX_LSIO_GPIO1_IO21 IMX8QXP_UART0_RX 4 +#define IMX8QXP_UART0_TX_ADMA_UART0_TX IMX8QXP_UART0_TX 0 +#define IMX8QXP_UART0_TX_ADMA_MQS_L IMX8QXP_UART0_TX 1 +#define IMX8QXP_UART0_TX_ADMA_FLEXCAN0_TX IMX8QXP_UART0_TX 2 +#define IMX8QXP_UART0_TX_LSIO_GPIO1_IO22 IMX8QXP_UART0_TX 4 +#define IMX8QXP_UART2_TX_ADMA_UART2_TX IMX8QXP_UART2_TX 0 +#define IMX8QXP_UART2_TX_ADMA_FTM_CH1 IMX8QXP_UART2_TX 1 +#define IMX8QXP_UART2_TX_ADMA_FLEXCAN1_TX IMX8QXP_UART2_TX 2 +#define IMX8QXP_UART2_TX_LSIO_GPIO1_IO23 IMX8QXP_UART2_TX 4 +#define IMX8QXP_UART2_RX_ADMA_UART2_RX IMX8QXP_UART2_RX 0 +#define IMX8QXP_UART2_RX_ADMA_FTM_CH0 IMX8QXP_UART2_RX 1 +#define IMX8QXP_UART2_RX_ADMA_FLEXCAN1_RX IMX8QXP_UART2_RX 2 +#define IMX8QXP_UART2_RX_LSIO_GPIO1_IO24 IMX8QXP_UART2_RX 4 +#define IMX8QXP_MIPI_DSI0_I2C0_SCL_MIPI_DSI0_I2C0_SCL IMX8QXP_MIPI_DSI0_I2C0_SCL 0 +#define IMX8QXP_MIPI_DSI0_I2C0_SCL_MIPI_DSI1_GPIO0_IO02 IMX8QXP_MIPI_DSI0_I2C0_SCL 1 +#define IMX8QXP_MIPI_DSI0_I2C0_SCL_LSIO_GPIO1_IO25 IMX8QXP_MIPI_DSI0_I2C0_SCL 4 +#define IMX8QXP_MIPI_DSI0_I2C0_SDA_MIPI_DSI0_I2C0_SDA IMX8QXP_MIPI_DSI0_I2C0_SDA 0 +#define IMX8QXP_MIPI_DSI0_I2C0_SDA_MIPI_DSI1_GPIO0_IO03 IMX8QXP_MIPI_DSI0_I2C0_SDA 1 +#define IMX8QXP_MIPI_DSI0_I2C0_SDA_LSIO_GPIO1_IO26 IMX8QXP_MIPI_DSI0_I2C0_SDA 4 +#define IMX8QXP_MIPI_DSI0_GPIO0_00_MIPI_DSI0_GPIO0_IO00 IMX8QXP_MIPI_DSI0_GPIO0_00 0 +#define IMX8QXP_MIPI_DSI0_GPIO0_00_ADMA_I2C1_SCL IMX8QXP_MIPI_DSI0_GPIO0_00 1 +#define IMX8QXP_MIPI_DSI0_GPIO0_00_MIPI_DSI0_PWM0_OUT IMX8QXP_MIPI_DSI0_GPIO0_00 2 +#define IMX8QXP_MIPI_DSI0_GPIO0_00_LSIO_GPIO1_IO27 IMX8QXP_MIPI_DSI0_GPIO0_00 4 +#define IMX8QXP_MIPI_DSI0_GPIO0_01_MIPI_DSI0_GPIO0_IO01 IMX8QXP_MIPI_DSI0_GPIO0_01 0 +#define IMX8QXP_MIPI_DSI0_GPIO0_01_ADMA_I2C1_SDA IMX8QXP_MIPI_DSI0_GPIO0_01 1 +#define IMX8QXP_MIPI_DSI0_GPIO0_01_LSIO_GPIO1_IO28 IMX8QXP_MIPI_DSI0_GPIO0_01 4 +#define IMX8QXP_MIPI_DSI1_I2C0_SCL_MIPI_DSI1_I2C0_SCL IMX8QXP_MIPI_DSI1_I2C0_SCL 0 +#define IMX8QXP_MIPI_DSI1_I2C0_SCL_MIPI_DSI0_GPIO0_IO02 IMX8QXP_MIPI_DSI1_I2C0_SCL 1 +#define IMX8QXP_MIPI_DSI1_I2C0_SCL_LSIO_GPIO1_IO29 IMX8QXP_MIPI_DSI1_I2C0_SCL 4 +#define IMX8QXP_MIPI_DSI1_I2C0_SDA_MIPI_DSI1_I2C0_SDA IMX8QXP_MIPI_DSI1_I2C0_SDA 0 +#define IMX8QXP_MIPI_DSI1_I2C0_SDA_MIPI_DSI0_GPIO0_IO03 IMX8QXP_MIPI_DSI1_I2C0_SDA 1 +#define IMX8QXP_MIPI_DSI1_I2C0_SDA_LSIO_GPIO1_IO30 IMX8QXP_MIPI_DSI1_I2C0_SDA 4 +#define IMX8QXP_MIPI_DSI1_GPIO0_00_MIPI_DSI1_GPIO0_IO00 IMX8QXP_MIPI_DSI1_GPIO0_00 0 +#define IMX8QXP_MIPI_DSI1_GPIO0_00_ADMA_I2C2_SCL IMX8QXP_MIPI_DSI1_GPIO0_00 1 +#define IMX8QXP_MIPI_DSI1_GPIO0_00_MIPI_DSI1_PWM0_OUT IMX8QXP_MIPI_DSI1_GPIO0_00 2 +#define IMX8QXP_MIPI_DSI1_GPIO0_00_LSIO_GPIO1_IO31 IMX8QXP_MIPI_DSI1_GPIO0_00 4 +#define IMX8QXP_MIPI_DSI1_GPIO0_01_MIPI_DSI1_GPIO0_IO01 IMX8QXP_MIPI_DSI1_GPIO0_01 0 +#define IMX8QXP_MIPI_DSI1_GPIO0_01_ADMA_I2C2_SDA IMX8QXP_MIPI_DSI1_GPIO0_01 1 +#define IMX8QXP_MIPI_DSI1_GPIO0_01_LSIO_GPIO2_IO00 IMX8QXP_MIPI_DSI1_GPIO0_01 4 +#define IMX8QXP_JTAG_TRST_B_SCU_JTAG_TRST_B IMX8QXP_JTAG_TRST_B 0 +#define IMX8QXP_JTAG_TRST_B_SCU_WDOG0_WDOG_OUT IMX8QXP_JTAG_TRST_B 1 +#define 
IMX8QXP_PMIC_I2C_SCL_SCU_PMIC_I2C_SCL IMX8QXP_PMIC_I2C_SCL 0 +#define IMX8QXP_PMIC_I2C_SCL_SCU_GPIO0_IOXX_PMIC_A35_ON IMX8QXP_PMIC_I2C_SCL 1 +#define IMX8QXP_PMIC_I2C_SCL_LSIO_GPIO2_IO01 IMX8QXP_PMIC_I2C_SCL 4 +#define IMX8QXP_PMIC_I2C_SDA_SCU_PMIC_I2C_SDA IMX8QXP_PMIC_I2C_SDA 0 +#define IMX8QXP_PMIC_I2C_SDA_SCU_GPIO0_IOXX_PMIC_GPU_ON IMX8QXP_PMIC_I2C_SDA 1 +#define IMX8QXP_PMIC_I2C_SDA_LSIO_GPIO2_IO02 IMX8QXP_PMIC_I2C_SDA 4 +#define IMX8QXP_PMIC_INT_B_SCU_DIMX8QXPMIC_INT_B IMX8QXP_PMIC_INT_B 0 +#define IMX8QXP_SCU_GPIO0_00_SCU_GPIO0_IO00 IMX8QXP_SCU_GPIO0_00 0 +#define IMX8QXP_SCU_GPIO0_00_SCU_UART0_RX IMX8QXP_SCU_GPIO0_00 1 +#define IMX8QXP_SCU_GPIO0_00_M40_UART0_RX IMX8QXP_SCU_GPIO0_00 2 +#define IMX8QXP_SCU_GPIO0_00_ADMA_UART3_RX IMX8QXP_SCU_GPIO0_00 3 +#define IMX8QXP_SCU_GPIO0_00_LSIO_GPIO2_IO03 IMX8QXP_SCU_GPIO0_00 4 +#define IMX8QXP_SCU_GPIO0_01_SCU_GPIO0_IO01 IMX8QXP_SCU_GPIO0_01 0 +#define IMX8QXP_SCU_GPIO0_01_SCU_UART0_TX IMX8QXP_SCU_GPIO0_01 1 +#define IMX8QXP_SCU_GPIO0_01_M40_UART0_TX IMX8QXP_SCU_GPIO0_01 2 +#define IMX8QXP_SCU_GPIO0_01_ADMA_UART3_TX IMX8QXP_SCU_GPIO0_01 3 +#define IMX8QXP_SCU_GPIO0_01_SCU_WDOG0_WDOG_OUT IMX8QXP_SCU_GPIO0_01 4 +#define IMX8QXP_SCU_PMIC_STANDBY_SCU_DIMX8QXPMIC_STANDBY IMX8QXP_SCU_PMIC_STANDBY 0 +#define IMX8QXP_SCU_BOOT_MODE0_SCU_DSC_BOOT_MODE0 IMX8QXP_SCU_BOOT_MODE0 0 +#define IMX8QXP_SCU_BOOT_MODE1_SCU_DSC_BOOT_MODE1 IMX8QXP_SCU_BOOT_MODE1 0 +#define IMX8QXP_SCU_BOOT_MODE2_SCU_DSC_BOOT_MODE2 IMX8QXP_SCU_BOOT_MODE2 0 +#define IMX8QXP_SCU_BOOT_MODE2_SCU_PMIC_I2C_SDA IMX8QXP_SCU_BOOT_MODE2 1 +#define IMX8QXP_SCU_BOOT_MODE3_SCU_DSC_BOOT_MODE3 IMX8QXP_SCU_BOOT_MODE3 0 +#define IMX8QXP_SCU_BOOT_MODE3_SCU_PMIC_I2C_SCL IMX8QXP_SCU_BOOT_MODE3 1 +#define IMX8QXP_SCU_BOOT_MODE3_SCU_DSC_RTC_CLOCK_OUTPUT_32K IMX8QXP_SCU_BOOT_MODE3 3 +#define IMX8QXP_CSI_D00_CI_PI_D02 IMX8QXP_CSI_D00 0 +#define IMX8QXP_CSI_D00_ADMA_SAI0_RXC IMX8QXP_CSI_D00 2 +#define IMX8QXP_CSI_D01_CI_PI_D03 IMX8QXP_CSI_D01 0 +#define IMX8QXP_CSI_D01_ADMA_SAI0_RXD IMX8QXP_CSI_D01 2 +#define IMX8QXP_CSI_D02_CI_PI_D04 IMX8QXP_CSI_D02 0 +#define IMX8QXP_CSI_D02_ADMA_SAI0_RXFS IMX8QXP_CSI_D02 2 +#define IMX8QXP_CSI_D03_CI_PI_D05 IMX8QXP_CSI_D03 0 +#define IMX8QXP_CSI_D03_ADMA_SAI2_RXC IMX8QXP_CSI_D03 2 +#define IMX8QXP_CSI_D04_CI_PI_D06 IMX8QXP_CSI_D04 0 +#define IMX8QXP_CSI_D04_ADMA_SAI2_RXD IMX8QXP_CSI_D04 2 +#define IMX8QXP_CSI_D05_CI_PI_D07 IMX8QXP_CSI_D05 0 +#define IMX8QXP_CSI_D05_ADMA_SAI2_RXFS IMX8QXP_CSI_D05 2 +#define IMX8QXP_CSI_D06_CI_PI_D08 IMX8QXP_CSI_D06 0 +#define IMX8QXP_CSI_D06_ADMA_SAI3_RXC IMX8QXP_CSI_D06 2 +#define IMX8QXP_CSI_D07_CI_PI_D09 IMX8QXP_CSI_D07 0 +#define IMX8QXP_CSI_D07_ADMA_SAI3_RXD IMX8QXP_CSI_D07 2 +#define IMX8QXP_CSI_HSYNC_CI_PI_HSYNC IMX8QXP_CSI_HSYNC 0 +#define IMX8QXP_CSI_HSYNC_CI_PI_D00 IMX8QXP_CSI_HSYNC 1 +#define IMX8QXP_CSI_HSYNC_ADMA_SAI3_RXFS IMX8QXP_CSI_HSYNC 2 +#define IMX8QXP_CSI_VSYNC_CI_PI_VSYNC IMX8QXP_CSI_VSYNC 0 +#define IMX8QXP_CSI_VSYNC_CI_PI_D01 IMX8QXP_CSI_VSYNC 1 +#define IMX8QXP_CSI_PCLK_CI_PI_PCLK IMX8QXP_CSI_PCLK 0 +#define IMX8QXP_CSI_PCLK_MIPI_CSI0_I2C0_SCL IMX8QXP_CSI_PCLK 1 +#define IMX8QXP_CSI_PCLK_ADMA_SPI1_SCK IMX8QXP_CSI_PCLK 3 +#define IMX8QXP_CSI_PCLK_LSIO_GPIO3_IO00 IMX8QXP_CSI_PCLK 4 +#define IMX8QXP_CSI_MCLK_CI_PI_MCLK IMX8QXP_CSI_MCLK 0 +#define IMX8QXP_CSI_MCLK_MIPI_CSI0_I2C0_SDA IMX8QXP_CSI_MCLK 1 +#define IMX8QXP_CSI_MCLK_ADMA_SPI1_SDO IMX8QXP_CSI_MCLK 3 +#define IMX8QXP_CSI_MCLK_LSIO_GPIO3_IO01 IMX8QXP_CSI_MCLK 4 +#define IMX8QXP_CSI_EN_CI_PI_EN IMX8QXP_CSI_EN 0 +#define IMX8QXP_CSI_EN_CI_PI_I2C_SCL 
IMX8QXP_CSI_EN 1 +#define IMX8QXP_CSI_EN_ADMA_I2C3_SCL IMX8QXP_CSI_EN 2 +#define IMX8QXP_CSI_EN_ADMA_SPI1_SDI IMX8QXP_CSI_EN 3 +#define IMX8QXP_CSI_EN_LSIO_GPIO3_IO02 IMX8QXP_CSI_EN 4 +#define IMX8QXP_CSI_RESET_CI_PI_RESET IMX8QXP_CSI_RESET 0 +#define IMX8QXP_CSI_RESET_CI_PI_I2C_SDA IMX8QXP_CSI_RESET 1 +#define IMX8QXP_CSI_RESET_ADMA_I2C3_SDA IMX8QXP_CSI_RESET 2 +#define IMX8QXP_CSI_RESET_ADMA_SPI1_CS0 IMX8QXP_CSI_RESET 3 +#define IMX8QXP_CSI_RESET_LSIO_GPIO3_IO03 IMX8QXP_CSI_RESET 4 +#define IMX8QXP_MIPI_CSI0_MCLK_OUT_MIPI_CSI0_ACM_MCLK_OUT IMX8QXP_MIPI_CSI0_MCLK_OUT 0 +#define IMX8QXP_MIPI_CSI0_MCLK_OUT_LSIO_GPIO3_IO04 IMX8QXP_MIPI_CSI0_MCLK_OUT 4 +#define IMX8QXP_MIPI_CSI0_I2C0_SCL_MIPI_CSI0_I2C0_SCL IMX8QXP_MIPI_CSI0_I2C0_SCL 0 +#define IMX8QXP_MIPI_CSI0_I2C0_SCL_MIPI_CSI0_GPIO0_IO02 IMX8QXP_MIPI_CSI0_I2C0_SCL 1 +#define IMX8QXP_MIPI_CSI0_I2C0_SCL_LSIO_GPIO3_IO05 IMX8QXP_MIPI_CSI0_I2C0_SCL 4 +#define IMX8QXP_MIPI_CSI0_I2C0_SDA_MIPI_CSI0_I2C0_SDA IMX8QXP_MIPI_CSI0_I2C0_SDA 0 +#define IMX8QXP_MIPI_CSI0_I2C0_SDA_MIPI_CSI0_GPIO0_IO03 IMX8QXP_MIPI_CSI0_I2C0_SDA 1 +#define IMX8QXP_MIPI_CSI0_I2C0_SDA_LSIO_GPIO3_IO06 IMX8QXP_MIPI_CSI0_I2C0_SDA 4 +#define IMX8QXP_MIPI_CSI0_GPIO0_01_MIPI_CSI0_GPIO0_IO01 IMX8QXP_MIPI_CSI0_GPIO0_01 0 +#define IMX8QXP_MIPI_CSI0_GPIO0_01_ADMA_I2C0_SDA IMX8QXP_MIPI_CSI0_GPIO0_01 1 +#define IMX8QXP_MIPI_CSI0_GPIO0_01_LSIO_GPIO3_IO07 IMX8QXP_MIPI_CSI0_GPIO0_01 4 +#define IMX8QXP_MIPI_CSI0_GPIO0_00_MIPI_CSI0_GPIO0_IO00 IMX8QXP_MIPI_CSI0_GPIO0_00 0 +#define IMX8QXP_MIPI_CSI0_GPIO0_00_ADMA_I2C0_SCL IMX8QXP_MIPI_CSI0_GPIO0_00 1 +#define IMX8QXP_MIPI_CSI0_GPIO0_00_LSIO_GPIO3_IO08 IMX8QXP_MIPI_CSI0_GPIO0_00 4 +#define IMX8QXP_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 IMX8QXP_QSPI0A_DATA0 0 +#define IMX8QXP_QSPI0A_DATA0_LSIO_GPIO3_IO09 IMX8QXP_QSPI0A_DATA0 4 +#define IMX8QXP_QSPI0A_DATA1_LSIO_QSPI0A_DATA1 IMX8QXP_QSPI0A_DATA1 0 +#define IMX8QXP_QSPI0A_DATA1_LSIO_GPIO3_IO10 IMX8QXP_QSPI0A_DATA1 4 +#define IMX8QXP_QSPI0A_DATA2_LSIO_QSPI0A_DATA2 IMX8QXP_QSPI0A_DATA2 0 +#define IMX8QXP_QSPI0A_DATA2_LSIO_GPIO3_IO11 IMX8QXP_QSPI0A_DATA2 4 +#define IMX8QXP_QSPI0A_DATA3_LSIO_QSPI0A_DATA3 IMX8QXP_QSPI0A_DATA3 0 +#define IMX8QXP_QSPI0A_DATA3_LSIO_GPIO3_IO12 IMX8QXP_QSPI0A_DATA3 4 +#define IMX8QXP_QSPI0A_DQS_LSIO_QSPI0A_DQS IMX8QXP_QSPI0A_DQS 0 +#define IMX8QXP_QSPI0A_DQS_LSIO_GPIO3_IO13 IMX8QXP_QSPI0A_DQS 4 +#define IMX8QXP_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B IMX8QXP_QSPI0A_SS0_B 0 +#define IMX8QXP_QSPI0A_SS0_B_LSIO_GPIO3_IO14 IMX8QXP_QSPI0A_SS0_B 4 +#define IMX8QXP_QSPI0A_SS1_B_LSIO_QSPI0A_SS1_B IMX8QXP_QSPI0A_SS1_B 0 +#define IMX8QXP_QSPI0A_SS1_B_LSIO_GPIO3_IO15 IMX8QXP_QSPI0A_SS1_B 4 +#define IMX8QXP_QSPI0A_SCLK_LSIO_QSPI0A_SCLK IMX8QXP_QSPI0A_SCLK 0 +#define IMX8QXP_QSPI0A_SCLK_LSIO_GPIO3_IO16 IMX8QXP_QSPI0A_SCLK 4 +#define IMX8QXP_QSPI0B_SCLK_LSIO_QSPI0B_SCLK IMX8QXP_QSPI0B_SCLK 0 +#define IMX8QXP_QSPI0B_SCLK_LSIO_QSPI1A_SCLK IMX8QXP_QSPI0B_SCLK 1 +#define IMX8QXP_QSPI0B_SCLK_LSIO_KPP0_COL0 IMX8QXP_QSPI0B_SCLK 2 +#define IMX8QXP_QSPI0B_SCLK_LSIO_GPIO3_IO17 IMX8QXP_QSPI0B_SCLK 4 +#define IMX8QXP_QSPI0B_DATA0_LSIO_QSPI0B_DATA0 IMX8QXP_QSPI0B_DATA0 0 +#define IMX8QXP_QSPI0B_DATA0_LSIO_QSPI1A_DATA0 IMX8QXP_QSPI0B_DATA0 1 +#define IMX8QXP_QSPI0B_DATA0_LSIO_KPP0_COL1 IMX8QXP_QSPI0B_DATA0 2 +#define IMX8QXP_QSPI0B_DATA0_LSIO_GPIO3_IO18 IMX8QXP_QSPI0B_DATA0 4 +#define IMX8QXP_QSPI0B_DATA1_LSIO_QSPI0B_DATA1 IMX8QXP_QSPI0B_DATA1 0 +#define IMX8QXP_QSPI0B_DATA1_LSIO_QSPI1A_DATA1 IMX8QXP_QSPI0B_DATA1 1 +#define IMX8QXP_QSPI0B_DATA1_LSIO_KPP0_COL2 IMX8QXP_QSPI0B_DATA1 2 +#define 
IMX8QXP_QSPI0B_DATA1_LSIO_GPIO3_IO19 IMX8QXP_QSPI0B_DATA1 4 +#define IMX8QXP_QSPI0B_DATA2_LSIO_QSPI0B_DATA2 IMX8QXP_QSPI0B_DATA2 0 +#define IMX8QXP_QSPI0B_DATA2_LSIO_QSPI1A_DATA2 IMX8QXP_QSPI0B_DATA2 1 +#define IMX8QXP_QSPI0B_DATA2_LSIO_KPP0_COL3 IMX8QXP_QSPI0B_DATA2 2 +#define IMX8QXP_QSPI0B_DATA2_LSIO_GPIO3_IO20 IMX8QXP_QSPI0B_DATA2 4 +#define IMX8QXP_QSPI0B_DATA3_LSIO_QSPI0B_DATA3 IMX8QXP_QSPI0B_DATA3 0 +#define IMX8QXP_QSPI0B_DATA3_LSIO_QSPI1A_DATA3 IMX8QXP_QSPI0B_DATA3 1 +#define IMX8QXP_QSPI0B_DATA3_LSIO_KPP0_ROW0 IMX8QXP_QSPI0B_DATA3 2 +#define IMX8QXP_QSPI0B_DATA3_LSIO_GPIO3_IO21 IMX8QXP_QSPI0B_DATA3 4 +#define IMX8QXP_QSPI0B_DQS_LSIO_QSPI0B_DQS IMX8QXP_QSPI0B_DQS 0 +#define IMX8QXP_QSPI0B_DQS_LSIO_QSPI1A_DQS IMX8QXP_QSPI0B_DQS 1 +#define IMX8QXP_QSPI0B_DQS_LSIO_KPP0_ROW1 IMX8QXP_QSPI0B_DQS 2 +#define IMX8QXP_QSPI0B_DQS_LSIO_GPIO3_IO22 IMX8QXP_QSPI0B_DQS 4 +#define IMX8QXP_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B IMX8QXP_QSPI0B_SS0_B 0 +#define IMX8QXP_QSPI0B_SS0_B_LSIO_QSPI1A_SS0_B IMX8QXP_QSPI0B_SS0_B 1 +#define IMX8QXP_QSPI0B_SS0_B_LSIO_KPP0_ROW2 IMX8QXP_QSPI0B_SS0_B 2 +#define IMX8QXP_QSPI0B_SS0_B_LSIO_GPIO3_IO23 IMX8QXP_QSPI0B_SS0_B 4 +#define IMX8QXP_QSPI0B_SS1_B_LSIO_QSPI0B_SS1_B IMX8QXP_QSPI0B_SS1_B 0 +#define IMX8QXP_QSPI0B_SS1_B_LSIO_QSPI1A_SS1_B IMX8QXP_QSPI0B_SS1_B 1 +#define IMX8QXP_QSPI0B_SS1_B_LSIO_KPP0_ROW3 IMX8QXP_QSPI0B_SS1_B 2 +#define IMX8QXP_QSPI0B_SS1_B_LSIO_GPIO3_IO24 IMX8QXP_QSPI0B_SS1_B 4 + +#endif /* _IMX8QXP_PADS_H */ diff --git a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h new file mode 100644 index 000000000000..2d0c23e5d3a7 --- /dev/null +++ b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Defines macros and constants for Renesas RZ/A2 pin controller pin + * muxing functions. 
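+ *
+ * Illustrative usage (the board, port and function numbers are assumptions,
+ * not taken from this patch): a board device tree could route alternate
+ * function 1 onto pin P4_0 with
+ *
+ *   pinmux = <RZA2_PINMUX(PORT4, 0, 1)>;
+ *
+ * which encodes the global pin index (4 * 8 + 0) in the low bits and the
+ * function number in the upper 16 bits, as the macros below define.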
+ */ +#ifndef __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H +#define __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H + +#define RZA2_PINS_PER_PORT 8 + +/* Port names as labeled in the Hardware Manual */ +#define PORT0 0 +#define PORT1 1 +#define PORT2 2 +#define PORT3 3 +#define PORT4 4 +#define PORT5 5 +#define PORT6 6 +#define PORT7 7 +#define PORT8 8 +#define PORT9 9 +#define PORTA 10 +#define PORTB 11 +#define PORTC 12 +#define PORTD 13 +#define PORTE 14 +#define PORTF 15 +#define PORTG 16 +#define PORTH 17 +/* No I */ +#define PORTJ 18 +#define PORTK 19 +#define PORTL 20 +#define PORTM 21 /* Pins PM_0/1 are labeled JP_0/1 in HW manual */ + +/* + * Create the pin index from its bank and position numbers and store in + * the upper 16 bits the alternate function identifier + */ +#define RZA2_PINMUX(b, p, f) ((b) * RZA2_PINS_PER_PORT + (p) | (f << 16)) + +/* + * Convert a port and pin label to its global pin index + */ + #define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin)) + +#endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H */ diff --git a/include/dt-bindings/power/imx8mq-power.h b/include/dt-bindings/power/imx8mq-power.h new file mode 100644 index 000000000000..8a513bd9166e --- /dev/null +++ b/include/dt-bindings/power/imx8mq-power.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de> + */ + +#ifndef __DT_BINDINGS_IMX8MQ_POWER_H__ +#define __DT_BINDINGS_IMX8MQ_POWER_H__ + +#define IMX8M_POWER_DOMAIN_MIPI 0 +#define IMX8M_POWER_DOMAIN_PCIE1 1 +#define IMX8M_POWER_DOMAIN_USB_OTG1 2 +#define IMX8M_POWER_DOMAIN_USB_OTG2 3 +#define IMX8M_POWER_DOMAIN_DDR1 4 +#define IMX8M_POWER_DOMAIN_GPU 5 +#define IMX8M_POWER_DOMAIN_VPU 6 +#define IMX8M_POWER_DOMAIN_DISP 7 +#define IMX8M_POWER_DOMAIN_MIPI_CSI1 8 +#define IMX8M_POWER_DOMAIN_MIPI_CSI2 9 +#define IMX8M_POWER_DOMAIN_PCIE2 10 + +#endif diff --git a/include/dt-bindings/power/r8a77970-sysc.h b/include/dt-bindings/power/r8a77970-sysc.h index bf54779d1625..85cc5f23cf9f 100644 --- a/include/dt-bindings/power/r8a77970-sysc.h +++ b/include/dt-bindings/power/r8a77970-sysc.h @@ -16,13 +16,12 @@ #define R8A77970_PD_CA53_CPU0 5 #define R8A77970_PD_CA53_CPU1 6 -#define R8A77970_PD_CR7 13 #define R8A77970_PD_CA53_SCU 21 #define R8A77970_PD_A2IR0 23 -#define R8A77970_PD_A3IR 24 +#define R8A77970_PD_A3IR 24 #define R8A77970_PD_A2IR1 27 -#define R8A77970_PD_A2IR2 28 -#define R8A77970_PD_A2IR3 29 +#define R8A77970_PD_A2DP 28 +#define R8A77970_PD_A2CN 29 #define R8A77970_PD_A2SC0 30 #define R8A77970_PD_A2SC1 31 diff --git a/include/dt-bindings/power/r8a77980-sysc.h b/include/dt-bindings/power/r8a77980-sysc.h index 2c90c1237725..e12c8587b87e 100644 --- a/include/dt-bindings/power/r8a77980-sysc.h +++ b/include/dt-bindings/power/r8a77980-sysc.h @@ -15,14 +15,14 @@ #define R8A77980_PD_A2SC2 0 #define R8A77980_PD_A2SC3 1 #define R8A77980_PD_A2SC4 2 -#define R8A77980_PD_A2PD0 3 -#define R8A77980_PD_A2PD1 4 +#define R8A77980_PD_A2DP0 3 +#define R8A77980_PD_A2DP1 4 #define R8A77980_PD_CA53_CPU0 5 #define R8A77980_PD_CA53_CPU1 6 #define R8A77980_PD_CA53_CPU2 7 #define R8A77980_PD_CA53_CPU3 8 #define R8A77980_PD_A2CN 10 -#define R8A77980_PD_A3VIP 11 +#define R8A77980_PD_A3VIP0 11 #define R8A77980_PD_A2IR5 12 #define R8A77980_PD_CR7 13 #define R8A77980_PD_A2IR4 15 diff --git a/include/dt-bindings/power/raspberrypi-power.h b/include/dt-bindings/power/raspberrypi-power.h index b3ff8e09a78f..3575f9f4b0bd 100644 --- a/include/dt-bindings/power/raspberrypi-power.h +++ 
b/include/dt-bindings/power/raspberrypi-power.h @@ -1,9 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright © 2015 Broadcom - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef _DT_BINDINGS_ARM_BCM2835_RPI_POWER_H diff --git a/include/dt-bindings/power/rk3066-power.h b/include/dt-bindings/power/rk3066-power.h new file mode 100644 index 000000000000..acf9f310ac53 --- /dev/null +++ b/include/dt-bindings/power/rk3066-power.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3066_POWER_H__ +#define __DT_BINDINGS_POWER_RK3066_POWER_H__ + +/* VD_CORE */ +#define RK3066_PD_A9_0 0 +#define RK3066_PD_A9_1 1 +#define RK3066_PD_DBG 4 +#define RK3066_PD_SCU 5 + +/* VD_LOGIC */ +#define RK3066_PD_VIDEO 6 +#define RK3066_PD_VIO 7 +#define RK3066_PD_GPU 8 +#define RK3066_PD_PERI 9 +#define RK3066_PD_CPU 10 +#define RK3066_PD_ALIVE 11 + +/* VD_PMU */ +#define RK3066_PD_RTC 12 + +#endif diff --git a/include/dt-bindings/power/rk3188-power.h b/include/dt-bindings/power/rk3188-power.h new file mode 100644 index 000000000000..93d23dfba33f --- /dev/null +++ b/include/dt-bindings/power/rk3188-power.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_POWER_RK3188_POWER_H__ +#define __DT_BINDINGS_POWER_RK3188_POWER_H__ + +/* VD_CORE */ +#define RK3188_PD_A9_0 0 +#define RK3188_PD_A9_1 1 +#define RK3188_PD_A9_2 2 +#define RK3188_PD_A9_3 3 +#define RK3188_PD_DBG 4 +#define RK3188_PD_SCU 5 + +/* VD_LOGIC */ +#define RK3188_PD_VIDEO 6 +#define RK3188_PD_VIO 7 +#define RK3188_PD_GPU 8 +#define RK3188_PD_PERI 9 +#define RK3188_PD_CPU 10 +#define RK3188_PD_ALIVE 11 + +/* VD_PMU */ +#define RK3188_PD_RTC 12 + +#endif diff --git a/include/dt-bindings/regulator/active-semi,8945a-regulator.h b/include/dt-bindings/regulator/active-semi,8945a-regulator.h new file mode 100644 index 000000000000..9bdba5e3141a --- /dev/null +++ b/include/dt-bindings/regulator/active-semi,8945a-regulator.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 Microchip Technology, Inc. All rights reserved. + * + * Device Tree binding constants for the ACT8945A PMIC regulators + */ + +#ifndef _DT_BINDINGS_REGULATOR_ACT8945A_H +#define _DT_BINDINGS_REGULATOR_ACT8945A_H + +/* + * These constants should be used to specify regulator modes in device tree for + * ACT8945A regulators as follows: + * ACT8945A_REGULATOR_MODE_FIXED: It is specific to DCDC regulators and it + * specifies the usage of fixed-frequency + * PWM. + * + * ACT8945A_REGULATOR_MODE_NORMAL: It is specific to LDO regulators and it + * specifies the usage of normal mode. + * + * ACT8945A_REGULATOR_MODE_LOWPOWER: For DCDC and LDO regulators; it specifies + * the usage of the proprietary power-saving + * mode. 
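+ *
+ * Illustrative example (the regulator node itself is an assumption): a DCDC
+ * output could then advertise its supported modes with the generic regulator
+ * properties, e.g.
+ *
+ *   regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_FIXED
+ *                              ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ *   regulator-initial-mode = <ACT8945A_REGULATOR_MODE_FIXED>;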
+ */ + +#define ACT8945A_REGULATOR_MODE_FIXED 1 +#define ACT8945A_REGULATOR_MODE_NORMAL 2 +#define ACT8945A_REGULATOR_MODE_LOWPOWER 3 + +#endif diff --git a/include/dt-bindings/reset/sun8i-de2.h b/include/dt-bindings/reset/sun8i-de2.h index 9526017432f0..1c36a6ac86d6 100644 --- a/include/dt-bindings/reset/sun8i-de2.h +++ b/include/dt-bindings/reset/sun8i-de2.h @@ -10,5 +10,6 @@ #define RST_MIXER0 0 #define RST_MIXER1 1 #define RST_WB 2 +#define RST_ROT 3 #endif /* _DT_BINDINGS_RESET_SUN8I_DE2_H_ */ diff --git a/include/dt-bindings/reset/suniv-ccu-f1c100s.h b/include/dt-bindings/reset/suniv-ccu-f1c100s.h new file mode 100644 index 000000000000..6a4b4385fe5a --- /dev/null +++ b/include/dt-bindings/reset/suniv-ccu-f1c100s.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) + * + * Copyright (C) 2018 Icenowy Zheng <icenowy@aosc.xyz> + * + */ + +#ifndef _DT_BINDINGS_RST_SUNIV_F1C100S_H_ +#define _DT_BINDINGS_RST_SUNIV_F1C100S_H_ + +#define RST_USB_PHY0 0 +#define RST_BUS_DMA 1 +#define RST_BUS_MMC0 2 +#define RST_BUS_MMC1 3 +#define RST_BUS_DRAM 4 +#define RST_BUS_SPI0 5 +#define RST_BUS_SPI1 6 +#define RST_BUS_OTG 7 +#define RST_BUS_VE 8 +#define RST_BUS_LCD 9 +#define RST_BUS_DEINTERLACE 10 +#define RST_BUS_CSI 11 +#define RST_BUS_TVD 12 +#define RST_BUS_TVE 13 +#define RST_BUS_DE_BE 14 +#define RST_BUS_DE_FE 15 +#define RST_BUS_CODEC 16 +#define RST_BUS_SPDIF 17 +#define RST_BUS_IR 18 +#define RST_BUS_RSB 19 +#define RST_BUS_I2S0 20 +#define RST_BUS_I2C0 21 +#define RST_BUS_I2C1 22 +#define RST_BUS_I2C2 23 +#define RST_BUS_UART0 24 +#define RST_BUS_UART1 25 +#define RST_BUS_UART2 26 + +#endif /* _DT_BINDINGS_RST_SUNIV_F1C100S_H_ */ diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h index e2d3892240b8..1df06f8ad5c3 100644 --- a/include/dt-bindings/sound/qcom,q6afe.h +++ b/include/dt-bindings/sound/qcom,q6afe.h @@ -106,6 +106,7 @@ #define QUINARY_TDM_TX_6 101 #define QUINARY_TDM_RX_7 102 #define QUINARY_TDM_TX_7 103 +#define DISPLAY_PORT_RX 104 #endif /* __DT_BINDINGS_Q6_AFE_H__ */ diff --git a/include/dt-bindings/thermal/tegra194-bpmp-thermal.h b/include/dt-bindings/thermal/tegra194-bpmp-thermal.h new file mode 100644 index 000000000000..aa7fb08135ca --- /dev/null +++ b/include/dt-bindings/thermal/tegra194-bpmp-thermal.h @@ -0,0 +1,15 @@ +/* + * This header provides constants for binding nvidia,tegra194-bpmp-thermal. 
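+ *
+ * Illustrative example (the bpmp label is an assumption): a thermal zone
+ * could reference the BPMP-provided CPU sensor as
+ *
+ *   thermal-sensors = <&bpmp TEGRA194_BPMP_THERMAL_ZONE_CPU>;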
+ */ + +#ifndef _DT_BINDINGS_THERMAL_TEGRA194_BPMP_THERMAL_H +#define _DT_BINDINGS_THERMAL_TEGRA194_BPMP_THERMAL_H + +#define TEGRA194_BPMP_THERMAL_ZONE_CPU 2 +#define TEGRA194_BPMP_THERMAL_ZONE_GPU 3 +#define TEGRA194_BPMP_THERMAL_ZONE_AUX 4 +#define TEGRA194_BPMP_THERMAL_ZONE_PLLX 5 +#define TEGRA194_BPMP_THERMAL_ZONE_AO 6 +#define TEGRA194_BPMP_THERMAL_ZONE_TJ_MAX 7 + +#endif diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 6502feb9524b..33771352dcd6 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -21,7 +21,6 @@ #include <linux/clocksource.h> #include <linux/hrtimer.h> -#include <linux/workqueue.h> struct arch_timer_context { /* Registers: control register, timer value */ @@ -52,9 +51,6 @@ struct arch_timer_cpu { /* Background timer used when the guest is not running */ struct hrtimer bg_timer; - /* Work queued with the above timer expires */ - struct work_struct expired; - /* Physical timer emulation */ struct hrtimer phys_timer; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index ed80f147bd50..87715f20b69a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -101,7 +101,7 @@ static inline bool has_acpi_companion(struct device *dev) static inline void acpi_preset_companion(struct device *dev, struct acpi_device *parent, u64 addr) { - ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL)); + ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false)); } static inline const char *acpi_dev_name(struct acpi_device *adev) @@ -340,7 +340,14 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); bool acpi_isa_irq_available(int irq); +#ifdef CONFIG_PCI void acpi_penalize_sci_irq(int irq, int trigger, int polarity); +#else +static inline void acpi_penalize_sci_irq(int irq, int trigger, + int polarity) +{ +} +#endif void acpi_pci_irq_disable (struct pci_dev *dev); extern int ec_read(u8 addr, u8 *val); @@ -1054,6 +1061,17 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) } #endif +#if defined(CONFIG_ACPI) && IS_ENABLED(CONFIG_I2C) +bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, + struct acpi_resource_i2c_serialbus **i2c); +#else +static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, + struct acpi_resource_i2c_serialbus **i2c) +{ + return false; +} +#endif + /* Device properties */ #ifdef CONFIG_ACPI @@ -1313,4 +1331,14 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level) } #endif +#ifdef CONFIG_ACPI +extern int acpi_platform_notify(struct device *dev, enum kobject_action action); +#else +static inline int +acpi_platform_notify(struct device *dev, enum kobject_action action) +{ + return 0; +} +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/adxl.h b/include/linux/adxl.h index 2d29f55923e3..2a629acb4c3f 100644 --- a/include/linux/adxl.h +++ b/include/linux/adxl.h @@ -7,12 +7,7 @@ #ifndef _LINUX_ADXL_H #define _LINUX_ADXL_H -#ifdef CONFIG_ACPI_ADXL const char * const *adxl_get_component_names(void); int adxl_decode(u64 addr, u64 component_values[]); -#else -static inline const char * const *adxl_get_component_names(void) { return NULL; } -static inline int adxl_decode(u64 addr, u64 component_values[]) { return -EOPNOTSUPP; } -#endif #endif /* _LINUX_ADXL_H */ diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h new file mode 100644 index 000000000000..da973e8a2da8 --- /dev/null +++ b/include/linux/alcor_pci.h @@ -0,0 +1,286 
@@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de> + * + * Driver for Alcor Micro AU6601 and AU6621 controllers + */ + +#ifndef __ALCOR_PCI_H +#define __ALCOR_PCI_H + +#define ALCOR_SD_CARD 0 +#define ALCOR_MS_CARD 1 + +#define DRV_NAME_ALCOR_PCI_SDMMC "alcor_sdmmc" +#define DRV_NAME_ALCOR_PCI_MS "alcor_ms" + +#define PCI_ID_ALCOR_MICRO 0x1AEA +#define PCI_ID_AU6601 0x6601 +#define PCI_ID_AU6621 0x6621 + +#define MHZ_TO_HZ(freq) ((freq) * 1000 * 1000) + +#define AU6601_BASE_CLOCK 31000000 +#define AU6601_MIN_CLOCK 150000 +#define AU6601_MAX_CLOCK 208000000 +#define AU6601_MAX_DMA_SEGMENTS 1 +#define AU6601_MAX_PIO_SEGMENTS 1 +#define AU6601_MAX_DMA_BLOCK_SIZE 0x1000 +#define AU6601_MAX_PIO_BLOCK_SIZE 0x200 +#define AU6601_MAX_DMA_BLOCKS 1 +#define AU6601_DMA_LOCAL_SEGMENTS 1 + +/* registers spotted by reverse engineering but still + * with unknown functionality: + * 0x10 - ADMA phy address. AU6621 only? + * 0x51 - LED ctrl? + * 0x52 - unknown + * 0x61 - LED related? Always toggled BIT0 + * 0x63 - Same as 0x61? + * 0x77 - unknown + */ + +/* SDMA phy address. Higher than 0x0800.0000? + * The au6601 and au6621 have different DMA engines with different issues. + * For example, the au6621 engine is triggered by an address change; no other + * interaction is needed. This means that if we get two buffers with the same + * address, the engine will stall. + */ +#define AU6601_REG_SDMA_ADDR 0x00 +#define AU6601_SDMA_MASK 0xffffffff + +#define AU6601_DMA_BOUNDARY 0x05 +#define AU6621_DMA_PAGE_CNT 0x05 +/* PIO */ +#define AU6601_REG_BUFFER 0x08 +/* ADMA ctrl? AU6621 only. */ +#define AU6621_DMA_CTRL 0x0c +#define AU6621_DMA_ENABLE BIT(0) +/* CMD index */ +#define AU6601_REG_CMD_OPCODE 0x23 +/* CMD parameter */ +#define AU6601_REG_CMD_ARG 0x24 +/* CMD response 4x4 Bytes */ +#define AU6601_REG_CMD_RSP0 0x30 +#define AU6601_REG_CMD_RSP1 0x34 +#define AU6601_REG_CMD_RSP2 0x38 +#define AU6601_REG_CMD_RSP3 0x3C +/* default timeout set to 125: 125 * 40ms = 5 sec + * how exactly is it calculated? + */ +#define AU6601_TIME_OUT_CTRL 0x69 +/* Block size for SDMA or PIO */ +#define AU6601_REG_BLOCK_SIZE 0x6c +/* Some power related reg, used together with AU6601_OUTPUT_ENABLE */ +#define AU6601_POWER_CONTROL 0x70 + +/* PLL ctrl */ +#define AU6601_CLK_SELECT 0x72 +#define AU6601_CLK_OVER_CLK 0x80 +#define AU6601_CLK_384_MHZ 0x30 +#define AU6601_CLK_125_MHZ 0x20 +#define AU6601_CLK_48_MHZ 0x10 +#define AU6601_CLK_EXT_PLL 0x04 +#define AU6601_CLK_X2_MODE 0x02 +#define AU6601_CLK_ENABLE 0x01 +#define AU6601_CLK_31_25_MHZ 0x00 + +#define AU6601_CLK_DIVIDER 0x73 + +#define AU6601_INTERFACE_MODE_CTRL 0x74 +#define AU6601_DLINK_MODE 0x80 +#define AU6601_INTERRUPT_DELAY_TIME 0x40 +#define AU6601_SIGNAL_REQ_CTRL 0x30 +#define AU6601_MS_CARD_WP BIT(3) +#define AU6601_SD_CARD_WP BIT(0) + +/* same register values are used for: + * - AU6601_OUTPUT_ENABLE + * - AU6601_POWER_CONTROL + */ +#define AU6601_ACTIVE_CTRL 0x75 +#define AU6601_XD_CARD BIT(4) +/* AU6601_MS_CARD_ACTIVE - will activate the MS card section? */ +#define AU6601_MS_CARD BIT(3) +#define AU6601_SD_CARD BIT(0) + +/* card slot state. 
It should automatically detect type of + * the card + */ +#define AU6601_DETECT_STATUS 0x76 +#define AU6601_DETECT_EN BIT(7) +#define AU6601_MS_DETECTED BIT(3) +#define AU6601_SD_DETECTED BIT(0) +#define AU6601_DETECT_STATUS_M 0xf + +#define AU6601_REG_SW_RESET 0x79 +#define AU6601_BUF_CTRL_RESET BIT(7) +#define AU6601_RESET_DATA BIT(3) +#define AU6601_RESET_CMD BIT(0) + +#define AU6601_OUTPUT_ENABLE 0x7a + +#define AU6601_PAD_DRIVE0 0x7b +#define AU6601_PAD_DRIVE1 0x7c +#define AU6601_PAD_DRIVE2 0x7d +/* read EEPROM? */ +#define AU6601_FUNCTION 0x7f + +#define AU6601_CMD_XFER_CTRL 0x81 +#define AU6601_CMD_17_BYTE_CRC 0xc0 +#define AU6601_CMD_6_BYTE_WO_CRC 0x80 +#define AU6601_CMD_6_BYTE_CRC 0x40 +#define AU6601_CMD_START_XFER 0x20 +#define AU6601_CMD_STOP_WAIT_RDY 0x10 +#define AU6601_CMD_NO_RESP 0x00 + +#define AU6601_REG_BUS_CTRL 0x82 +#define AU6601_BUS_WIDTH_4BIT 0x20 +#define AU6601_BUS_WIDTH_8BIT 0x10 +#define AU6601_BUS_WIDTH_1BIT 0x00 + +#define AU6601_DATA_XFER_CTRL 0x83 +#define AU6601_DATA_WRITE BIT(7) +#define AU6601_DATA_DMA_MODE BIT(6) +#define AU6601_DATA_START_XFER BIT(0) + +#define AU6601_DATA_PIN_STATE 0x84 +#define AU6601_BUS_STAT_CMD BIT(15) +/* BIT(4) - BIT(7) are permanently 1. + * May be reserved or not attached DAT4-DAT7 + */ +#define AU6601_BUS_STAT_DAT3 BIT(3) +#define AU6601_BUS_STAT_DAT2 BIT(2) +#define AU6601_BUS_STAT_DAT1 BIT(1) +#define AU6601_BUS_STAT_DAT0 BIT(0) +#define AU6601_BUS_STAT_DAT_MASK 0xf + +#define AU6601_OPT 0x85 +#define AU6601_OPT_CMD_LINE_LEVEL 0x80 +#define AU6601_OPT_NCRC_16_CLK BIT(4) +#define AU6601_OPT_CMD_NWT BIT(3) +#define AU6601_OPT_STOP_CLK BIT(2) +#define AU6601_OPT_DDR_MODE BIT(1) +#define AU6601_OPT_SD_18V BIT(0) + +#define AU6601_CLK_DELAY 0x86 +#define AU6601_CLK_DATA_POSITIVE_EDGE 0x80 +#define AU6601_CLK_CMD_POSITIVE_EDGE 0x40 +#define AU6601_CLK_POSITIVE_EDGE_ALL (AU6601_CLK_CMD_POSITIVE_EDGE \ + | AU6601_CLK_DATA_POSITIVE_EDGE) + + +#define AU6601_REG_INT_STATUS 0x90 +#define AU6601_REG_INT_ENABLE 0x94 +#define AU6601_INT_DATA_END_BIT_ERR BIT(22) +#define AU6601_INT_DATA_CRC_ERR BIT(21) +#define AU6601_INT_DATA_TIMEOUT_ERR BIT(20) +#define AU6601_INT_CMD_INDEX_ERR BIT(19) +#define AU6601_INT_CMD_END_BIT_ERR BIT(18) +#define AU6601_INT_CMD_CRC_ERR BIT(17) +#define AU6601_INT_CMD_TIMEOUT_ERR BIT(16) +#define AU6601_INT_ERROR BIT(15) +#define AU6601_INT_OVER_CURRENT_ERR BIT(8) +#define AU6601_INT_CARD_INSERT BIT(7) +#define AU6601_INT_CARD_REMOVE BIT(6) +#define AU6601_INT_READ_BUF_RDY BIT(5) +#define AU6601_INT_WRITE_BUF_RDY BIT(4) +#define AU6601_INT_DMA_END BIT(3) +#define AU6601_INT_DATA_END BIT(1) +#define AU6601_INT_CMD_END BIT(0) + +#define AU6601_INT_NORMAL_MASK 0x00007FFF +#define AU6601_INT_ERROR_MASK 0xFFFF8000 + +#define AU6601_INT_CMD_MASK (AU6601_INT_CMD_END | \ + AU6601_INT_CMD_TIMEOUT_ERR | AU6601_INT_CMD_CRC_ERR | \ + AU6601_INT_CMD_END_BIT_ERR | AU6601_INT_CMD_INDEX_ERR) +#define AU6601_INT_DATA_MASK (AU6601_INT_DATA_END | AU6601_INT_DMA_END | \ + AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY | \ + AU6601_INT_DATA_TIMEOUT_ERR | AU6601_INT_DATA_CRC_ERR | \ + AU6601_INT_DATA_END_BIT_ERR) +#define AU6601_INT_ALL_MASK ((u32)-1) + +/* MS_CARD mode registers */ + +#define AU6601_MS_STATUS 0xa0 + +#define AU6601_MS_BUS_MODE_CTRL 0xa1 +#define AU6601_MS_BUS_8BIT_MODE 0x03 +#define AU6601_MS_BUS_4BIT_MODE 0x01 +#define AU6601_MS_BUS_1BIT_MODE 0x00 + +#define AU6601_MS_TPC_CMD 0xa2 +#define AU6601_MS_TPC_READ_PAGE_DATA 0x02 +#define AU6601_MS_TPC_READ_REG 0x04 +#define AU6601_MS_TPC_GET_INT 0x07 +#define 
AU6601_MS_TPC_WRITE_PAGE_DATA 0x0D +#define AU6601_MS_TPC_WRITE_REG 0x0B +#define AU6601_MS_TPC_SET_RW_REG_ADRS 0x08 +#define AU6601_MS_TPC_SET_CMD 0x0E +#define AU6601_MS_TPC_EX_SET_CMD 0x09 +#define AU6601_MS_TPC_READ_SHORT_DATA 0x03 +#define AU6601_MS_TPC_WRITE_SHORT_DATA 0x0C + +#define AU6601_MS_TRANSFER_MODE 0xa3 +#define AU6601_MS_XFER_INT_TIMEOUT_CHK BIT(2) +#define AU6601_MS_XFER_DMA_ENABLE BIT(1) +#define AU6601_MS_XFER_START BIT(0) + +#define AU6601_MS_DATA_PIN_STATE 0xa4 + +#define AU6601_MS_INT_STATUS 0xb0 +#define AU6601_MS_INT_ENABLE 0xb4 +#define AU6601_MS_INT_OVER_CURRENT_ERROR BIT(23) +#define AU6601_MS_INT_DATA_CRC_ERROR BIT(21) +#define AU6601_MS_INT_INT_TIMEOUT BIT(20) +#define AU6601_MS_INT_INT_RESP_ERROR BIT(19) +#define AU6601_MS_INT_CED_ERROR BIT(18) +#define AU6601_MS_INT_TPC_TIMEOUT BIT(16) +#define AU6601_MS_INT_ERROR BIT(15) +#define AU6601_MS_INT_CARD_INSERT BIT(7) +#define AU6601_MS_INT_CARD_REMOVE BIT(6) +#define AU6601_MS_INT_BUF_READ_RDY BIT(5) +#define AU6601_MS_INT_BUF_WRITE_RDY BIT(4) +#define AU6601_MS_INT_DMA_END BIT(3) +#define AU6601_MS_INT_TPC_END BIT(1) + +#define AU6601_MS_INT_DATA_MASK 0x00000038 +#define AU6601_MS_INT_TPC_MASK 0x003d8002 +#define AU6601_MS_INT_TPC_ERROR 0x003d0000 + +#define ALCOR_PCIE_LINK_CTRL_OFFSET 0x10 +#define ALCOR_PCIE_LINK_CAP_OFFSET 0x0c +#define ALCOR_CAP_START_OFFSET 0x34 + +struct alcor_dev_cfg { + u8 dma; +}; + +struct alcor_pci_priv { + struct pci_dev *pdev; + struct pci_dev *parent_pdev; + struct device *dev; + void __iomem *iobase; + unsigned int irq; + + unsigned long id; /* idr id */ + + struct alcor_dev_cfg *cfg; + + /* PCI ASPM related vars */ + int pdev_cap_off; + u8 pdev_aspm_cap; + int parent_cap_off; + u8 parent_aspm_cap; + u8 ext_config_dev_aspm; +}; + +void alcor_write8(struct alcor_pci_priv *priv, u8 val, unsigned int addr); +void alcor_write16(struct alcor_pci_priv *priv, u16 val, unsigned int addr); +void alcor_write32(struct alcor_pci_priv *priv, u32 val, unsigned int addr); +void alcor_write32be(struct alcor_pci_priv *priv, u32 val, unsigned int addr); +u8 alcor_read8(struct alcor_pci_priv *priv, unsigned int addr); +u32 alcor_read32(struct alcor_pci_priv *priv, unsigned int addr); +u32 alcor_read32be(struct alcor_pci_priv *priv, unsigned int addr); +#endif diff --git a/include/linux/audit.h b/include/linux/audit.h index 9334fbef7bae..a625c29a2ea2 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -115,8 +115,6 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall); struct filename; -extern void audit_log_session_info(struct audit_buffer *ab); - #define AUDIT_OFF 0 #define AUDIT_ON 1 #define AUDIT_LOCKED 2 @@ -153,8 +151,7 @@ extern void audit_log_link_denied(const char *operation); extern void audit_log_lost(const char *message); extern int audit_log_task_context(struct audit_buffer *ab); -extern void audit_log_task_info(struct audit_buffer *ab, - struct task_struct *tsk); +extern void audit_log_task_info(struct audit_buffer *ab); extern int audit_update_lsm_rules(void); @@ -202,8 +199,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab) { return 0; } -static inline void audit_log_task_info(struct audit_buffer *ab, - struct task_struct *tsk) +static inline void audit_log_task_info(struct audit_buffer *ab) { } #define audit_enabled AUDIT_OFF #endif /* CONFIG_AUDIT */ diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index b2488055fd1d..7605b5919c3a 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h 
@@ -171,7 +171,7 @@ struct virtchnl_msg { VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); -/* Message descriptions and data structures.*/ +/* Message descriptions and data structures. */ /* VIRTCHNL_OP_VERSION * VF posts its version number to the PF. PF responds with its version number @@ -342,6 +342,8 @@ struct virtchnl_vsi_queue_config_info { struct virtchnl_queue_pair_info qpair[1]; }; +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); + /* VIRTCHNL_OP_REQUEST_QUEUES * VF sends this message to request the PF to allocate additional queues to * this VF. Each VF gets a guaranteed number of queues on init but asking for @@ -357,8 +359,6 @@ struct virtchnl_vf_res_request { u16 num_queue_pairs; }; -VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); - /* VIRTCHNL_OP_CONFIG_IRQ_MAP * VF uses this message to map vectors to queues. * The rxq_map and txq_map fields are bitmaps used to indicate which queues @@ -819,8 +819,8 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, if (msglen >= valid_len) { struct virtchnl_tc_info *vti = (struct virtchnl_tc_info *)msg; - valid_len += vti->num_tc * - sizeof(struct virtchnl_channel_info); + valid_len += (vti->num_tc - 1) * + sizeof(struct virtchnl_channel_info); if (vti->num_tc == 0) err_msg_format = true; } diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 9a6bc0951cfa..c31157135598 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -258,6 +258,14 @@ static inline void wb_get(struct bdi_writeback *wb) */ static inline void wb_put(struct bdi_writeback *wb) { + if (WARN_ON_ONCE(!wb->bdi)) { + /* + * A driver bug might cause a file to be removed before bdi was + * initialized. + */ + return; + } + if (wb != &wb->bdi->wb) percpu_ref_put(&wb->refcnt); } diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index e9f5fe69df31..688ab0de7810 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -25,6 +25,7 @@ struct linux_binprm { #endif struct mm_struct *mm; unsigned long p; /* current top of mem */ + unsigned long argmin; /* rlimit marker for copy_strings() */ unsigned int /* * True after the bprm_set_creds hook has been called once @@ -138,7 +139,6 @@ extern int transfer_args_to_stack(struct linux_binprm *bprm, extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm); extern int copy_strings_kernel(int argc, const char *const *argv, struct linux_binprm *bprm); -extern int prepare_bprm_creds(struct linux_binprm *bprm); extern void install_exec_creds(struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); diff --git a/include/linux/bio.h b/include/linux/bio.h index 056fb627edb3..7380b094dcca 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -491,35 +491,40 @@ do { \ bio_clear_flag(bio, BIO_THROTTLED);\ (bio)->bi_disk = (bdev)->bd_disk; \ (bio)->bi_partno = (bdev)->bd_partno; \ + bio_associate_blkg(bio); \ } while (0) #define bio_copy_dev(dst, src) \ do { \ (dst)->bi_disk = (src)->bi_disk; \ (dst)->bi_partno = (src)->bi_partno; \ + bio_clone_blkg_association(dst, src); \ } while (0) #define bio_dev(bio) \ disk_devt((bio)->bi_disk) #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -int bio_associate_blkcg_from_page(struct bio *bio, struct page *page); +void bio_associate_blkg_from_page(struct bio *bio, struct page *page); #else -static inline int bio_associate_blkcg_from_page(struct bio 
*bio, - struct page *page) { return 0; } +static inline void bio_associate_blkg_from_page(struct bio *bio, + struct page *page) { } #endif #ifdef CONFIG_BLK_CGROUP -int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); -int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg); -void bio_disassociate_task(struct bio *bio); -void bio_clone_blkcg_association(struct bio *dst, struct bio *src); +void bio_disassociate_blkg(struct bio *bio); +void bio_associate_blkg(struct bio *bio); +void bio_associate_blkg_from_css(struct bio *bio, + struct cgroup_subsys_state *css); +void bio_clone_blkg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ -static inline int bio_associate_blkcg(struct bio *bio, - struct cgroup_subsys_state *blkcg_css) { return 0; } -static inline void bio_disassociate_task(struct bio *bio) { } -static inline void bio_clone_blkcg_association(struct bio *dst, - struct bio *src) { } +static inline void bio_disassociate_blkg(struct bio *bio) { } +static inline void bio_associate_blkg(struct bio *bio) { } +static inline void bio_associate_blkg_from_css(struct bio *bio, + struct cgroup_subsys_state *css) +{ } +static inline void bio_clone_blkg_association(struct bio *dst, + struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ #ifdef CONFIG_HIGHMEM diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 6d766a19f2bb..76c61318fda5 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -21,6 +21,7 @@ #include <linux/blkdev.h> #include <linux/atomic.h> #include <linux/kthread.h> +#include <linux/fs.h> /* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */ #define BLKG_STAT_CPU_BATCH (INT_MAX / 2) @@ -122,11 +123,8 @@ struct blkcg_gq { /* all non-root blkcg_gq's are guaranteed to have access to parent */ struct blkcg_gq *parent; - /* request allocation list for this blkcg-q pair */ - struct request_list rl; - /* reference count */ - atomic_t refcnt; + struct percpu_ref refcnt; /* is this blkg online? protected by both blkcg and q locks */ bool online; @@ -184,6 +182,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); +struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, + struct request_queue *q); struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, struct request_queue *q); int blkcg_init_queue(struct request_queue *q); @@ -230,22 +230,62 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, char *input, struct blkg_conf_ctx *ctx); void blkg_conf_finish(struct blkg_conf_ctx *ctx); +/** + * blkcg_css - find the current css + * + * Find the css associated with either the kthread or the current task. + * This may return a dying css, so it is up to the caller to use tryget logic + * to confirm it is alive and well. + */ +static inline struct cgroup_subsys_state *blkcg_css(void) +{ + struct cgroup_subsys_state *css; + + css = kthread_blkcg(); + if (css) + return css; + return task_css(current, io_cgrp_id); +} static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) { return css ? container_of(css, struct blkcg, css) : NULL; } -static inline struct blkcg *bio_blkcg(struct bio *bio) +/** + * __bio_blkcg - internal, inconsistent version to get blkcg + * + * DO NOT USE. + * This function is inconsistent and consequently is dangerous to use. 
The + * first part of the function returns a blkcg where a reference is owned by the + * bio. This means it does not need to be rcu protected as it cannot go away + * with the bio owning a reference to it. However, the latter potentially gets + * it from task_css(). This can race against task migration and the cgroup + * dying. It is also semantically different as it must be called rcu protected + * and is susceptible to failure when trying to get a reference to it. + * Therefore, it is not ok to assume that *_get() will always succeed on the + * blkcg returned here. + */ +static inline struct blkcg *__bio_blkcg(struct bio *bio) { - struct cgroup_subsys_state *css; + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; + return css_to_blkcg(blkcg_css()); +} - if (bio && bio->bi_css) - return css_to_blkcg(bio->bi_css); - css = kthread_blkcg(); - if (css) - return css_to_blkcg(css); - return css_to_blkcg(task_css(current, io_cgrp_id)); +/** + * bio_blkcg - grab the blkcg associated with a bio + * @bio: target bio + * + * This returns the blkcg associated with a bio, %NULL if not associated. + * Callers are expected to either handle %NULL or know association has been + * done prior to calling this. + */ +static inline struct blkcg *bio_blkcg(struct bio *bio) +{ + if (bio && bio->bi_blkg) + return bio->bi_blkg->blkcg; + return NULL; } static inline bool blk_cgroup_congested(void) @@ -328,16 +368,12 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, * @q: request_queue of interest * * Lookup blkg for the @blkcg - @q pair. This function should be called - * under RCU read lock and is guaranteed to return %NULL if @q is bypassing - * - see blk_queue_bypass_start() for details. + * under RCU read loc. */ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q) { WARN_ON_ONCE(!rcu_read_lock_held()); - - if (unlikely(blk_queue_bypass(q))) - return NULL; return __blkg_lookup(blkcg, q, false); } @@ -451,26 +487,46 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) */ static inline void blkg_get(struct blkcg_gq *blkg) { - WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); - atomic_inc(&blkg->refcnt); + percpu_ref_get(&blkg->refcnt); } /** - * blkg_try_get - try and get a blkg reference + * blkg_tryget - try and get a blkg reference * @blkg: blkg to get * * This is for use when doing an RCU lookup of the blkg. We may be in the midst * of freeing this blkg, so we can only use it if the refcnt is not zero. */ -static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg) +static inline bool blkg_tryget(struct blkcg_gq *blkg) { - if (atomic_inc_not_zero(&blkg->refcnt)) - return blkg; - return NULL; + return blkg && percpu_ref_tryget(&blkg->refcnt); } +/** + * blkg_tryget_closest - try and get a blkg ref on the closet blkg + * @blkg: blkg to get + * + * This needs to be called rcu protected. As the failure mode here is to walk + * up the blkg tree, this ensure that the blkg->parent pointers are always + * valid. This returns the blkg that it ended up taking a reference on or %NULL + * if no reference was taken. 
+ */ +static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) +{ + struct blkcg_gq *ret_blkg = NULL; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + while (blkg) { + if (blkg_tryget(blkg)) { + ret_blkg = blkg; + break; + } + blkg = blkg->parent; + } -void __blkg_release_rcu(struct rcu_head *rcu); + return ret_blkg; +} /** * blkg_put - put a blkg reference @@ -478,9 +534,7 @@ void __blkg_release_rcu(struct rcu_head *rcu); */ static inline void blkg_put(struct blkcg_gq *blkg) { - WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0); - if (atomic_dec_and_test(&blkg->refcnt)) - call_rcu(&blkg->rcu_head, __blkg_release_rcu); + percpu_ref_put(&blkg->refcnt); } /** @@ -515,94 +569,6 @@ static inline void blkg_put(struct blkcg_gq *blkg) if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ (p_blkg)->q, false))) -/** - * blk_get_rl - get request_list to use - * @q: request_queue of interest - * @bio: bio which will be attached to the allocated request (may be %NULL) - * - * The caller wants to allocate a request from @q to use for @bio. Find - * the request_list to use and obtain a reference on it. Should be called - * under queue_lock. This function is guaranteed to return non-%NULL - * request_list. - */ -static inline struct request_list *blk_get_rl(struct request_queue *q, - struct bio *bio) -{ - struct blkcg *blkcg; - struct blkcg_gq *blkg; - - rcu_read_lock(); - - blkcg = bio_blkcg(bio); - - /* bypass blkg lookup and use @q->root_rl directly for root */ - if (blkcg == &blkcg_root) - goto root_rl; - - /* - * Try to use blkg->rl. blkg lookup may fail under memory pressure - * or if either the blkcg or queue is going away. Fall back to - * root_rl in such cases. - */ - blkg = blkg_lookup(blkcg, q); - if (unlikely(!blkg)) - goto root_rl; - - blkg_get(blkg); - rcu_read_unlock(); - return &blkg->rl; -root_rl: - rcu_read_unlock(); - return &q->root_rl; -} - -/** - * blk_put_rl - put request_list - * @rl: request_list to put - * - * Put the reference acquired by blk_get_rl(). Should be called under - * queue_lock. - */ -static inline void blk_put_rl(struct request_list *rl) -{ - if (rl->blkg->blkcg != &blkcg_root) - blkg_put(rl->blkg); -} - -/** - * blk_rq_set_rl - associate a request with a request_list - * @rq: request of interest - * @rl: target request_list - * - * Associate @rq with @rl so that accounting and freeing can know the - * request_list @rq came from. - */ -static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) -{ - rq->rl = rl; -} - -/** - * blk_rq_rl - return the request_list a request came from - * @rq: request of interest - * - * Return the request_list @rq is allocated from. - */ -static inline struct request_list *blk_rq_rl(struct request *rq) -{ - return rq->rl; -} - -struct request_list *__blk_queue_next_rl(struct request_list *rl, - struct request_queue *q); -/** - * blk_queue_for_each_rl - iterate through all request_lists of a request_queue - * - * Should be used under queue_lock. 
- */ -#define blk_queue_for_each_rl(rl, q) \ - for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q))) - static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp) { int ret; @@ -797,32 +763,34 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg struct bio *bio) { return false; } #endif + +static inline void blkcg_bio_issue_init(struct bio *bio) +{ + bio_issue_init(&bio->bi_issue, bio_sectors(bio)); +} + static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { - struct blkcg *blkcg; struct blkcg_gq *blkg; bool throtl = false; rcu_read_lock(); - blkcg = bio_blkcg(bio); - - /* associate blkcg if bio hasn't attached one */ - bio_associate_blkcg(bio, &blkcg->css); - - blkg = blkg_lookup(blkcg, q); - if (unlikely(!blkg)) { - spin_lock_irq(q->queue_lock); - blkg = blkg_lookup_create(blkcg, q); - if (IS_ERR(blkg)) - blkg = NULL; - spin_unlock_irq(q->queue_lock); + + if (!bio->bi_blkg) { + char b[BDEVNAME_SIZE]; + + WARN_ONCE(1, + "no blkg associated for bio on block-device: %s\n", + bio_devname(bio, b)); + bio_associate_blkg(bio); } + blkg = bio->bi_blkg; + throtl = blk_throtl_bio(q, blkg, bio); if (!throtl) { - blkg = blkg ?: q->root_blkg; /* * If the bio is flagged with BIO_QUEUE_ENTERED it means this * is a split bio and we would have already accounted for the @@ -834,6 +802,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); } + blkcg_bio_issue_init(bio); + rcu_read_unlock(); return !throtl; } @@ -930,6 +900,7 @@ static inline int blkcg_activate_policy(struct request_queue *q, static inline void blkcg_deactivate_policy(struct request_queue *q, const struct blkcg_policy *pol) { } +static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg, @@ -939,12 +910,7 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; } static inline void blkg_get(struct blkcg_gq *blkg) { } static inline void blkg_put(struct blkcg_gq *blkg) { } -static inline struct request_list *blk_get_rl(struct request_queue *q, - struct bio *bio) { return &q->root_rl; } -static inline void blk_put_rl(struct request_list *rl) { } -static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } -static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } - +static inline void blkcg_bio_issue_init(struct bio *bio) { } static inline bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio) { return true; } diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h index 9f4c17f0d2d8..0b1f45c62623 100644 --- a/include/linux/blk-mq-pci.h +++ b/include/linux/blk-mq-pci.h @@ -2,10 +2,10 @@ #ifndef _LINUX_BLK_MQ_PCI_H #define _LINUX_BLK_MQ_PCI_H -struct blk_mq_tag_set; +struct blk_mq_queue_map; struct pci_dev; -int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev, +int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev, int offset); #endif /* _LINUX_BLK_MQ_PCI_H */ diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h index b4ade198007d..7b6ecf9ac4c3 100644 --- a/include/linux/blk-mq-rdma.h +++ b/include/linux/blk-mq-rdma.h @@ -4,7 +4,7 @@ struct blk_mq_tag_set; struct ib_device; -int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set, +int blk_mq_rdma_map_queues(struct 
blk_mq_queue_map *map, struct ib_device *dev, int first_vec); #endif /* _LINUX_BLK_MQ_RDMA_H */ diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h index 69b4da262c45..687ae287e1dc 100644 --- a/include/linux/blk-mq-virtio.h +++ b/include/linux/blk-mq-virtio.h @@ -2,10 +2,10 @@ #ifndef _LINUX_BLK_MQ_VIRTIO_H #define _LINUX_BLK_MQ_VIRTIO_H -struct blk_mq_tag_set; +struct blk_mq_queue_map; struct virtio_device; -int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set, +int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap, struct virtio_device *vdev, int first_vec); #endif /* _LINUX_BLK_MQ_VIRTIO_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 2286dc12c6bc..0e030f5f76b6 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -37,7 +37,8 @@ struct blk_mq_hw_ctx { struct blk_mq_ctx *dispatch_from; unsigned int dispatch_busy; - unsigned int nr_ctx; + unsigned short type; + unsigned short nr_ctx; struct blk_mq_ctx **ctxs; spinlock_t dispatch_wait_lock; @@ -74,10 +75,31 @@ struct blk_mq_hw_ctx { struct srcu_struct srcu[0]; }; +struct blk_mq_queue_map { + unsigned int *mq_map; + unsigned int nr_queues; + unsigned int queue_offset; +}; + +enum hctx_type { + HCTX_TYPE_DEFAULT, /* all I/O not otherwise accounted for */ + HCTX_TYPE_READ, /* just for READ I/O */ + HCTX_TYPE_POLL, /* polled I/O of any kind */ + + HCTX_MAX_TYPES, +}; + struct blk_mq_tag_set { - unsigned int *mq_map; + /* + * map[] holds ctx -> hctx mappings, one map exists for each type + * that the driver wishes to support. There are no restrictions + * on maps being of the same size, and it's perfectly legal to + * share maps between types. + */ + struct blk_mq_queue_map map[HCTX_MAX_TYPES]; + unsigned int nr_maps; /* nr entries in map[] */ const struct blk_mq_ops *ops; - unsigned int nr_hw_queues; + unsigned int nr_hw_queues; /* nr hw queues across maps */ unsigned int queue_depth; /* max hw supported */ unsigned int reserved_tags; unsigned int cmd_size; /* per-request extra data */ @@ -99,6 +121,7 @@ struct blk_mq_queue_data { typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); +typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *); typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *); typedef void (put_budget_fn)(struct blk_mq_hw_ctx *); typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); @@ -109,11 +132,13 @@ typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *, typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *, unsigned int); -typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, +typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, bool); -typedef void (busy_tag_iter_fn)(struct request *, void *, bool); -typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int); +typedef bool (busy_tag_iter_fn)(struct request *, void *, bool); +typedef int (poll_fn)(struct blk_mq_hw_ctx *); typedef int (map_queues_fn)(struct blk_mq_tag_set *set); +typedef bool (busy_fn)(struct request_queue *); +typedef void (complete_fn)(struct request *); struct blk_mq_ops { @@ -123,6 +148,15 @@ struct blk_mq_ops { queue_rq_fn *queue_rq; /* + * If a driver uses bd->last to judge when to submit requests to + * hardware, it must define this function. In case of errors that + * make us stop issuing further requests, this hook serves the + * purpose of kicking the hardware (which the last request otherwise + * would have done). 
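+ *
+ * Illustrative sketch (the foo_* names are assumptions, not from this
+ * patch): a driver that only rings its doorbell when bd->last is set
+ * could provide
+ *
+ *   static void foo_commit_rqs(struct blk_mq_hw_ctx *hctx)
+ *   {
+ *           struct foo_hw_queue *fq = hctx->driver_data;
+ *
+ *           foo_ring_doorbell(fq);
+ *   }
+ *
+ * so that requests queued with bd->last == false still reach the hardware
+ * when an error or resource shortage ends the dispatch loop early.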
+ */ + commit_rqs_fn *commit_rqs; + + /* * Reserve budget before queue request, once .queue_rq is * run, it is driver's responsibility to release the * reserved budget. Also we have to handle failure case @@ -141,7 +175,7 @@ struct blk_mq_ops { */ poll_fn *poll; - softirq_done_fn *complete; + complete_fn *complete; /* * Called when the block layer side of a hardware queue has been @@ -165,6 +199,11 @@ struct blk_mq_ops { /* Called from inside blk_get_request() */ void (*initialize_rq_fn)(struct request *rq); + /* + * If set, returns whether or not this queue currently is busy + */ + busy_fn *busy; + map_queues_fn *map_queues; #ifdef CONFIG_BLK_DEBUG_FS @@ -218,6 +257,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); +bool blk_mq_queue_inflight(struct request_queue *q); + enum { /* return when out of requests */ BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), @@ -264,7 +305,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); -void blk_mq_complete_request(struct request *rq); +bool blk_mq_complete_request(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio); bool blk_mq_queue_stopped(struct request_queue *q); @@ -288,24 +329,12 @@ void blk_mq_freeze_queue_wait(struct request_queue *q); int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, unsigned long timeout); -int blk_mq_map_queues(struct blk_mq_tag_set *set); +int blk_mq_map_queues(struct blk_mq_queue_map *qmap); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); void blk_mq_quiesce_queue_nowait(struct request_queue *q); -/** - * blk_mq_mark_complete() - Set request state to complete - * @rq: request to set to complete state - * - * Returns true if request state was successfully set to complete. If - * successful, the caller is responsibile for seeing this request is ended, as - * blk_mq_complete_request will not work again. - */ -static inline bool blk_mq_mark_complete(struct request *rq) -{ - return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) == - MQ_RQ_IN_FLIGHT; -} +unsigned int blk_mq_rq_cpu(struct request *rq); /* * Driver command data is immediately after the request. So subtract request @@ -328,4 +357,14 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq) for ((i) = 0; (i) < (hctx)->nr_ctx && \ ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) +static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, + struct request *rq) +{ + if (rq->tag != -1) + return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT); + + return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) | + BLK_QC_T_INTERNAL; +} + #endif diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 1dcf652ba0aa..5c7e7f859a24 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -174,11 +174,11 @@ struct bio { void *bi_private; #ifdef CONFIG_BLK_CGROUP /* - * Optional ioc and css associated with this bio. Put on bio - * release. Read comment on top of bio_associate_current(). + * Represents the association of the css and request_queue for the bio. + * If a bio goes direct to device, it will not have a blkg as it will + * not have a request_queue associated with it. The reference is put + * on release of the bio. 
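For reference, a small illustration (not from the patch) of how a poll cookie built by the new request_to_qc_t() helper decomposes again with the existing blk_qc_t accessors from blk_types.h:

#include <linux/blk-mq.h>
#include <linux/printk.h>

static void show_cookie(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	blk_qc_t qc = request_to_qc_t(hctx, rq);

	pr_debug("qnum=%u tag=%u sched_tag=%d\n",
		 blk_qc_t_to_queue_num(qc),	/* hctx->queue_num */
		 blk_qc_t_to_tag(qc),		/* rq->tag or internal_tag */
		 blk_qc_t_is_internal(qc));	/* BLK_QC_T_INTERNAL set? */
}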
*/ - struct io_context *bi_ioc; - struct cgroup_subsys_state *bi_css; struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; #endif @@ -228,6 +228,7 @@ struct bio { #define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion * of this bio. */ #define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */ +#define BIO_TRACKED 12 /* set if bio goes through the rq_qos path */ /* See BVEC_POOL_OFFSET below before adding new flags */ @@ -323,6 +324,8 @@ enum req_flag_bits { /* command specific flags for REQ_OP_WRITE_ZEROES: */ __REQ_NOUNMAP, /* do not free blocks when zeroing */ + __REQ_HIPRI, + /* for driver use */ __REQ_DRV, __REQ_SWAP, /* swapping request. */ @@ -343,8 +346,8 @@ enum req_flag_bits { #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) - #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) +#define REQ_HIPRI (1ULL << __REQ_HIPRI) #define REQ_DRV (1ULL << __REQ_DRV) #define REQ_SWAP (1ULL << __REQ_SWAP) @@ -422,17 +425,6 @@ static inline bool blk_qc_t_valid(blk_qc_t cookie) return cookie != BLK_QC_T_NONE; } -static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num, - bool internal) -{ - blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT); - - if (internal) - ret |= BLK_QC_T_INTERNAL; - - return ret; -} - static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) { return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4293dc1cd160..338604dff7d0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -58,25 +58,6 @@ struct blk_stat_callback; typedef void (rq_end_io_fn)(struct request *, blk_status_t); -#define BLK_RL_SYNCFULL (1U << 0) -#define BLK_RL_ASYNCFULL (1U << 1) - -struct request_list { - struct request_queue *q; /* the queue this rl belongs to */ -#ifdef CONFIG_BLK_CGROUP - struct blkcg_gq *blkg; /* blkg this request pool belongs to */ -#endif - /* - * count[], starved[], and wait[] are indexed by - * BLK_RW_SYNC/BLK_RW_ASYNC - */ - int count[2]; - int starved[2]; - mempool_t *rq_pool; - wait_queue_head_t wait[2]; - unsigned int flags; -}; - /* * request flags */ typedef __u32 __bitwise req_flags_t; @@ -85,8 +66,6 @@ typedef __u32 __bitwise req_flags_t; #define RQF_SORTED ((__force req_flags_t)(1 << 0)) /* drive already may have started this one */ #define RQF_STARTED ((__force req_flags_t)(1 << 1)) -/* uses tagged queueing */ -#define RQF_QUEUED ((__force req_flags_t)(1 << 2)) /* may not be passed by ioscheduler */ #define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) /* request for flush sequence */ @@ -150,8 +129,8 @@ enum mq_rq_state { struct request { struct request_queue *q; struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; - int cpu; unsigned int cmd_flags; /* op and common flags */ req_flags_t rq_flags; @@ -245,11 +224,7 @@ struct request { refcount_t ref; unsigned int timeout; - - /* access through blk_rq_set_deadline, blk_rq_deadline */ - unsigned long __deadline; - - struct list_head timeout_list; + unsigned long deadline; union { struct __call_single_data csd; @@ -264,10 +239,6 @@ struct request { /* for bidi */ struct request *next_rq; - -#ifdef CONFIG_BLK_CGROUP - struct request_list *rl; /* rl this rq is alloced from */ -#endif }; static inline bool blk_op_is_scsi(unsigned int op) @@ -311,41 +282,21 @@ static inline unsigned short req_get_ioprio(struct request *req) struct blk_queue_ctx; -typedef void (request_fn_proc) (struct request_queue 
*q); typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); -typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t); -typedef int (prep_rq_fn) (struct request_queue *, struct request *); -typedef void (unprep_rq_fn) (struct request_queue *, struct request *); struct bio_vec; -typedef void (softirq_done_fn)(struct request *); typedef int (dma_drain_needed_fn)(struct request *); -typedef int (lld_busy_fn) (struct request_queue *q); -typedef int (bsg_job_fn) (struct bsg_job *); -typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t); -typedef void (exit_rq_fn)(struct request_queue *, struct request *); enum blk_eh_timer_return { BLK_EH_DONE, /* drivers has completed the command */ BLK_EH_RESET_TIMER, /* reset timer and try again */ }; -typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *); - enum blk_queue_state { Queue_down, Queue_up, }; -struct blk_queue_tag { - struct request **tag_index; /* map of busy tags */ - unsigned long *tag_map; /* bit map of free/busy tags */ - int max_depth; /* what we will send to device */ - int real_max_depth; /* what the array can hold */ - atomic_t refcnt; /* map can be shared */ - int alloc_policy; /* tag allocation policy */ - int next_tag; /* next tag */ -}; #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ @@ -389,7 +340,6 @@ struct queue_limits { unsigned char misaligned; unsigned char discard_misaligned; - unsigned char cluster; unsigned char raid_partial_stripes_expensive; enum blk_zoned_model zoned; }; @@ -444,40 +394,15 @@ struct request_queue { struct list_head queue_head; struct request *last_merge; struct elevator_queue *elevator; - int nr_rqs[2]; /* # allocated [a]sync rqs */ - int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ struct blk_queue_stats *stats; struct rq_qos *rq_qos; - /* - * If blkcg is not used, @q->root_rl serves all requests. If blkcg - * is used, root blkg allocates from @q->root_rl and all other - * blkgs from their own blkg->rl. Which one to use should be - * determined using bio_request_list(). - */ - struct request_list root_rl; - - request_fn_proc *request_fn; make_request_fn *make_request_fn; - poll_q_fn *poll_fn; - prep_rq_fn *prep_rq_fn; - unprep_rq_fn *unprep_rq_fn; - softirq_done_fn *softirq_done_fn; - rq_timed_out_fn *rq_timed_out_fn; dma_drain_needed_fn *dma_drain_needed; - lld_busy_fn *lld_busy_fn; - /* Called just after a request is allocated */ - init_rq_fn *init_rq_fn; - /* Called just before a request is freed */ - exit_rq_fn *exit_rq_fn; - /* Called from inside blk_get_request() */ - void (*initialize_rq_fn)(struct request *rq); const struct blk_mq_ops *mq_ops; - unsigned int *mq_map; - /* sw queues */ struct blk_mq_ctx __percpu *queue_ctx; unsigned int nr_queues; @@ -488,17 +413,6 @@ struct request_queue { struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; - /* - * Dispatch queue sorting - */ - sector_t end_sector; - struct request *boundary_rq; - - /* - * Delayed queue handling - */ - struct delayed_work delay_work; - struct backing_dev_info *backing_dev_info; /* @@ -529,13 +443,7 @@ struct request_queue { */ gfp_t bounce_gfp; - /* - * protects queue structures from reentrancy. ->__queue_lock should - * _never_ be used directly, it is queue private. always use - * ->queue_lock. 
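With the per-queue lock becoming an embedded spinlock in the hunk below (spinlock_t queue_lock instead of a driver-suppliable spinlock_t pointer), callers switch from dereferencing the pointer to taking the address of the embedded lock. A sketch of the resulting call site, with the old form noted in a comment:

#include <linux/blkdev.h>

static void mydrv_frob_queue(struct request_queue *q)
{
	/* before this series: spin_lock_irq(q->queue_lock); */
	spin_lock_irq(&q->queue_lock);
	/* ... touch queue state that needs the lock ... */
	spin_unlock_irq(&q->queue_lock);
}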
- */ - spinlock_t __queue_lock; - spinlock_t *queue_lock; + spinlock_t queue_lock; /* * queue kobject @@ -545,7 +453,7 @@ struct request_queue { /* * mq queue kobject */ - struct kobject mq_kobj; + struct kobject *mq_kobj; #ifdef CONFIG_BLK_DEV_INTEGRITY struct blk_integrity integrity; @@ -561,27 +469,12 @@ struct request_queue { * queue settings */ unsigned long nr_requests; /* Max # of requests */ - unsigned int nr_congestion_on; - unsigned int nr_congestion_off; - unsigned int nr_batching; unsigned int dma_drain_size; void *dma_drain_buffer; unsigned int dma_pad_mask; unsigned int dma_alignment; - struct blk_queue_tag *queue_tags; - - unsigned int nr_sorted; - unsigned int in_flight[2]; - - /* - * Number of active block driver functions for which blk_drain_queue() - * must wait. Must be incremented around functions that unlock the - * queue_lock internally, e.g. scsi_request_fn(). - */ - unsigned int request_fn_active; - unsigned int rq_timeout; int poll_nsec; @@ -590,7 +483,6 @@ struct request_queue { struct timer_list timeout; struct work_struct timeout_work; - struct list_head timeout_list; struct list_head icq_list; #ifdef CONFIG_BLK_CGROUP @@ -645,11 +537,9 @@ struct request_queue { struct mutex sysfs_lock; - int bypass_depth; atomic_t mq_freeze_depth; #if defined(CONFIG_BLK_DEV_BSG) - bsg_job_fn *bsg_job_fn; struct bsg_class_device bsg_dev; #endif @@ -669,12 +559,12 @@ struct request_queue { #ifdef CONFIG_BLK_DEBUG_FS struct dentry *debugfs_dir; struct dentry *sched_debugfs_dir; + struct dentry *rqos_debugfs_dir; #endif bool mq_sysfs_init_done; size_t cmd_size; - void *rq_alloc_data; struct work_struct release_work; @@ -682,10 +572,8 @@ struct request_queue { u64 write_hints[BLK_MAX_WRITE_HINTS]; }; -#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */ #define QUEUE_FLAG_STOPPED 1 /* queue is stopped */ #define QUEUE_FLAG_DYING 2 /* queue being torn down */ -#define QUEUE_FLAG_BYPASS 3 /* act as dumb FIFO queue */ #define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */ #define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */ #define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */ @@ -718,19 +606,15 @@ struct request_queue { (1 << QUEUE_FLAG_ADD_RANDOM)) #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ - (1 << QUEUE_FLAG_SAME_COMP) | \ - (1 << QUEUE_FLAG_POLL)) + (1 << QUEUE_FLAG_SAME_COMP)) void blk_queue_flag_set(unsigned int flag, struct request_queue *q); void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q); -bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); -#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) -#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_noxmerges(q) \ @@ -757,37 +641,20 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q); extern void blk_set_pm_only(struct request_queue *q); extern void blk_clear_pm_only(struct request_queue *q); -static inline int queue_in_flight(struct request_queue *q) -{ - return q->in_flight[0] + 
q->in_flight[1]; -} - static inline bool blk_account_rq(struct request *rq) { return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); } -#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) -/* rq->queuelist of dequeued request must be list_empty() */ -#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) -/* - * Driver can handle struct request, if it either has an old style - * request_fn defined, or is blk-mq based. - */ -static inline bool queue_is_rq_based(struct request_queue *q) -{ - return q->request_fn || q->mq_ops; -} - -static inline unsigned int blk_queue_cluster(struct request_queue *q) +static inline bool queue_is_mq(struct request_queue *q) { - return q->limits.cluster; + return q->mq_ops; } static inline enum blk_zoned_model @@ -845,27 +712,6 @@ static inline bool rq_is_sync(struct request *rq) return op_is_sync(rq->cmd_flags); } -static inline bool blk_rl_full(struct request_list *rl, bool sync) -{ - unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; - - return rl->flags & flag; -} - -static inline void blk_set_rl_full(struct request_list *rl, bool sync) -{ - unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; - - rl->flags |= flag; -} - -static inline void blk_clear_rl_full(struct request_list *rl, bool sync) -{ - unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; - - rl->flags &= ~flag; -} - static inline bool rq_mergeable(struct request *rq) { if (blk_rq_is_passthrough(rq)) @@ -902,16 +748,6 @@ static inline unsigned int blk_queue_depth(struct request_queue *q) return q->nr_requests; } -/* - * q->prep_rq_fn return values - */ -enum { - BLKPREP_OK, /* serve it */ - BLKPREP_KILL, /* fatal error, kill, return -EIO */ - BLKPREP_DEFER, /* leave on queue */ - BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */ -}; - extern unsigned long blk_max_low_pfn, blk_max_pfn; /* @@ -983,10 +819,8 @@ extern blk_qc_t direct_make_request(struct bio *bio); extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_init_request_from_bio(struct request *req, struct bio *bio); extern void blk_put_request(struct request *); -extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, unsigned int op, blk_mq_req_flags_t flags); -extern void blk_requeue_request(struct request_queue *, struct request *); extern int blk_lld_busy(struct request_queue *q); extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, @@ -996,7 +830,6 @@ extern void blk_rq_unprep_clone(struct request *rq); extern blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq); extern int blk_rq_append_bio(struct request *rq, struct bio **bio); -extern void blk_delay_queue(struct request_queue *, unsigned long); extern void blk_queue_split(struct request_queue *, struct bio **); extern void blk_recount_segments(struct request_queue *, struct bio *); extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); @@ -1009,15 +842,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); extern void blk_queue_exit(struct request_queue *q); -extern void blk_start_queue(struct request_queue *q); -extern void 
blk_start_queue_async(struct request_queue *q); -extern void blk_stop_queue(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); -extern void __blk_stop_queue(struct request_queue *q); -extern void __blk_run_queue(struct request_queue *q); -extern void __blk_run_queue_uncond(struct request_queue *q); -extern void blk_run_queue(struct request_queue *); -extern void blk_run_queue_async(struct request_queue *q); extern int blk_rq_map_user(struct request_queue *, struct request *, struct rq_map_data *, void __user *, unsigned long, gfp_t); @@ -1034,7 +859,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, int blk_status_to_errno(blk_status_t status); blk_status_t errno_to_blk_status(int errno); -bool blk_poll(struct request_queue *q, blk_qc_t cookie); +int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { @@ -1172,13 +997,6 @@ static inline unsigned int blk_rq_count_bios(struct request *rq) return nr_bios; } -/* - * Request issue related functions. - */ -extern struct request *blk_peek_request(struct request_queue *q); -extern void blk_start_request(struct request *rq); -extern struct request *blk_fetch_request(struct request_queue *q); - void blk_steal_bios(struct bio_list *list, struct request *rq); /* @@ -1196,27 +1014,18 @@ void blk_steal_bios(struct bio_list *list, struct request *rq); */ extern bool blk_update_request(struct request *rq, blk_status_t error, unsigned int nr_bytes); -extern void blk_finish_request(struct request *rq, blk_status_t error); -extern bool blk_end_request(struct request *rq, blk_status_t error, - unsigned int nr_bytes); extern void blk_end_request_all(struct request *rq, blk_status_t error); extern bool __blk_end_request(struct request *rq, blk_status_t error, unsigned int nr_bytes); extern void __blk_end_request_all(struct request *rq, blk_status_t error); extern bool __blk_end_request_cur(struct request *rq, blk_status_t error); -extern void blk_complete_request(struct request *); extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); -extern void blk_unprep_request(struct request *); /* * Access functions for manipulating queue properties */ -extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, - spinlock_t *lock, int node_id); -extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); -extern int blk_init_allocated_queue(struct request_queue *); extern void blk_cleanup_queue(struct request_queue *); extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void blk_queue_bounce_limit(struct request_queue *, u64); @@ -1255,15 +1064,10 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); extern int blk_queue_dma_drain(struct request_queue *q, dma_drain_needed_fn *dma_drain_needed, void *buf, unsigned int size); -extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); -extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); -extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); extern void blk_queue_dma_alignment(struct request_queue *, int); extern void blk_queue_update_dma_alignment(struct request_queue *, int); -extern void blk_queue_softirq_done(struct request_queue 
*, softirq_done_fn *); -extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); @@ -1299,8 +1103,7 @@ extern long nr_blockdev_pages(void); bool __must_check blk_get_queue(struct request_queue *); struct request_queue *blk_alloc_queue(gfp_t); -struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, - spinlock_t *lock); +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); @@ -1317,9 +1120,10 @@ extern void blk_set_queue_dying(struct request_queue *); * schedule() where blk_schedule_flush_plug() is called. */ struct blk_plug { - struct list_head list; /* requests */ struct list_head mq_list; /* blk-mq requests */ struct list_head cb_list; /* md requires an unplug callback */ + unsigned short rq_count; + bool multiple_queues; }; #define BLK_MAX_REQUEST_COUNT 16 #define BLK_PLUG_FLUSH_SIZE (128 * 1024) @@ -1358,31 +1162,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) struct blk_plug *plug = tsk->plug; return plug && - (!list_empty(&plug->list) || - !list_empty(&plug->mq_list) || + (!list_empty(&plug->mq_list) || !list_empty(&plug->cb_list)); } -/* - * tag stuff - */ -extern int blk_queue_start_tag(struct request_queue *, struct request *); -extern struct request *blk_queue_find_tag(struct request_queue *, int); -extern void blk_queue_end_tag(struct request_queue *, struct request *); -extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int); -extern void blk_queue_free_tags(struct request_queue *); -extern int blk_queue_resize_tags(struct request_queue *, int); -extern struct blk_queue_tag *blk_init_tags(int, int); -extern void blk_free_tags(struct blk_queue_tag *); - -static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, - int tag) -{ - if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) - return NULL; - return bqt->tag_index[tag]; -} - extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); @@ -1982,4 +1765,17 @@ static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, #endif /* CONFIG_BLOCK */ +static inline void blk_wake_io_task(struct task_struct *waiter) +{ + /* + * If we're polling, the task itself is doing the completions. For + * that case, we don't need to signal a wakeup, it's enough to just + * mark us as RUNNING. 
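A condensed, illustrative version of the synchronous polled-I/O pattern that the new blk_wake_io_task() helper (completed just below) and the three-argument blk_poll() serve, loosely modeled on the block layer's direct-I/O wait loop; the my_* names and structure are assumptions:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched.h>

struct my_sync_io {
	struct task_struct *waiter;
	bool done;
};

/* completion side, e.g. wired up as bi_end_io */
static void my_end_io(struct bio *bio)
{
	struct my_sync_io *io = bio->bi_private;

	smp_store_release(&io->done, true);
	blk_wake_io_task(io->waiter);	/* no wakeup needed if the waiter polls */
	bio_put(bio);
}

/* submit side */
static void my_submit_and_wait(struct request_queue *q, struct bio *bio,
			       struct my_sync_io *io)
{
	blk_qc_t qc;

	io->waiter = current;
	io->done = false;
	bio->bi_private = io;
	bio->bi_end_io = my_end_io;
	bio->bi_opf |= REQ_HIPRI;	/* ask for a polled completion */

	qc = submit_bio(bio);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (smp_load_acquire(&io->done))
			break;
		/* spin=true: keep polling; sleep only if nothing completed */
		if (blk_poll(q, qc, true) <= 0)
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}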
+ */ + if (waiter == current) + __set_current_state(TASK_RUNNING); + else + wake_up_process(waiter); +} + #endif diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 33014ae73103..e734f163bd0b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -23,6 +23,7 @@ struct bpf_prog; struct bpf_map; struct sock; struct seq_file; +struct btf; struct btf_type; /* map is generic key/value storage optionally accesible by eBPF programs */ @@ -52,6 +53,7 @@ struct bpf_map_ops { void (*map_seq_show_elem)(struct bpf_map *map, void *key, struct seq_file *m); int (*map_check_btf)(const struct bpf_map *map, + const struct btf *btf, const struct btf_type *key_type, const struct btf_type *value_type); }; @@ -126,6 +128,7 @@ static inline bool bpf_map_support_seq_show(const struct bpf_map *map) } int map_check_no_btf(const struct bpf_map *map, + const struct btf *btf, const struct btf_type *key_type, const struct btf_type *value_type); @@ -268,15 +271,18 @@ struct bpf_prog_offload_ops { int (*insn_hook)(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int (*finalize)(struct bpf_verifier_env *env); + int (*prepare)(struct bpf_prog *prog); + int (*translate)(struct bpf_prog *prog); + void (*destroy)(struct bpf_prog *prog); }; struct bpf_prog_offload { struct bpf_prog *prog; struct net_device *netdev; + struct bpf_offload_dev *offdev; void *dev_priv; struct list_head offloads; bool dev_state; - const struct bpf_prog_offload_ops *dev_ops; void *jited_image; u32 jited_len; }; @@ -293,9 +299,11 @@ struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; u32 max_ctx_offset; + u32 max_pkt_offset; u32 stack_depth; u32 id; - u32 func_cnt; + u32 func_cnt; /* used by non-func prog as the number of func progs */ + u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ bool offload_requested; struct bpf_prog **func; void *jit_data; /* JIT specific data. arch dependent */ @@ -312,6 +320,30 @@ struct bpf_prog_aux { void *security; #endif struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + /* bpf_line_info loaded from userspace. linfo->insn_off + * has the xlated insn offset. + * Both the main and sub prog share the same linfo. + * The subprog can access its first linfo by + * using the linfo_idx. + */ + struct bpf_line_info *linfo; + /* jited_linfo is the jited addr of the linfo. It has a + * one to one mapping to linfo: + * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. + * Both the main and sub prog share the same jited_linfo. + * The subprog can access its first jited_linfo by + * using the linfo_idx. + */ + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + /* subprog can use linfo_idx to access its first linfo and + * jited_linfo. 
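An illustrative sketch (mydrv_* names assumed, mirroring how the in-tree offload drivers are structured) of registering an offload device now that the verifier callbacks and the program prepare/translate/destroy lifetime hooks live in a single ops table handed to bpf_offload_dev_create():

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/netdevice.h>
#include <linux/err.h>

static int mydrv_verify_insn(struct bpf_verifier_env *env,
			     int insn_idx, int prev_insn_idx);
static int mydrv_finalize(struct bpf_verifier_env *env);
static int mydrv_verifier_prep(struct bpf_prog *prog);
static int mydrv_translate(struct bpf_prog *prog);
static void mydrv_destroy(struct bpf_prog *prog);

static const struct bpf_prog_offload_ops mydrv_bpf_dev_ops = {
	.insn_hook	= mydrv_verify_insn,
	.finalize	= mydrv_finalize,
	.prepare	= mydrv_verifier_prep,
	.translate	= mydrv_translate,
	.destroy	= mydrv_destroy,
};

static int mydrv_bpf_init(struct net_device *netdev)
{
	struct bpf_offload_dev *bdev;

	bdev = bpf_offload_dev_create(&mydrv_bpf_dev_ops);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	return bpf_offload_dev_netdev_register(bdev, netdev);
}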
+ * main prog always has linfo_idx == 0 + */ + u32 linfo_idx; union { struct work_struct work; struct rcu_head rcu; @@ -523,7 +555,8 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) } /* verify correctness of eBPF program */ -int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, + union bpf_attr __user *uattr); void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); /* Map specifics */ @@ -691,7 +724,8 @@ int bpf_map_offload_get_next_key(struct bpf_map *map, bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); -struct bpf_offload_dev *bpf_offload_dev_create(void); +struct bpf_offload_dev * +bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops); void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, struct net_device *netdev); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index d93e89761a8b..27b74947cd2b 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -38,6 +38,7 @@ enum bpf_reg_liveness { REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */ REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */ + REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */ }; struct bpf_reg_state { @@ -147,6 +148,7 @@ struct bpf_verifier_state { /* call stack tracking */ struct bpf_func_state *frame[MAX_CALL_FRAMES]; u32 curframe; + bool speculative; }; #define bpf_get_spilled_reg(slot, frame) \ @@ -166,15 +168,24 @@ struct bpf_verifier_state_list { struct bpf_verifier_state_list *next; }; +/* Possible states for alu_state member. */ +#define BPF_ALU_SANITIZE_SRC 1U +#define BPF_ALU_SANITIZE_DST 2U +#define BPF_ALU_NEG_VALUE (1U << 2) +#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ + BPF_ALU_SANITIZE_DST) + struct bpf_insn_aux_data { union { enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ unsigned long map_state; /* pointer/poison value for maps */ s32 call_imm; /* saved imm field of call insn */ + u32 alu_limit; /* limit for add/sub register with pointer */ }; int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ int sanitize_stack_off; /* stack slot to be cleared */ bool seen; /* this insn was processed by the verifier */ + u8 alu_state; /* used in combination with alu_limit */ }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ @@ -203,6 +214,7 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) struct bpf_subprog_info { u32 start; /* insn idx of function entry point */ + u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ u16 stack_depth; /* max. 
stack depth used by this function */ }; @@ -210,6 +222,8 @@ struct bpf_subprog_info { * one verifier_env per bpf_check() call */ struct bpf_verifier_env { + u32 insn_idx; + u32 prev_insn_idx; struct bpf_prog *prog; /* eBPF program being verified */ const struct bpf_verifier_ops *ops; struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ @@ -223,6 +237,7 @@ struct bpf_verifier_env { bool allow_ptr_leaks; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ + const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; u32 subprog_cnt; @@ -245,7 +260,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) return cur_func(env)->regs; } -int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); +int bpf_prog_offload_verifier_prep(struct bpf_prog *prog); int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int bpf_prog_offload_finalize(struct bpf_verifier_env *env); diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 949e9af8d9d6..9cd00a37b8d3 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -28,6 +28,7 @@ #define PHY_ID_BCM89610 0x03625cd0 #define PHY_ID_BCM7250 0xae025280 +#define PHY_ID_BCM7255 0xae025120 #define PHY_ID_BCM7260 0xae025190 #define PHY_ID_BCM7268 0xae025090 #define PHY_ID_BCM7271 0xae0253b0 diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 6aeaf6472665..b356e0006731 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -31,6 +31,9 @@ struct device; struct scatterlist; struct request_queue; +typedef int (bsg_job_fn) (struct bsg_job *); +typedef enum blk_eh_timer_return (bsg_timeout_fn)(struct request *); + struct bsg_buffer { unsigned int payload_len; int sg_cnt; @@ -72,7 +75,8 @@ struct bsg_job { void bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len); struct request_queue *bsg_setup_queue(struct device *dev, const char *name, - bsg_job_fn *job_fn, int dd_job_size); + bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size); +void bsg_remove_queue(struct request_queue *q); void bsg_job_put(struct bsg_job *job); int __must_check bsg_job_get(struct bsg_job *job); diff --git a/include/linux/btf.h b/include/linux/btf.h index e076c4697049..12502e25e767 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -7,6 +7,7 @@ #include <linux/types.h> struct btf; +struct btf_member; struct btf_type; union bpf_attr; @@ -46,5 +47,24 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, struct seq_file *m); int btf_get_fd_by_id(u32 id); u32 btf_id(const struct btf *btf); +bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, + const struct btf_member *m, + u32 expected_offset, u32 expected_size); + +#ifdef CONFIG_BPF_SYSCALL +const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); +const char *btf_name_by_offset(const struct btf *btf, u32 offset); +#else +static inline const struct btf_type *btf_type_by_id(const struct btf *btf, + u32 type_id) +{ + return NULL; +} +static inline const char *btf_name_by_offset(const struct btf *btf, + u32 offset) +{ + return NULL; +} +#endif #endif diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index 43d1fd50d433..faeec7433aab 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h @@ -5,21 +5,8 @@ #include <linux/compiler.h> #ifdef 
__CHECKER__ -#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) -#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) #define BUILD_BUG_ON_ZERO(e) (0) -#define BUILD_BUG_ON_INVALID(e) (0) -#define BUILD_BUG_ON_MSG(cond, msg) (0) -#define BUILD_BUG_ON(condition) (0) -#define BUILD_BUG() (0) #else /* __CHECKER__ */ - -/* Force a compilation error if a constant expression is not a power of 2 */ -#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ - BUILD_BUG_ON(((n) & ((n) - 1)) != 0) -#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ - BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) - /* * Force a compilation error if condition is true, but also produce a * result (of value 0 and type size_t), so the expression can be used @@ -27,6 +14,13 @@ * aren't permitted). */ #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) +#endif /* __CHECKER__ */ + +/* Force a compilation error if a constant expression is not a power of 2 */ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON(((n) & ((n) - 1)) != 0) +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) /* * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the @@ -51,23 +45,9 @@ * If you have some code which relies on certain constants being equal, or * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to * detect if someone changes it. - * - * The implementation uses gcc's reluctance to create a negative array, but gcc - * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to - * inline functions). Luckily, in 4.3 they added the "error" function - * attribute just for this type of case. Thus, we use a negative sized array - * (should always create an error on gcc versions older than 4.4) and then call - * an undefined function with the error attribute (should always create an - * error on gcc 4.3 and later). If for some reason, neither creates a - * compile-time error, we'll still have a link-time error, which is harder to - * track down. */ -#ifndef __OPTIMIZE__ -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -#else #define BUILD_BUG_ON(condition) \ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) -#endif /** * BUILD_BUG - break compile if used. 
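For context, typical consumers of these macros (illustrative only). The practical effect of the reshuffle above is that BUILD_BUG_ON() and the power-of-2 variants now expand the same way regardless of __CHECKER__ and __OPTIMIZE__, while sparse only keeps the BUILD_BUG_ON_ZERO() stub:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct on_wire_hdr {
	__le32 magic;
	__le16 len;
	__le16 flags;
};

static inline int on_wire_hdr_check(void)
{
	/* pure compile-time assertions */
	BUILD_BUG_ON(sizeof(struct on_wire_hdr) != 8);
	BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct on_wire_hdr));

	/* BUILD_BUG_ON_ZERO() yields a size_t 0, so it can sit in expressions */
	return 0 + BUILD_BUG_ON_ZERO(offsetof(struct on_wire_hdr, len) != 4);
}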
@@ -78,6 +58,4 @@ */ #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") -#endif /* __CHECKER__ */ - #endif /* _LINUX_BUILD_BUG_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index a83e1f632eb7..f01623aef2f7 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int idx); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); void can_free_echo_skb(struct net_device *dev, unsigned int idx); diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index cb31683bbe15..8268811a697e 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload * int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); -int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb); +int can_rx_offload_queue_sorted(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp); +unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, + unsigned int idx, u32 timestamp); +int can_rx_offload_queue_tail(struct can_rx_offload *offload, + struct sk_buff *skb); void can_rx_offload_reset(struct can_rx_offload *offload); void can_rx_offload_del(struct can_rx_offload *offload); void can_rx_offload_enable(struct can_rx_offload *offload); diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 6b92b3395fa9..65a38c4a02a1 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \ CEPH_FEATURE_CEPHX_V2) -#define CEPH_FEATURES_REQUIRED_DEFAULT \ - (CEPH_FEATURE_NOSRCADDR | \ - CEPH_FEATURE_SUBSCRIBE2 | \ - CEPH_FEATURE_RECONNECT_SEQ | \ - CEPH_FEATURE_PGID64 | \ - CEPH_FEATURE_PGPOOL3 | \ - CEPH_FEATURE_OSDENC) +#define CEPH_FEATURES_REQUIRED_DEFAULT 0 #endif diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 5e1694fe035b..8fcbae1b8db0 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -92,6 +92,7 @@ enum { CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ + CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */ /* internal flags, do not use outside cgroup core proper */ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 9d12757a65b0..9968332cceed 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -93,6 +93,8 @@ extern struct css_set init_css_set; bool css_has_online_children(struct cgroup_subsys_state *css); struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); +struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup, + struct cgroup_subsys *ss); struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, struct cgroup_subsys *ss); struct 
cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 60c51871b04b..e443fa9fa859 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -1,12 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* - * linux/include/linux/clk-provider.h - * * Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com> * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __LINUX_CLK_PROVIDER_H #define __LINUX_CLK_PROVIDER_H @@ -601,6 +596,12 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw); * @lock: register lock * * Clock with adjustable fractional divider affecting its output frequency. + * + * Flags: + * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator + * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED + * is set then the numerator and denominator are both the value read + * plus one. */ struct clk_fractional_divider { struct clk_hw hw; @@ -620,6 +621,8 @@ struct clk_fractional_divider { #define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw) +#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0) + extern const struct clk_ops clk_fractional_divider_ops; struct clk *clk_register_fractional_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h index e0c362363c38..85f8cf9d1226 100644 --- a/include/linux/clk/clk-conf.h +++ b/include/linux/clk/clk-conf.h @@ -1,10 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Samsung Electronics Co., Ltd. * Sylwester Nawrocki <s.nawrocki@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
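A small sketch of what the new CLK_FRAC_DIVIDER_ZERO_BASED flag means for the computed rate, with m and n standing for the raw numerator/denominator register fields (the helper name is assumed for illustration):

#include <linux/clk-provider.h>
#include <linux/kernel.h>

static unsigned long frac_div_rate(unsigned long parent_rate,
				   unsigned long m, unsigned long n,
				   unsigned long flags)
{
	if (flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
		m++;	/* register value 0 means "1" */
		n++;
	}
	return mult_frac(parent_rate, m, n);	/* rate = parent * m / n */
}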
*/ #include <linux/types.h> diff --git a/include/linux/compat.h b/include/linux/compat.h index 88720b443cd6..056be0d03722 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -169,6 +169,10 @@ typedef struct { compat_sigset_word sig[_COMPAT_NSIG_WORDS]; } compat_sigset_t; +int set_compat_user_sigmask(const compat_sigset_t __user *usigmask, + sigset_t *set, sigset_t *oldset, + size_t sigsetsize); + struct compat_sigaction { #ifndef __ARCH_HAS_IRIX_SIGACTION compat_uptr_t sa_handler; @@ -558,6 +562,12 @@ asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, struct io_event __user *events, struct old_timespec32 __user *timeout, const struct __compat_aio_sigset __user *usig); +asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id, + compat_long_t min_nr, + compat_long_t nr, + struct io_event __user *events, + struct __kernel_timespec __user *timeout, + const struct __compat_aio_sigset __user *usig); /* fs/cookies.c */ asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t); @@ -643,11 +653,21 @@ asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, compat_ulong_t __user *exp, struct old_timespec32 __user *tsp, void __user *sig); +asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp, + compat_ulong_t __user *outp, + compat_ulong_t __user *exp, + struct __kernel_timespec __user *tsp, + void __user *sig); asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, struct old_timespec32 __user *tsp, const compat_sigset_t __user *sigmask, compat_size_t sigsetsize); +asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds, + unsigned int nfds, + struct __kernel_timespec __user *tsp, + const compat_sigset_t __user *sigmask, + compat_size_t sigsetsize); /* fs/signalfd.c */ asmlinkage long compat_sys_signalfd4(int ufd, @@ -768,6 +788,9 @@ asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, struct compat_siginfo __user *uinfo, struct old_timespec32 __user *uts, compat_size_t sigsetsize); +asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese, + struct compat_siginfo __user *uinfo, + struct __kernel_timespec __user *uts, compat_size_t sigsetsize); asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo); /* No generic prototype for rt_sigreturn */ @@ -873,6 +896,9 @@ asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages, asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo); +asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg, + unsigned vlen, unsigned int flags, + struct __kernel_timespec __user *timeout); asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags, struct old_timespec32 __user *timeout); diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 3e7dafb3ea80..39f668d5066b 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -16,9 +16,13 @@ /* all clang versions usable with the kernel support KASAN ABI version 5 */ #define KASAN_ABI_VERSION 5 +#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) /* emulate gcc's __SANITIZE_ADDRESS__ flag */ -#if __has_feature(address_sanitizer) #define __SANITIZE_ADDRESS__ +#define 
__no_sanitize_address \ + __attribute__((no_sanitize("address", "hwaddress"))) +#else +#define __no_sanitize_address #endif /* diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index c0f5db3a9621..5776da43da97 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -143,16 +143,10 @@ #define KASAN_ABI_VERSION 3 #endif -/* - * Because __no_sanitize_address conflicts with inlining: - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 - * we do one or the other. - */ -#ifdef CONFIG_KASAN -#define __no_sanitize_address_or_inline \ - __no_sanitize_address __maybe_unused notrace +#if __has_attribute(__no_sanitize_address__) +#define __no_sanitize_address __attribute__((no_sanitize_address)) #else -#define __no_sanitize_address_or_inline inline +#define __no_sanitize_address #endif #if GCC_VERSION >= 50100 diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 18c80cfa4fc4..fc5004a4b07d 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -99,13 +99,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, * unique, to convince GCC not to merge duplicate inline asm statements. */ #define annotate_reachable() ({ \ - asm volatile("ANNOTATE_REACHABLE counter=%c0" \ - : : "i" (__COUNTER__)); \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.reachable\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ }) #define annotate_unreachable() ({ \ - asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \ - : : "i" (__COUNTER__)); \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.unreachable\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ }) +#define ASM_UNREACHABLE \ + "999:\n\t" \ + ".pushsection .discard.unreachable\n\t" \ + ".long 999b - .\n\t" \ + ".popsection\n\t" #else #define annotate_reachable() #define annotate_unreachable() @@ -189,7 +198,7 @@ void __read_once_size(const volatile void *p, void *res, int size) * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 * '__maybe_unused' allows us to avoid defined-but-not-used warnings. */ -# define __no_kasan_or_inline __no_sanitize_address __maybe_unused +# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused #else # define __no_kasan_or_inline __always_inline #endif @@ -293,45 +302,6 @@ static inline void *offset_to_ptr(const int *off) return (void *)((unsigned long)off + *off); } -#else /* __ASSEMBLY__ */ - -#ifdef __KERNEL__ -#ifndef LINKER_SCRIPT - -#ifdef CONFIG_STACK_VALIDATION -.macro ANNOTATE_UNREACHABLE counter:req -\counter: - .pushsection .discard.unreachable - .long \counter\()b -. - .popsection -.endm - -.macro ANNOTATE_REACHABLE counter:req -\counter: - .pushsection .discard.reachable - .long \counter\()b -. - .popsection -.endm - -.macro ASM_UNREACHABLE -999: - .pushsection .discard.unreachable - .long 999b - . 
- .popsection -.endm -#else /* CONFIG_STACK_VALIDATION */ -.macro ANNOTATE_UNREACHABLE counter:req -.endm - -.macro ANNOTATE_REACHABLE counter:req -.endm - -.macro ASM_UNREACHABLE -.endm -#endif /* CONFIG_STACK_VALIDATION */ - -#endif /* LINKER_SCRIPT */ -#endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ /* Compile time object size, -1 for unknown */ diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 6b28c1b7310c..19f32b0c29af 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -4,22 +4,26 @@ /* * The attributes in this file are unconditionally defined and they directly - * map to compiler attribute(s) -- except those that are optional. + * map to compiler attribute(s), unless one of the compilers does not support + * the attribute. In that case, __has_attribute is used to check for support + * and the reason is stated in its comment ("Optional: ..."). * * Any other "attributes" (i.e. those that depend on a configuration option, * on a compiler, on an architecture, on plugins, on other attributes...) * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h). + * The intention is to keep this file as simple as possible, as well as + * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks). * * This file is meant to be sorted (by actual attribute name, * not by #define identifier). Use the __attribute__((__name__)) syntax * (i.e. with underscores) to avoid future collisions with other macros. - * If an attribute is optional, state the reason in the comment. + * Provide links to the documentation of each supported compiler, if it exists. */ /* - * To check for optional attributes, we use __has_attribute, which is supported - * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support - * 4.6 <= gcc < 5, we implement __has_attribute by hand. + * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17. + * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute + * by hand. 
* * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__ * depending on the compiler used to build it; however, these attributes have @@ -33,7 +37,6 @@ # define __GCC4_has_attribute___designated_init__ 0 # define __GCC4_has_attribute___externally_visible__ 1 # define __GCC4_has_attribute___noclone__ 1 -# define __GCC4_has_attribute___optimize__ 1 # define __GCC4_has_attribute___nonstring__ 0 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) #endif @@ -159,17 +162,11 @@ /* * Optional: not supported by clang - * Note: icc does not recognize gcc's no-tracer * * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute - * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-optimize-function-attribute */ #if __has_attribute(__noclone__) -# if __has_attribute(__optimize__) -# define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) -# else -# define __noclone __attribute__((__noclone__)) -# endif +# define __noclone __attribute__((__noclone__)) #else # define __noclone #endif @@ -203,19 +200,6 @@ #define __noreturn __attribute__((__noreturn__)) /* - * Optional: only supported since gcc >= 4.8 - * Optional: not supported by icc - * - * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fsanitize_005faddress-function-attribute - * clang: https://clang.llvm.org/docs/AttributeReference.html#no-sanitize-address-no-address-safety-analysis - */ -#if __has_attribute(__no_sanitize_address__) -# define __no_sanitize_address __attribute__((__no_sanitize_address__)) -#else -# define __no_sanitize_address -#endif - -/* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute * clang: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3439d7d0249a..ba814f18cb4c 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -104,59 +104,6 @@ struct ftrace_likely_data { unsigned long constant; }; -#endif /* __KERNEL__ */ - -#endif /* __ASSEMBLY__ */ - -/* - * The below symbols may be defined for one or more, but not ALL, of the above - * compilers. We don't consider that to be an error, so set them to nothing. - * For example, some of them are for compiler specific plugins. - */ -#ifndef __latent_entropy -# define __latent_entropy -#endif - -#ifndef __randomize_layout -# define __randomize_layout __designated_init -#endif - -#ifndef __no_randomize_layout -# define __no_randomize_layout -#endif - -#ifndef randomized_struct_fields_start -# define randomized_struct_fields_start -# define randomized_struct_fields_end -#endif - -/* Are two types/vars the same type (ignoring qualifiers)? */ -#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) - -/* Is this type a native word size -- useful for atomic operations */ -#define __native_word(t) \ - (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ - sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) - -/* Helpers for emitting diagnostics in pragmas. 
*/ -#ifndef __diag -#define __diag(string) -#endif - -#ifndef __diag_GCC -#define __diag_GCC(version, severity, string) -#endif - -#define __diag_push() __diag(push) -#define __diag_pop() __diag(pop) - -#define __diag_ignore(compiler, version, option, comment) \ - __diag_ ## compiler(version, ignore, option) -#define __diag_warn(compiler, version, option, comment) \ - __diag_ ## compiler(version, warn, option) -#define __diag_error(compiler, version, option, comment) \ - __diag_ ## compiler(version, error, option) - #ifdef CONFIG_ENABLE_MUST_CHECK #define __must_check __attribute__((__warn_unused_result__)) #else @@ -211,4 +158,61 @@ struct ftrace_likely_data { */ #define noinline_for_stack noinline +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +/* + * The below symbols may be defined for one or more, but not ALL, of the above + * compilers. We don't consider that to be an error, so set them to nothing. + * For example, some of them are for compiler specific plugins. + */ +#ifndef __latent_entropy +# define __latent_entropy +#endif + +#ifndef __randomize_layout +# define __randomize_layout __designated_init +#endif + +#ifndef __no_randomize_layout +# define __no_randomize_layout +#endif + +#ifndef randomized_struct_fields_start +# define randomized_struct_fields_start +# define randomized_struct_fields_end +#endif + +#ifndef asm_volatile_goto +#define asm_volatile_goto(x...) asm goto(x) +#endif + +/* Are two types/vars the same type (ignoring qualifiers)? */ +#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) + +/* Is this type a native word size -- useful for atomic operations */ +#define __native_word(t) \ + (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ + sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) + +/* Helpers for emitting diagnostics in pragmas. */ +#ifndef __diag +#define __diag(string) +#endif + +#ifndef __diag_GCC +#define __diag_GCC(version, severity, string) +#endif + +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) + +#define __diag_ignore(compiler, version, option, comment) \ + __diag_ ## compiler(version, ignore, option) +#define __diag_warn(compiler, version, option, comment) \ + __diag_ ## compiler(version, warn, option) +#define __diag_error(compiler, version, option, comment) \ + __diag_ ## compiler(version, error, option) + #endif /* __LINUX_COMPILER_TYPES_H */ diff --git a/include/linux/cordic.h b/include/linux/cordic.h index cf68ca4a508c..3d656f54d64f 100644 --- a/include/linux/cordic.h +++ b/include/linux/cordic.h @@ -18,6 +18,15 @@ #include <linux/types.h> +#define CORDIC_ANGLE_GEN 39797 +#define CORDIC_PRECISION_SHIFT 16 +#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2) + +#define CORDIC_FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT)) +#define CORDIC_FLOAT(X) (((X) >= 0) \ + ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \ + : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1)) + /** * struct cordic_iq - i/q coordinate. 
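The CORDIC_FIXED()/CORDIC_FLOAT() macros moved into this header are plain Q16 fixed-point conversions; a tiny worked example (values chosen only for illustration):

#include <linux/cordic.h>
#include <linux/printk.h>

static void cordic_fixed_demo(void)
{
	s32 three_q16 = CORDIC_FIXED(3);	/* 3.0 -> 0x00030000 */
	s32 half_q16  = CORDIC_FIXED(1) / 2;	/* 0.5 -> 0x00008000 */
	s32 sum       = three_q16 + half_q16;	/* 3.5 in Q16 */

	/* CORDIC_FLOAT() rounds to the nearest integer, so this prints 4 */
	pr_info("3.5 rounds to %d\n", CORDIC_FLOAT(sum));
}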
* diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 882a9b9e34bc..c86d6d8bdfed 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -950,6 +950,14 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) } #endif +#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) +void sched_cpufreq_governor_change(struct cpufreq_policy *policy, + struct cpufreq_governor *old_gov); +#else +static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy, + struct cpufreq_governor *old_gov) { } +#endif + extern void arch_freq_prepare_all(void); extern unsigned int arch_freq_get_on_cpu(int cpu); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index e0cd2baa8380..fd586d0301e7 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -164,6 +164,8 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_L2X0_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index faed7a8977e8..4dff74f48d4b 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -33,6 +33,8 @@ struct cpuidle_state_usage { unsigned long long disable; unsigned long long usage; unsigned long long time; /* in US */ + unsigned long long above; /* Number of times it's been too deep */ + unsigned long long below; /* Number of times it's been too shallow */ #ifdef CONFIG_SUSPEND unsigned long long s2idle_usage; unsigned long long s2idle_time; /* in US */ diff --git a/include/linux/cred.h b/include/linux/cred.h index 7eed6101c791..4907c9df86b3 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -169,6 +169,7 @@ extern int change_create_files_as(struct cred *, struct inode *); extern int set_security_override(struct cred *, u32); extern int set_security_override_from_ctx(struct cred *, const char *); extern int set_create_files_as(struct cred *, struct inode *); +extern int cred_fscmp(const struct cred *, const struct cred *); extern void __init cred_init(void); /* @@ -236,7 +237,7 @@ static inline struct cred *get_new_cred(struct cred *cred) * @cred: The credentials to reference * * Get a reference on the specified set of credentials. The caller must - * release the reference. + * release the reference. If %NULL is passed, it is returned with no action. * * This is used to deal with a committed set of credentials. Although the * pointer is const, this will temporarily discard the const and increment the @@ -247,16 +248,29 @@ static inline struct cred *get_new_cred(struct cred *cred) static inline const struct cred *get_cred(const struct cred *cred) { struct cred *nonconst_cred = (struct cred *) cred; + if (!cred) + return cred; validate_creds(cred); return get_new_cred(nonconst_cred); } +static inline const struct cred *get_cred_rcu(const struct cred *cred) +{ + struct cred *nonconst_cred = (struct cred *) cred; + if (!cred) + return NULL; + if (!atomic_inc_not_zero(&nonconst_cred->usage)) + return NULL; + validate_creds(cred); + return cred; +} + /** * put_cred - Release a reference to a set of credentials * @cred: The credentials to release * * Release a reference to a set of credentials, deleting them when the last ref - * is released. + * is released. If %NULL is passed, nothing is done. 
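A brief illustration of the pattern the NULL-tolerant get_cred()/put_cred() enable for optional credential pointers; my_ctx and the setter are made up for the sketch:

#include <linux/cred.h>

struct my_ctx {
	const struct cred *cred;	/* may legitimately stay NULL */
};

static void my_ctx_set_cred(struct my_ctx *ctx, const struct cred *cred)
{
	const struct cred *old = ctx->cred;

	ctx->cred = get_cred(cred);	/* get_cred(NULL) now just returns NULL */
	put_cred(old);			/* put_cred(NULL) is now a no-op */
}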
* * This takes a const pointer to a set of credentials because the credentials * on task_struct are attached by const pointers to prevent accidental @@ -266,9 +280,11 @@ static inline void put_cred(const struct cred *_cred) { struct cred *cred = (struct cred *) _cred; - validate_creds(cred); - if (atomic_dec_and_test(&(cred)->usage)) - __put_cred(cred); + if (cred) { + validate_creds(cred); + if (atomic_dec_and_test(&(cred)->usage)) + __put_cred(cred); + } } /** diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 3634ad6fe202..902ec171fc6d 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -49,7 +49,6 @@ #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 -#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_KPP 0x00000008 #define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b @@ -77,12 +76,6 @@ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 /* - * This bit is set for symmetric key ciphers that have already been wrapped - * with a generic IV generator to prevent them from being wrapped again. - */ -#define CRYPTO_ALG_GENIV 0x00000200 - -/* * Set if the algorithm has passed automated run-time testing. Note that * if there is no run-time testing for a given algorithm it is considered * to have passed. @@ -157,7 +150,6 @@ struct crypto_async_request; struct crypto_blkcipher; struct crypto_tfm; struct crypto_type; -struct skcipher_givcrypt_request; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); @@ -246,31 +238,16 @@ struct cipher_desc { * be called in parallel with the same transformation object. * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt * and the conditions are exactly the same. - * @givencrypt: Update the IV for encryption. With this function, a cipher - * implementation may provide the function on how to update the IV - * for encryption. - * @givdecrypt: Update the IV for decryption. This is the reverse of - * @givencrypt . - * @geniv: The transformation implementation may use an "IV generator" provided - * by the kernel crypto API. Several use cases have a predefined - * approach how IVs are to be updated. For such use cases, the kernel - * crypto API provides ready-to-use implementations that can be - * referenced with this variable. * @ivsize: IV size applicable for transformation. The consumer must provide an * IV of exactly that size to perform the encrypt or decrypt operation. * - * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are - * mandatory and must be filled. + * All fields except @ivsize are mandatory and must be filled. */ struct ablkcipher_alg { int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); - int (*givencrypt)(struct skcipher_givcrypt_request *req); - int (*givdecrypt)(struct skcipher_givcrypt_request *req); - - const char *geniv; unsigned int min_keysize; unsigned int max_keysize; @@ -284,10 +261,9 @@ struct ablkcipher_alg { * @setkey: see struct ablkcipher_alg * @encrypt: see struct ablkcipher_alg * @decrypt: see struct ablkcipher_alg - * @geniv: see struct ablkcipher_alg * @ivsize: see struct ablkcipher_alg * - * All fields except @geniv and @ivsize are mandatory and must be filled. + * All fields except @ivsize are mandatory and must be filled. 
*/ struct blkcipher_alg { int (*setkey)(struct crypto_tfm *tfm, const u8 *key, @@ -299,8 +275,6 @@ struct blkcipher_alg { struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes); - const char *geniv; - unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; @@ -369,6 +343,115 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; +#ifdef CONFIG_CRYPTO_STATS +/* + * struct crypto_istat_aead - statistics for AEAD algorithm + * @encrypt_cnt: number of encrypt requests + * @encrypt_tlen: total data size handled by encrypt requests + * @decrypt_cnt: number of decrypt requests + * @decrypt_tlen: total data size handled by decrypt requests + * @err_cnt: number of error for AEAD requests + */ +struct crypto_istat_aead { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_akcipher - statistics for akcipher algorithm + * @encrypt_cnt: number of encrypt requests + * @encrypt_tlen: total data size handled by encrypt requests + * @decrypt_cnt: number of decrypt requests + * @decrypt_tlen: total data size handled by decrypt requests + * @verify_cnt: number of verify operation + * @sign_cnt: number of sign requests + * @err_cnt: number of error for akcipher requests + */ +struct crypto_istat_akcipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t verify_cnt; + atomic64_t sign_cnt; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_cipher - statistics for cipher algorithm + * @encrypt_cnt: number of encrypt requests + * @encrypt_tlen: total data size handled by encrypt requests + * @decrypt_cnt: number of decrypt requests + * @decrypt_tlen: total data size handled by decrypt requests + * @err_cnt: number of error for cipher requests + */ +struct crypto_istat_cipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_compress - statistics for compress algorithm + * @compress_cnt: number of compress requests + * @compress_tlen: total data size handled by compress requests + * @decompress_cnt: number of decompress requests + * @decompress_tlen: total data size handled by decompress requests + * @err_cnt: number of error for compress requests + */ +struct crypto_istat_compress { + atomic64_t compress_cnt; + atomic64_t compress_tlen; + atomic64_t decompress_cnt; + atomic64_t decompress_tlen; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_hash - statistics for has algorithm + * @hash_cnt: number of hash requests + * @hash_tlen: total data size hashed + * @err_cnt: number of error for hash requests + */ +struct crypto_istat_hash { + atomic64_t hash_cnt; + atomic64_t hash_tlen; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_kpp - statistics for KPP algorithm + * @setsecret_cnt: number of setsecrey operation + * @generate_public_key_cnt: number of generate_public_key operation + * @compute_shared_secret_cnt: number of compute_shared_secret operation + * @err_cnt: number of error for KPP requests + */ +struct crypto_istat_kpp { + atomic64_t setsecret_cnt; + atomic64_t generate_public_key_cnt; + atomic64_t compute_shared_secret_cnt; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_rng: statistics for RNG algorithm + * @generate_cnt: number of RNG generate requests + * @generate_tlen: total data size of generated data by the RNG + * @seed_cnt: 
number of times the RNG was seeded + * @err_cnt: number of error for RNG requests + */ +struct crypto_istat_rng { + atomic64_t generate_cnt; + atomic64_t generate_tlen; + atomic64_t seed_cnt; + atomic64_t err_cnt; +}; +#endif /* CONFIG_CRYPTO_STATS */ #define cra_ablkcipher cra_u.ablkcipher #define cra_blkcipher cra_u.blkcipher @@ -454,32 +537,14 @@ struct compress_alg { * @cra_refcnt: internally used * @cra_destroy: internally used * - * All following statistics are for this crypto_alg - * @encrypt_cnt: number of encrypt requests - * @decrypt_cnt: number of decrypt requests - * @compress_cnt: number of compress requests - * @decompress_cnt: number of decompress requests - * @generate_cnt: number of RNG generate requests - * @seed_cnt: number of times the rng was seeded - * @hash_cnt: number of hash requests - * @sign_cnt: number of sign requests - * @setsecret_cnt: number of setsecrey operation - * @generate_public_key_cnt: number of generate_public_key operation - * @verify_cnt: number of verify operation - * @compute_shared_secret_cnt: number of compute_shared_secret operation - * @encrypt_tlen: total data size handled by encrypt requests - * @decrypt_tlen: total data size handled by decrypt requests - * @compress_tlen: total data size handled by compress requests - * @decompress_tlen: total data size handled by decompress requests - * @generate_tlen: total data size of generated data by the RNG - * @hash_tlen: total data size hashed - * @akcipher_err_cnt: number of error for akcipher requests - * @cipher_err_cnt: number of error for akcipher requests - * @compress_err_cnt: number of error for akcipher requests - * @aead_err_cnt: number of error for akcipher requests - * @hash_err_cnt: number of error for akcipher requests - * @rng_err_cnt: number of error for akcipher requests - * @kpp_err_cnt: number of error for akcipher requests + * @stats: union of all possible crypto_istat_xxx structures + * @stats.aead: statistics for AEAD algorithm + * @stats.akcipher: statistics for akcipher algorithm + * @stats.cipher: statistics for cipher algorithm + * @stats.compress: statistics for compress algorithm + * @stats.hash: statistics for hash algorithm + * @stats.rng: statistics for rng algorithm + * @stats.kpp: statistics for KPP algorithm * * The struct crypto_alg describes a generic Crypto API algorithm and is common * for all of the transformations. 
Any variable not documented here shall not @@ -515,46 +580,86 @@ struct crypto_alg { struct module *cra_module; +#ifdef CONFIG_CRYPTO_STATS union { - atomic_t encrypt_cnt; - atomic_t compress_cnt; - atomic_t generate_cnt; - atomic_t hash_cnt; - atomic_t setsecret_cnt; - }; - union { - atomic64_t encrypt_tlen; - atomic64_t compress_tlen; - atomic64_t generate_tlen; - atomic64_t hash_tlen; - }; - union { - atomic_t akcipher_err_cnt; - atomic_t cipher_err_cnt; - atomic_t compress_err_cnt; - atomic_t aead_err_cnt; - atomic_t hash_err_cnt; - atomic_t rng_err_cnt; - atomic_t kpp_err_cnt; - }; - union { - atomic_t decrypt_cnt; - atomic_t decompress_cnt; - atomic_t seed_cnt; - atomic_t generate_public_key_cnt; - }; - union { - atomic64_t decrypt_tlen; - atomic64_t decompress_tlen; - }; - union { - atomic_t verify_cnt; - atomic_t compute_shared_secret_cnt; - }; - atomic_t sign_cnt; + struct crypto_istat_aead aead; + struct crypto_istat_akcipher akcipher; + struct crypto_istat_cipher cipher; + struct crypto_istat_compress compress; + struct crypto_istat_hash hash; + struct crypto_istat_rng rng; + struct crypto_istat_kpp kpp; + } stats; +#endif /* CONFIG_CRYPTO_STATS */ } CRYPTO_MINALIGN_ATTR; +#ifdef CONFIG_CRYPTO_STATS +void crypto_stats_init(struct crypto_alg *alg); +void crypto_stats_get(struct crypto_alg *alg); +void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); +void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); +void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg); +void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg); +void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg); +void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret); +void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret); +void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret); +void crypto_stats_rng_seed(struct crypto_alg *alg, int ret); +void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret); +void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); +void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); +#else +static inline void crypto_stats_init(struct crypto_alg *alg) +{} +static inline void crypto_stats_get(struct crypto_alg *alg) +{} +static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, 
struct crypto_alg *alg) +{} +static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret) +{} +static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) +{} +#endif /* * A helper struct for waiting for completion of async crypto ops */ @@ -800,14 +905,14 @@ static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( static inline u32 crypto_skcipher_type(u32 type) { - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; return type; } static inline u32 crypto_skcipher_mask(u32 mask) { - mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + mask &= ~CRYPTO_ALG_TYPE_MASK; mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; return mask; } @@ -973,38 +1078,6 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( return __crypto_ablkcipher_cast(req->base.tfm); } -static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct ablkcipher_tfm *crt = - crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt); - } else { - atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt); - atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen); - } -#endif -} - -static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct ablkcipher_tfm *crt = - crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt); - } else { - atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt); - atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen); - } -#endif -} - /** * crypto_ablkcipher_encrypt() - encrypt plaintext * @req: reference to the ablkcipher_request handle that holds all information @@ -1020,10 +1093,13 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + struct crypto_alg *alg = crt->base->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); ret = crt->encrypt(req); - crypto_stat_ablkcipher_encrypt(req, ret); + 
crypto_stats_ablkcipher_encrypt(nbytes, ret, alg); return ret; } @@ -1042,10 +1118,13 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + struct crypto_alg *alg = crt->base->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); ret = crt->decrypt(req); - crypto_stat_ablkcipher_decrypt(req, ret); + crypto_stats_ablkcipher_decrypt(nbytes, ret, alg); return ret; } diff --git a/include/linux/dax.h b/include/linux/dax.h index 450b28db9533..0dd316a74a29 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -7,6 +7,8 @@ #include <linux/radix-tree.h> #include <asm/pgtable.h> +typedef unsigned long dax_entry_t; + struct iomap_ops; struct dax_device; struct dax_operations { @@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping, struct block_device *bdev, struct writeback_control *wbc); struct page *dax_layout_busy_page(struct address_space *mapping); -bool dax_lock_mapping_entry(struct page *page); -void dax_unlock_mapping_entry(struct page *page); +dax_entry_t dax_lock_page(struct page *page); +void dax_unlock_page(struct page *page, dax_entry_t cookie); #else static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize) @@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping, return -EOPNOTSUPP; } -static inline bool dax_lock_mapping_entry(struct page *page) +static inline dax_entry_t dax_lock_page(struct page *page) { if (IS_DAX(page->mapping->host)) - return true; - return false; + return ~0UL; + return 0; } -static inline void dax_unlock_mapping_entry(struct page *page) +static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) { } #endif diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h deleted file mode 100644 index 92521471517f..000000000000 --- a/include/linux/dell-led.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __DELL_LED_H__ -#define __DELL_LED_H__ - -int dell_micmute_led_set(int on); - -#endif diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index e4963b0f45da..fbffa74bfc1b 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -131,6 +131,9 @@ struct devfreq_dev_profile { * @scaling_min_freq: Limit minimum frequency requested by OPP interface * @scaling_max_freq: Limit maximum frequency requested by OPP interface * @stop_polling: devfreq polling status of a device. + * @suspend_freq: frequency of a device set during suspend phase. + * @resume_freq: frequency of a device set in resume phase. + * @suspend_count: suspend requests counter for a device. 
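The dax.h hunk above replaces the bool-returning dax_lock_mapping_entry()/dax_unlock_mapping_entry() pair with dax_lock_page()/dax_unlock_page(), which hand back an opaque dax_entry_t cookie (0 meaning the page could not be locked) that must be passed to the unlock side. A minimal sketch of the calling convention; the function name and error code are placeholders:

        #include <linux/dax.h>
        #include <linux/errno.h>

        static int example_dax_inspect(struct page *page)
        {
                dax_entry_t cookie;

                cookie = dax_lock_page(page);
                if (!cookie)
                        return -EBUSY;          /* mapping already torn down */

                /* page->mapping is stable while the DAX entry is locked */

                dax_unlock_page(page, cookie);
                return 0;
        }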
* @total_trans: Number of devfreq transitions * @trans_table: Statistics of devfreq transitions * @time_in_state: Statistics of devfreq states @@ -167,6 +170,10 @@ struct devfreq { unsigned long scaling_max_freq; bool stop_polling; + unsigned long suspend_freq; + unsigned long resume_freq; + atomic_t suspend_count; + /* information for device frequency transition */ unsigned int total_trans; unsigned int *trans_table; @@ -198,6 +205,9 @@ extern void devm_devfreq_remove_device(struct device *dev, extern int devfreq_suspend_device(struct devfreq *devfreq); extern int devfreq_resume_device(struct devfreq *devfreq); +extern void devfreq_suspend(void); +extern void devfreq_resume(void); + /** * update_devfreq() - Reevaluate the device and configure frequency * @devfreq: the devfreq device @@ -324,6 +334,9 @@ static inline int devfreq_resume_device(struct devfreq *devfreq) return 0; } +static inline void devfreq_suspend(void) {} +static inline void devfreq_resume(void) {} + static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, u32 flags) { diff --git a/include/linux/device.h b/include/linux/device.h index 1b25c7a43f4c..6cb4640b6160 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1058,6 +1058,16 @@ static inline struct device *kobj_to_dev(struct kobject *kobj) return container_of(kobj, struct device, kobj); } +/** + * device_iommu_mapped - Returns true when the device DMA is translated + * by an IOMMU + * @dev: Device to perform the check on + */ +static inline bool device_iommu_mapped(struct device *dev) +{ + return (dev->iommu_group != NULL); +} + /* Get the wakeup routines, which depend on struct device */ #include <linux/pm_wakeup.h> diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index 30213adbb6b9..2ad5c363d7d5 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h @@ -30,8 +30,6 @@ struct bus_type; extern void dma_debug_add_bus(struct bus_type *bus); -extern int dma_debug_resize_entries(u32 num_entries); - extern void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len); @@ -72,17 +70,6 @@ extern void debug_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, int direction); -extern void debug_dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - int direction); - -extern void debug_dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, int direction); - extern void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, int direction); @@ -101,11 +88,6 @@ static inline void dma_debug_add_bus(struct bus_type *bus) { } -static inline int dma_debug_resize_entries(u32 num_entries) -{ - return 0; -} - static inline void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len) { @@ -174,22 +156,6 @@ static inline void debug_dma_sync_single_for_device(struct device *dev, { } -static inline void debug_dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - int direction) -{ -} - -static inline void debug_dma_sync_single_range_for_device(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - int direction) -{ -} - static inline void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, int direction) diff --git a/include/linux/dma-direct.h 
b/include/linux/dma-direct.h index bd73e7a91410..b7338702592a 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -5,8 +5,6 @@ #include <linux/dma-mapping.h> #include <linux/mem_encrypt.h> -#define DIRECT_MAPPING_ERROR 0 - #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA #include <asm/dma-direct.h> #else @@ -50,14 +48,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) return __sme_clr(__dma_to_phys(dev, daddr)); } -#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN -void dma_mark_clean(void *addr, size_t size); -#else -static inline void dma_mark_clean(void *addr, size_t size) -{ -} -#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ - u64 dma_direct_get_required_mask(struct device *dev); void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); @@ -67,11 +57,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); -dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs); -int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, - enum dma_data_direction dir, unsigned long attrs); +struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); +void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page); int dma_direct_supported(struct device *dev, u64 mask); -int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr); #endif /* _LINUX_DMA_DIRECT_H */ diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 02dba8cd033d..999e4b104410 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -541,6 +541,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) return ret < 0 ? ret : 0; } +struct dma_fence *dma_fence_get_stub(void); u64 dma_fence_context_alloc(unsigned num); #define DMA_FENCE_TRACE(f, fmt, args...) \ diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index e8ca5e654277..e760dc5d1fa8 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -69,7 +69,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs); void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs); -int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); /* The DMA API isn't _quite_ the whole story, though... 
*/ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 15bd41447025..ba521d5506c9 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -128,13 +128,14 @@ struct dma_map_ops { enum dma_data_direction dir); void (*cache_sync)(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); int (*dma_supported)(struct device *dev, u64 mask); u64 (*get_required_mask)(struct device *dev); }; -extern const struct dma_map_ops dma_direct_ops; +#define DMA_MAPPING_ERROR (~(dma_addr_t)0) + extern const struct dma_map_ops dma_virt_ops; +extern const struct dma_map_ops dma_dummy_ops; #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) @@ -220,6 +221,69 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev) } #endif +static inline bool dma_is_direct(const struct dma_map_ops *ops) +{ + return likely(!ops); +} + +/* + * All the dma_direct_* declarations are here just for the indirect call bypass, + * and must not be used directly drivers! + */ +dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs); +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs); + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_SWIOTLB) +void dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir); +void dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir); +#else +static inline void dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ +} +static inline void dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ +} +#endif + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ + defined(CONFIG_SWIOTLB) +void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs); +void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs); +void dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir); +void dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir); +#else +static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ +} +static inline void dma_direct_unmap_sg(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ +} +static inline void dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ +} +static inline void dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ +} +#endif + static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, @@ -230,9 +294,12 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, 
BUG_ON(!valid_dma_direction(dir)); debug_dma_map_single(dev, ptr, size); - addr = ops->map_page(dev, virt_to_page(ptr), - offset_in_page(ptr), size, - dir, attrs); + if (dma_is_direct(ops)) + addr = dma_direct_map_page(dev, virt_to_page(ptr), + offset_in_page(ptr), size, dir, attrs); + else + addr = ops->map_page(dev, virt_to_page(ptr), + offset_in_page(ptr), size, dir, attrs); debug_dma_map_page(dev, virt_to_page(ptr), offset_in_page(ptr), size, dir, addr, true); @@ -247,11 +314,19 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (ops->unmap_page) + if (dma_is_direct(ops)) + dma_direct_unmap_page(dev, addr, size, dir, attrs); + else if (ops->unmap_page) ops->unmap_page(dev, addr, size, dir, attrs); debug_dma_unmap_page(dev, addr, size, dir, true); } +static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + return dma_unmap_single_attrs(dev, addr, size, dir, attrs); +} + /* * dma_maps_sg_attrs returns 0 on error and > 0 on success. * It should never return a value < 0. @@ -264,7 +339,10 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int ents; BUG_ON(!valid_dma_direction(dir)); - ents = ops->map_sg(dev, sg, nents, dir, attrs); + if (dma_is_direct(ops)) + ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); + else + ents = ops->map_sg(dev, sg, nents, dir, attrs); BUG_ON(ents < 0); debug_dma_map_sg(dev, sg, nents, ents, dir); @@ -279,7 +357,9 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg BUG_ON(!valid_dma_direction(dir)); debug_dma_unmap_sg(dev, sg, nents, dir); - if (ops->unmap_sg) + if (dma_is_direct(ops)) + dma_direct_unmap_sg(dev, sg, nents, dir, attrs); + else if (ops->unmap_sg) ops->unmap_sg(dev, sg, nents, dir, attrs); } @@ -293,25 +373,15 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev, dma_addr_t addr; BUG_ON(!valid_dma_direction(dir)); - addr = ops->map_page(dev, page, offset, size, dir, attrs); + if (dma_is_direct(ops)) + addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); + else + addr = ops->map_page(dev, page, offset, size, dir, attrs); debug_dma_map_page(dev, page, offset, size, dir, addr, false); return addr; } -static inline void dma_unmap_page_attrs(struct device *dev, - dma_addr_t addr, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, attrs); - debug_dma_unmap_page(dev, addr, size, dir, false); -} - static inline dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, @@ -327,7 +397,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev, BUG_ON(pfn_valid(PHYS_PFN(phys_addr))); addr = phys_addr; - if (ops->map_resource) + if (ops && ops->map_resource) addr = ops->map_resource(dev, phys_addr, size, dir, attrs); debug_dma_map_resource(dev, phys_addr, size, dir, addr); @@ -342,7 +412,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (ops->unmap_resource) + if (ops && ops->unmap_resource) ops->unmap_resource(dev, addr, size, dir, attrs); debug_dma_unmap_resource(dev, addr, size, dir); } @@ -354,11 +424,20 @@ static inline void 
dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_for_cpu) + if (dma_is_direct(ops)) + dma_direct_sync_single_for_cpu(dev, addr, size, dir); + else if (ops->sync_single_for_cpu) ops->sync_single_for_cpu(dev, addr, size, dir); debug_dma_sync_single_for_cpu(dev, addr, size, dir); } +static inline void dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t addr, unsigned long offset, size_t size, + enum dma_data_direction dir) +{ + return dma_sync_single_for_cpu(dev, addr + offset, size, dir); +} + static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) @@ -366,37 +445,18 @@ static inline void dma_sync_single_for_device(struct device *dev, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_for_device) + if (dma_is_direct(ops)) + dma_direct_sync_single_for_device(dev, addr, size, dir); + else if (ops->sync_single_for_device) ops->sync_single_for_device(dev, addr, size, dir); debug_dma_sync_single_for_device(dev, addr, size, dir); } -static inline void dma_sync_single_range_for_cpu(struct device *dev, - dma_addr_t addr, - unsigned long offset, - size_t size, - enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_for_cpu) - ops->sync_single_for_cpu(dev, addr + offset, size, dir); - debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); -} - static inline void dma_sync_single_range_for_device(struct device *dev, - dma_addr_t addr, - unsigned long offset, - size_t size, - enum dma_data_direction dir) + dma_addr_t addr, unsigned long offset, size_t size, + enum dma_data_direction dir) { - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_for_device) - ops->sync_single_for_device(dev, addr + offset, size, dir); - debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); + return dma_sync_single_for_device(dev, addr + offset, size, dir); } static inline void @@ -406,7 +466,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_sg_for_cpu) + if (dma_is_direct(ops)) + dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir); + else if (ops->sync_sg_for_cpu) ops->sync_sg_for_cpu(dev, sg, nelems, dir); debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); } @@ -418,7 +480,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_sg_for_device) + if (dma_is_direct(ops)) + dma_direct_sync_sg_for_device(dev, sg, nelems, dir); + else if (ops->sync_sg_for_device) ops->sync_sg_for_device(dev, sg, nelems, dir); debug_dma_sync_sg_for_device(dev, sg, nelems, dir); @@ -431,16 +495,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) -static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->cache_sync) - ops->cache_sync(dev, vaddr, size, dir); -} +void 
dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction dir); extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, @@ -455,107 +511,29 @@ void *dma_common_pages_remap(struct page **pages, size_t size, const void *caller); void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); -/** - * dma_mmap_attrs - map a coherent DMA allocation into user space - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @vma: vm_area_struct describing requested user mapping - * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs - * @handle: device-view address returned from dma_alloc_attrs - * @size: size of memory originally requested in dma_alloc_attrs - * @attrs: attributes of mapping properties requested in dma_alloc_attrs - * - * Map a coherent DMA buffer previously allocated by dma_alloc_attrs - * into user space. The coherent DMA buffer must not be freed by the - * driver until the user space mapping has been released. - */ -static inline int -dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size, unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - BUG_ON(!ops); - if (ops->mmap) - return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); - return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); -} +int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot); +bool dma_in_atomic_pool(void *start, size_t size); +void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags); +bool dma_free_from_pool(void *start, size_t size); +int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); -static inline int -dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, - dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - BUG_ON(!ops); - if (ops->get_sgtable) - return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, - attrs); - return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size, - attrs); -} - +int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) -#ifndef arch_dma_alloc_attrs -#define arch_dma_alloc_attrs(dev) (true) -#endif - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - void *cpu_addr; - - BUG_ON(!ops); - WARN_ON_ONCE(dev && !dev->coherent_dma_mask); - - if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) - return cpu_addr; - - /* let the implementation decide on the zone to allocate from: */ - flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); - - if (!arch_dma_alloc_attrs(&dev)) - return NULL; - if (!ops->alloc) - return NULL; - - cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); - return cpu_addr; -} - -static inline void dma_free_attrs(struct 
device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!ops); - - if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr)) - return; - /* - * On non-coherent platforms which implement DMA-coherent buffers via - * non-cacheable remaps, ops->free() may call vunmap(). Thus getting - * this far in IRQ context is a) at risk of a BUG_ON() or trying to - * sleep on some machines, and b) an indication that the driver is - * probably misusing the coherent API anyway. - */ - WARN_ON(irqs_disabled()); - - if (!ops->free || !cpu_addr) - return; - - debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); - ops->free(dev, size, cpu_addr, dma_handle, attrs); -} +void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t flag, unsigned long attrs); +void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t dma_handle, unsigned long attrs); static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) @@ -573,43 +551,16 @@ static inline void dma_free_coherent(struct device *dev, size_t size, static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - const struct dma_map_ops *ops = get_dma_ops(dev); - debug_dma_mapping_error(dev, dma_addr); - if (ops->mapping_error) - return ops->mapping_error(dev, dma_addr); - return 0; -} - -static inline void dma_check_mask(struct device *dev, u64 mask) -{ - if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) - dev_warn(dev, "SME is active, device will require DMA bounce buffers\n"); -} - -static inline int dma_supported(struct device *dev, u64 mask) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - if (!ops) - return 0; - if (!ops->dma_supported) - return 1; - return ops->dma_supported(dev, mask); -} - -#ifndef HAVE_ARCH_DMA_SET_MASK -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - dma_check_mask(dev, mask); - - *dev->dma_mask = mask; + if (dma_addr == DMA_MAPPING_ERROR) + return -ENOMEM; return 0; } -#endif + +int dma_supported(struct device *dev, u64 mask); +int dma_set_mask(struct device *dev, u64 mask); +int dma_set_coherent_mask(struct device *dev, u64 mask); static inline u64 dma_get_mask(struct device *dev) { @@ -618,21 +569,6 @@ static inline u64 dma_get_mask(struct device *dev) return DMA_BIT_MASK(32); } -#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK -int dma_set_coherent_mask(struct device *dev, u64 mask); -#else -static inline int dma_set_coherent_mask(struct device *dev, u64 mask) -{ - if (!dma_supported(dev, mask)) - return -EIO; - - dma_check_mask(dev, mask); - - dev->coherent_dma_mask = mask; - return 0; -} -#endif - /* * Set both the DMA mask and the coherent DMA mask to the same thing. * Note that we don't check the return value from dma_set_coherent_mask() @@ -676,8 +612,7 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev) return SZ_64K; } -static inline unsigned int dma_set_max_seg_size(struct device *dev, - unsigned int size) +static inline int dma_set_max_seg_size(struct device *dev, unsigned int size) { if (dev->dma_parms) { dev->dma_parms->max_segment_size = size; @@ -709,12 +644,13 @@ static inline unsigned long dma_max_pfn(struct device *dev) } #endif +/* + * Please always use dma_alloc_coherent instead as it already zeroes the memory! 
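With the per-ops mapping_error() method gone, dma_mapping_error() above simply compares the returned handle against the DMA_MAPPING_ERROR sentinel, so driver-side usage is unchanged. A minimal sketch; the function name, buffer and direction are placeholders:

        #include <linux/dma-mapping.h>

        static int example_map_buffer(struct device *dev, void *buf, size_t len)
        {
                dma_addr_t handle;

                handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, handle))     /* handle == DMA_MAPPING_ERROR */
                        return -ENOMEM;

                /* ... hand 'handle' to the device ... */

                dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
                return 0;
        }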
+ */ static inline void *dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { - void *ret = dma_alloc_coherent(dev, size, dma_handle, - flag | __GFP_ZERO); - return ret; + return dma_alloc_coherent(dev, size, dma_handle, flag); } static inline int dma_get_cache_alignment(void) @@ -796,7 +732,7 @@ static inline void dmam_release_declared_memory(struct device *dev) static inline void *dma_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t gfp) { - unsigned long attrs = DMA_ATTR_NO_WARN; + unsigned long attrs = DMA_ATTR_WRITE_COMBINE; if (gfp & __GFP_NOWARN) attrs |= DMA_ATTR_NO_WARN; diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index 9051b055beec..69b36ed31a99 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -38,7 +38,10 @@ pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction); #else -#define arch_dma_cache_sync NULL +static inline void arch_dma_cache_sync(struct device *dev, void *vaddr, + size_t size, enum dma_data_direction direction) +{ +} #endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */ #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE @@ -69,4 +72,6 @@ static inline void arch_sync_dma_for_cpu_all(struct device *dev) } #endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */ +void arch_dma_prep_coherent(struct page *page, size_t size); + #endif /* _LINUX_DMA_NONCOHERENT_H */ diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h index 9fc594f69eff..fceb5df07097 100644 --- a/include/linux/dma/pxa-dma.h +++ b/include/linux/dma/pxa-dma.h @@ -23,15 +23,4 @@ struct pxad_param { enum pxad_chan_prio prio; }; -struct dma_chan; - -#ifdef CONFIG_PXA_DMA -bool pxad_filter_fn(struct dma_chan *chan, void *param); -#else -static inline bool pxad_filter_fn(struct dma_chan *chan, void *param) -{ - return false; -} -#endif - #endif /* _PXA_DMA_H_ */ diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h index b42b80e52cc2..ab82df64682a 100644 --- a/include/linux/dma/sprd-dma.h +++ b/include/linux/dma/sprd-dma.h @@ -3,9 +3,65 @@ #ifndef _SPRD_DMA_H_ #define _SPRD_DMA_H_ -#define SPRD_DMA_REQ_SHIFT 16 -#define SPRD_DMA_FLAGS(req_mode, int_type) \ - ((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type)) +#define SPRD_DMA_REQ_SHIFT 8 +#define SPRD_DMA_TRG_MODE_SHIFT 16 +#define SPRD_DMA_CHN_MODE_SHIFT 24 +#define SPRD_DMA_FLAGS(chn_mode, trg_mode, req_mode, int_type) \ + ((chn_mode) << SPRD_DMA_CHN_MODE_SHIFT | \ + (trg_mode) << SPRD_DMA_TRG_MODE_SHIFT | \ + (req_mode) << SPRD_DMA_REQ_SHIFT | (int_type)) + +/* + * The Spreadtrum DMA controller supports channel 2-stage tansfer, that means + * we can request 2 dma channels, one for source channel, and another one for + * destination channel. Each channel is independent, and has its own + * configurations. Once the source channel's transaction is done, it will + * trigger the destination channel's transaction automatically by hardware + * signal. + * + * To support 2-stage tansfer, we must configure the channel mode and trigger + * mode as below definition. + */ + +/* + * enum sprd_dma_chn_mode: define the DMA channel mode for 2-stage transfer + * @SPRD_DMA_CHN_MODE_NONE: No channel mode setting which means channel doesn't + * support the 2-stage transfer. + * @SPRD_DMA_SRC_CHN0: Channel used as source channel 0. + * @SPRD_DMA_SRC_CHN1: Channel used as source channel 1. 
+ * @SPRD_DMA_DST_CHN0: Channel used as destination channel 0. + * @SPRD_DMA_DST_CHN1: Channel used as destination channel 1. + * + * Now the DMA controller can supports 2 groups 2-stage transfer. + */ +enum sprd_dma_chn_mode { + SPRD_DMA_CHN_MODE_NONE, + SPRD_DMA_SRC_CHN0, + SPRD_DMA_SRC_CHN1, + SPRD_DMA_DST_CHN0, + SPRD_DMA_DST_CHN1, +}; + +/* + * enum sprd_dma_trg_mode: define the DMA channel trigger mode for 2-stage + * transfer + * @SPRD_DMA_NO_TRG: No trigger setting. + * @SPRD_DMA_FRAG_DONE_TRG: Trigger the transaction of destination channel + * automatically once the source channel's fragment request is done. + * @SPRD_DMA_BLOCK_DONE_TRG: Trigger the transaction of destination channel + * automatically once the source channel's block request is done. + * @SPRD_DMA_TRANS_DONE_TRG: Trigger the transaction of destination channel + * automatically once the source channel's transfer request is done. + * @SPRD_DMA_LIST_DONE_TRG: Trigger the transaction of destination channel + * automatically once the source channel's link-list request is done. + */ +enum sprd_dma_trg_mode { + SPRD_DMA_NO_TRG, + SPRD_DMA_FRAG_DONE_TRG, + SPRD_DMA_BLOCK_DONE_TRG, + SPRD_DMA_TRANS_DONE_TRG, + SPRD_DMA_LIST_DONE_TRG, +}; /* * enum sprd_dma_req_mode: define the DMA request mode diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h deleted file mode 100644 index 21b3e7d33d68..000000000000 --- a/include/linux/dma_remapping.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _DMA_REMAPPING_H -#define _DMA_REMAPPING_H - -/* - * VT-d hardware uses 4KiB page size regardless of host page size. - */ -#define VTD_PAGE_SHIFT (12) -#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) -#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) -#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) - -#define VTD_STRIDE_SHIFT (9) -#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT) - -#define DMA_PTE_READ (1) -#define DMA_PTE_WRITE (2) -#define DMA_PTE_LARGE_PAGE (1 << 7) -#define DMA_PTE_SNP (1 << 11) - -#define CONTEXT_TT_MULTI_LEVEL 0 -#define CONTEXT_TT_DEV_IOTLB 1 -#define CONTEXT_TT_PASS_THROUGH 2 -/* Extended context entry types */ -#define CONTEXT_TT_PT_PASID 4 -#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5 -#define CONTEXT_TT_MASK (7ULL << 2) - -#define CONTEXT_DINVE (1ULL << 8) -#define CONTEXT_PRS (1ULL << 9) -#define CONTEXT_PASIDE (1ULL << 11) - -struct intel_iommu; -struct dmar_domain; -struct root_entry; - - -#ifdef CONFIG_INTEL_IOMMU -extern int iommu_calculate_agaw(struct intel_iommu *iommu); -extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); -extern int dmar_disabled; -extern int intel_iommu_enabled; -extern int intel_iommu_tboot_noforce; -#else -static inline int iommu_calculate_agaw(struct intel_iommu *iommu) -{ - return 0; -} -static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) -{ - return 0; -} -#define dmar_disabled (1) -#define intel_iommu_enabled (0) -#endif - - -#endif diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 843a41ba7e28..f8af1d770520 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -39,6 +39,7 @@ struct acpi_dmar_header; /* DMAR Flags */ #define DMAR_INTR_REMAP 0x1 #define DMAR_X2APIC_OPT_OUT 0x2 +#define DMAR_PLATFORM_OPT_IN 0x4 struct intel_iommu; @@ -170,6 +171,8 @@ static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) { return 0; } #endif /* CONFIG_IRQ_REMAP */ +extern bool dmar_platform_optin(void); + #else /* CONFIG_DMAR_TABLE */ static 
inline int dmar_device_add(void *handle) @@ -182,6 +185,11 @@ static inline int dmar_device_remove(void *handle) return 0; } +static inline bool dmar_platform_optin(void) +{ + return false; +} + #endif /* CONFIG_DMAR_TABLE */ struct irte { diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 2d0259327721..a19d98367f08 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -51,7 +51,7 @@ #endif extern const char *drbd_buildtag(void); -#define REL_VERSION "8.4.10" +#define REL_VERSION "8.4.11" #define API_VERSION 1 #define PRO_VERSION_MIN 86 #define PRO_VERSION_MAX 101 diff --git a/include/linux/edac.h b/include/linux/edac.h index 1d0c9ea8825d..342dabda9c7e 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -669,10 +669,4 @@ struct mem_ctl_info { bool fake_inject_ue; u16 fake_inject_count; }; - -/* - * Maximum number of memory controllers in the coherent fabric. - */ -#define EDAC_MAX_MCS 2 * MAX_NUMNODES - #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index 845174e113ce..45ff763fba76 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -663,6 +663,10 @@ void efi_native_runtime_setup(void); #define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) #define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23) +#define EFI_CERT_SHA256_GUID EFI_GUID(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28) +#define EFI_CERT_X509_GUID EFI_GUID(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72) +#define EFI_CERT_X509_SHA256_GUID EFI_GUID(0x3bd2a492, 0x96c0, 0x4079, 0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 0xed) + /* * This GUID is used to pass to the kernel proper the struct screen_info * structure that was populated by the stub based on the GOP protocol instance @@ -934,6 +938,27 @@ typedef struct { efi_memory_desc_t entry[0]; } efi_memory_attributes_table_t; +typedef struct { + efi_guid_t signature_owner; + u8 signature_data[]; +} efi_signature_data_t; + +typedef struct { + efi_guid_t signature_type; + u32 signature_list_size; + u32 signature_header_size; + u32 signature_size; + u8 signature_header[]; + /* efi_signature_data_t signatures[][] */ +} efi_signature_list_t; + +typedef u8 efi_sha256_hash_t[32]; + +typedef struct { + efi_sha256_hash_t to_be_signed_hash; + efi_time_t time_of_revocation; +} efi_cert_x509_sha256_t; + /* * All runtime access to EFI goes through this structure: */ @@ -1000,13 +1025,11 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ #ifdef CONFIG_X86 -extern void efi_free_boot_services(void); extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, bool nonblocking); extern void efi_find_mirror(void); #else -static inline void efi_free_boot_services(void) {} static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, @@ -1046,7 +1069,6 @@ extern void efi_mem_reserve(phys_addr_t addr, u64 size); extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); -extern void efi_reserve_boot_services(void); extern int efi_get_fdt_params(struct efi_fdt_params *params); extern struct kobject *efi_kobj; @@ 
-1116,6 +1138,15 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm, char * __init efi_md_typeattr_format(char *buf, size_t size, const efi_memory_desc_t *md); + +typedef void (*efi_element_handler_t)(const char *source, + const void *element_data, + size_t element_size); +extern int __init parse_efi_signature_list( + const char *source, + const void *data, size_t size, + efi_element_handler_t (*get_handler_for_guid)(const efi_guid_t *)); + /** * efi_range_is_wc - check the WC bit on an address range * @start: starting kvirt address @@ -1167,6 +1198,8 @@ static inline bool efi_enabled(int feature) extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); extern bool efi_is_table_address(unsigned long phys_addr); + +extern int efi_apply_persistent_mem_reservations(void); #else static inline bool efi_enabled(int feature) { @@ -1185,6 +1218,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr) { return false; } + +static inline int efi_apply_persistent_mem_reservations(void) +{ + return 0; +} #endif extern int efi_status_to_err(efi_status_t status); @@ -1708,9 +1746,19 @@ extern struct efi_runtime_work efi_rts_work; extern struct workqueue_struct *efi_rts_wq; struct linux_efi_memreserve { - phys_addr_t next; - phys_addr_t base; - phys_addr_t size; + int size; // allocated size of the array + atomic_t count; // number of entries used + phys_addr_t next; // pa of next struct instance + struct { + phys_addr_t base; + phys_addr_t size; + } entry[0]; }; +#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \ + (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0])) + +#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ + / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 015bb59c0331..2e9e2763bf47 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -23,74 +23,6 @@ enum elv_merge { ELEVATOR_DISCARD_MERGE = 3, }; -typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **, - struct bio *); - -typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *); - -typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge); - -typedef int (elevator_allow_bio_merge_fn) (struct request_queue *, - struct request *, struct bio *); - -typedef int (elevator_allow_rq_merge_fn) (struct request_queue *, - struct request *, struct request *); - -typedef void (elevator_bio_merged_fn) (struct request_queue *, - struct request *, struct bio *); - -typedef int (elevator_dispatch_fn) (struct request_queue *, int); - -typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); -typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); -typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); -typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int); - -typedef void (elevator_init_icq_fn) (struct io_cq *); -typedef void (elevator_exit_icq_fn) (struct io_cq *); -typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, - struct bio *, gfp_t); -typedef void (elevator_put_req_fn) (struct request *); -typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *); -typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *); - -typedef int 
(elevator_init_fn) (struct request_queue *, - struct elevator_type *e); -typedef void (elevator_exit_fn) (struct elevator_queue *); -typedef void (elevator_registered_fn) (struct request_queue *); - -struct elevator_ops -{ - elevator_merge_fn *elevator_merge_fn; - elevator_merged_fn *elevator_merged_fn; - elevator_merge_req_fn *elevator_merge_req_fn; - elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn; - elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn; - elevator_bio_merged_fn *elevator_bio_merged_fn; - - elevator_dispatch_fn *elevator_dispatch_fn; - elevator_add_req_fn *elevator_add_req_fn; - elevator_activate_req_fn *elevator_activate_req_fn; - elevator_deactivate_req_fn *elevator_deactivate_req_fn; - - elevator_completed_req_fn *elevator_completed_req_fn; - - elevator_request_list_fn *elevator_former_req_fn; - elevator_request_list_fn *elevator_latter_req_fn; - - elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */ - elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */ - - elevator_set_req_fn *elevator_set_req_fn; - elevator_put_req_fn *elevator_put_req_fn; - - elevator_may_queue_fn *elevator_may_queue_fn; - - elevator_init_fn *elevator_init_fn; - elevator_exit_fn *elevator_exit_fn; - elevator_registered_fn *elevator_registered_fn; -}; - struct blk_mq_alloc_data; struct blk_mq_hw_ctx; @@ -137,17 +69,14 @@ struct elevator_type struct kmem_cache *icq_cache; /* fields provided by elevator implementation */ - union { - struct elevator_ops sq; - struct elevator_mq_ops mq; - } ops; + struct elevator_mq_ops ops; + size_t icq_size; /* see iocontext.h */ size_t icq_align; /* ditto */ struct elv_fs_entry *elevator_attrs; char elevator_name[ELV_NAME_MAX]; const char *elevator_alias; struct module *elevator_owner; - bool uses_mq; #ifdef CONFIG_BLK_DEBUG_FS const struct blk_mq_debugfs_attr *queue_debugfs_attrs; const struct blk_mq_debugfs_attr *hctx_debugfs_attrs; @@ -175,40 +104,25 @@ struct elevator_queue struct kobject kobj; struct mutex sysfs_lock; unsigned int registered:1; - unsigned int uses_mq:1; DECLARE_HASHTABLE(hash, ELV_HASH_BITS); }; /* * block elevator interface */ -extern void elv_dispatch_sort(struct request_queue *, struct request *); -extern void elv_dispatch_add_tail(struct request_queue *, struct request *); -extern void elv_add_request(struct request_queue *, struct request *, int); -extern void __elv_add_request(struct request_queue *, struct request *, int); extern enum elv_merge elv_merge(struct request_queue *, struct request **, struct bio *); extern void elv_merge_requests(struct request_queue *, struct request *, struct request *); extern void elv_merged_request(struct request_queue *, struct request *, enum elv_merge); -extern void elv_bio_merged(struct request_queue *q, struct request *, - struct bio *); extern bool elv_attempt_insert_merge(struct request_queue *, struct request *); -extern void elv_requeue_request(struct request_queue *, struct request *); extern struct request *elv_former_request(struct request_queue *, struct request *); extern struct request *elv_latter_request(struct request_queue *, struct request *); -extern int elv_may_queue(struct request_queue *, unsigned int); -extern void elv_completed_request(struct request_queue *, struct request *); -extern int elv_set_request(struct request_queue *q, struct request *rq, - struct bio *bio, gfp_t gfp_mask); -extern void elv_put_request(struct request_queue *, struct request *); -extern void elv_drain_elevator(struct request_queue *); /* * io scheduler registration */ -extern 
void __init load_default_elevator_module(void); extern int elv_register(struct elevator_type *); extern void elv_unregister(struct elevator_type *); @@ -260,9 +174,5 @@ enum { #define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) #define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist) -#else /* CONFIG_BLOCK */ - -static inline void load_default_elevator_module(void) { } - #endif /* CONFIG_BLOCK */ #endif diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h new file mode 100644 index 000000000000..aa027f7bcb3e --- /dev/null +++ b/include/linux/energy_model.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ENERGY_MODEL_H +#define _LINUX_ENERGY_MODEL_H +#include <linux/cpumask.h> +#include <linux/jump_label.h> +#include <linux/kobject.h> +#include <linux/rcupdate.h> +#include <linux/sched/cpufreq.h> +#include <linux/sched/topology.h> +#include <linux/types.h> + +#ifdef CONFIG_ENERGY_MODEL +/** + * em_cap_state - Capacity state of a performance domain + * @frequency: The CPU frequency in KHz, for consistency with CPUFreq + * @power: The power consumed by 1 CPU at this level, in milli-watts + * @cost: The cost coefficient associated with this level, used during + * energy calculation. Equal to: power * max_frequency / frequency + */ +struct em_cap_state { + unsigned long frequency; + unsigned long power; + unsigned long cost; +}; + +/** + * em_perf_domain - Performance domain + * @table: List of capacity states, in ascending order + * @nr_cap_states: Number of capacity states + * @cpus: Cpumask covering the CPUs of the domain + * + * A "performance domain" represents a group of CPUs whose performance is + * scaled together. All CPUs of a performance domain must have the same + * micro-architecture. Performance domains often have a 1-to-1 mapping with + * CPUFreq policies. + */ +struct em_perf_domain { + struct em_cap_state *table; + int nr_cap_states; + unsigned long cpus[0]; +}; + +#define EM_CPU_MAX_POWER 0xFFFF + +struct em_data_callback { + /** + * active_power() - Provide power at the next capacity state of a CPU + * @power : Active power at the capacity state in mW (modified) + * @freq : Frequency at the capacity state in kHz (modified) + * @cpu : CPU for which we do this operation + * + * active_power() must find the lowest capacity state of 'cpu' above + * 'freq' and update 'power' and 'freq' to the matching active power + * and frequency. + * + * The power is the one of a single CPU in the domain, expressed in + * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER] + * range. + * + * Return 0 on success. + */ + int (*active_power)(unsigned long *power, unsigned long *freq, int cpu); +}; +#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb } + +struct em_perf_domain *em_cpu_get(int cpu); +int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, + struct em_data_callback *cb); + +/** + * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain + * @pd : performance domain for which energy has to be estimated + * @max_util : highest utilization among CPUs of the domain + * @sum_util : sum of the utilization of all CPUs in the domain + * + * Return: the sum of the energy consumed by the CPUs of the domain assuming + * a capacity state satisfying the max utilization of the domain. 
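
The em_register_perf_domain()/EM_DATA_CB() interface declared above might be wired up roughly as follows. This is an editorial sketch, not part of the patch: the callback, the three-point frequency/power table and all "example_*" names are hypothetical; real numbers would come from firmware, DT or measurements.

#include <linux/kernel.h>
#include <linux/energy_model.h>

/* Hypothetical per-CPU frequency (kHz) / power (mW) table for one domain. */
static const unsigned long example_khz[] = {  500000, 1000000, 1500000 };
static const unsigned long example_mw[]  = {      80,     250,     600 };

static int example_active_power(unsigned long *power, unsigned long *freq, int cpu)
{
        int i;

        /* Report the lowest table entry at or above the requested frequency. */
        for (i = 0; i < ARRAY_SIZE(example_khz); i++) {
                if (example_khz[i] >= *freq) {
                        *freq  = example_khz[i];
                        *power = example_mw[i];
                        return 0;
                }
        }
        return -EINVAL;
}

static struct em_data_callback example_cb = EM_DATA_CB(example_active_power);

/* Called from e.g. a cpufreq driver's ->init() for the CPUs of one policy. */
static int example_register(cpumask_t *cpus)
{
        return em_register_perf_domain(cpus, ARRAY_SIZE(example_khz), &example_cb);
}
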
+ */ +static inline unsigned long em_pd_energy(struct em_perf_domain *pd, + unsigned long max_util, unsigned long sum_util) +{ + unsigned long freq, scale_cpu; + struct em_cap_state *cs; + int i, cpu; + + /* + * In order to predict the capacity state, map the utilization of the + * most utilized CPU of the performance domain to a requested frequency, + * like schedutil. + */ + cpu = cpumask_first(to_cpumask(pd->cpus)); + scale_cpu = arch_scale_cpu_capacity(NULL, cpu); + cs = &pd->table[pd->nr_cap_states - 1]; + freq = map_util_freq(max_util, cs->frequency, scale_cpu); + + /* + * Find the lowest capacity state of the Energy Model above the + * requested frequency. + */ + for (i = 0; i < pd->nr_cap_states; i++) { + cs = &pd->table[i]; + if (cs->frequency >= freq) + break; + } + + /* + * The capacity of a CPU in the domain at that capacity state (cs) + * can be computed as: + * + * cs->freq * scale_cpu + * cs->cap = -------------------- (1) + * cpu_max_freq + * + * So, ignoring the costs of idle states (which are not available in + * the EM), the energy consumed by this CPU at that capacity state is + * estimated as: + * + * cs->power * cpu_util + * cpu_nrg = -------------------- (2) + * cs->cap + * + * since 'cpu_util / cs->cap' represents its percentage of busy time. + * + * NOTE: Although the result of this computation actually is in + * units of power, it can be manipulated as an energy value + * over a scheduling period, since it is assumed to be + * constant during that interval. + * + * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product + * of two terms: + * + * cs->power * cpu_max_freq cpu_util + * cpu_nrg = ------------------------ * --------- (3) + * cs->freq scale_cpu + * + * The first term is static, and is stored in the em_cap_state struct + * as 'cs->cost'. + * + * Since all CPUs of the domain have the same micro-architecture, they + * share the same 'cs->cost', and the same CPU capacity. Hence, the + * total energy of the domain (which is the simple sum of the energy of + * all of its CPUs) can be factorized as: + * + * cs->cost * \Sum cpu_util + * pd_nrg = ------------------------ (4) + * scale_cpu + */ + return cs->cost * sum_util / scale_cpu; +} + +/** + * em_pd_nr_cap_states() - Get the number of capacity states of a perf. 
domain + * @pd : performance domain for which this must be done + * + * Return: the number of capacity states in the performance domain table + */ +static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) +{ + return pd->nr_cap_states; +} + +#else +struct em_perf_domain {}; +struct em_data_callback {}; +#define EM_DATA_CB(_active_power_cb) { } + +static inline int em_register_perf_domain(cpumask_t *span, + unsigned int nr_states, struct em_data_callback *cb) +{ + return -EINVAL; +} +static inline struct em_perf_domain *em_cpu_get(int cpu) +{ + return NULL; +} +static inline unsigned long em_pd_energy(struct em_perf_domain *pd, + unsigned long max_util, unsigned long sum_util) +{ + return 0; +} +static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) +{ + return 0; +} +#endif + +#endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 572e11bb8696..2c0af7b00715 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -32,6 +32,7 @@ struct device; int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); unsigned char *arch_get_platform_mac_address(void); +int nvmem_get_mac_address(struct device *dev, void *addrbuf); u32 eth_get_headlen(void *data, unsigned int max_len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; diff --git a/include/linux/export.h b/include/linux/export.h index ce764a5d2ee4..fd8711ed9ac4 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -92,22 +92,22 @@ struct kernel_symbol { */ #define __EXPORT_SYMBOL(sym, sec) -#elif defined(__KSYM_DEPS__) +#elif defined(CONFIG_TRIM_UNUSED_KSYMS) + +#include <generated/autoksyms.h> /* * For fine grained build dependencies, we want to tell the build system * about each possible exported symbol even if they're not actually exported. - * We use a string pattern that is unlikely to be valid code that the build - * system filters out from the preprocessor output (see ksym_dep_filter - * in scripts/Kbuild.include). + * We use a symbol pattern __ksym_marker_<symbol> that the build system filters + * from the $(NM) output (see scripts/gen_ksymdeps.sh). These symbols are + * discarded in the final link stage. 
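
Concretely (an editorial illustration, not in the patch), for a hypothetical exported symbol my_func the marker side of the macro defined just below boils down to something like:

/* EXPORT_SYMBOL(my_func) now always emits a zero-sized marker array ... */
static int __ksym_marker_my_func[0] __section(".discard.ksym") __used;
/*
 * ... which scripts/gen_ksymdeps.sh extracts from the object file via $(NM)
 * to record the fine-grained dependency, while the export itself is still
 * compiled in only when <generated/autoksyms.h> defines __KSYM_my_func.
 * The .discard.ksym section is dropped at the final link.
 */
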
*/ -#define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym === - -#elif defined(CONFIG_TRIM_UNUSED_KSYMS) - -#include <generated/autoksyms.h> +#define __ksym_marker(sym) \ + static int __ksym_marker_##sym[0] __section(".discard.ksym") __used #define __EXPORT_SYMBOL(sym, sec) \ + __ksym_marker(sym); \ __cond_export_sym(sym, sec, __is_defined(__KSYM_##sym)) #define __cond_export_sym(sym, sec, conf) \ ___cond_export_sym(sym, sec, conf) diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index a5a60691e48b..9e2142795335 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -37,10 +37,11 @@ /* Events that user can request to be notified on */ #define FANOTIFY_EVENTS (FAN_ACCESS | FAN_MODIFY | \ - FAN_CLOSE | FAN_OPEN) + FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC) /* Events that require a permission response from user */ -#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM) +#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \ + FAN_OPEN_EXEC_PERM) /* Extra flags that may be reported with event or control handling of events */ #define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR) diff --git a/include/linux/fb.h b/include/linux/fb.h index a3cab6dc9b44..7cdd31a69719 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -485,7 +485,7 @@ struct fb_info { struct list_head modelist; /* mode list */ struct fb_videomode *mode; /* current mode */ -#ifdef CONFIG_FB_BACKLIGHT +#if IS_ENABLED(CONFIG_FB_BACKLIGHT) /* assigned backlight device */ /* set before framebuffer registration, remove after unregister */ diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 41615f38bcff..f07c55ea0c22 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -121,6 +121,7 @@ extern void __fd_install(struct files_struct *files, unsigned int fd, struct file *file); extern int __close_fd(struct files_struct *files, unsigned int fd); +extern int __close_fd_get_file(unsigned int fd, struct file **res); extern struct kmem_cache *files_cachep; diff --git a/include/linux/filter.h b/include/linux/filter.h index de629b706d1d..ad106d845b22 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -53,14 +53,10 @@ struct sock_reuseport; #define BPF_REG_D BPF_REG_8 /* data, callee-saved */ #define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */ -/* Kernel hidden auxiliary/helper register for hardening step. - * Only used by eBPF JITs. It's nothing more than a temporary - * register that JITs use internally, only that here it's part - * of eBPF instructions that have been rewritten for blinding - * constants. See JIT pre-step in bpf_jit_blind_constants(). - */ +/* Kernel hidden auxiliary/helper register. */ #define BPF_REG_AX MAX_BPF_REG -#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) +#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1) +#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG /* unused opcode to mark special call to bpf_tail_call() helper */ #define BPF_TAIL_CALL 0xf0 @@ -449,6 +445,13 @@ struct sock_reuseport; offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 +#if BITS_PER_LONG == 64 +# define bpf_ctx_range_ptr(TYPE, MEMBER) \ + offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 +#else +# define bpf_ctx_range_ptr(TYPE, MEMBER) \ + offsetof(TYPE, MEMBER) ... 
offsetof(TYPE, MEMBER) + 8 - 1 +#endif /* BITS_PER_LONG == 64 */ #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ ({ \ @@ -668,24 +671,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size) return size; } -static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access, - u32 size_default) -{ - size_default = bpf_ctx_off_adjust_machine(size_default); - size_access = bpf_ctx_off_adjust_machine(size_access); - -#ifdef __LITTLE_ENDIAN - return (off & (size_default - 1)) == 0; -#else - return (off & (size_default - 1)) + size_access == size_default; -#endif -} - static inline bool bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) { - return bpf_ctx_narrow_align_ok(off, size, size_default) && - size <= size_default && (size & (size - 1)) == 0; + return size <= size_default && (size & (size - 1)) == 0; } #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) @@ -732,6 +721,13 @@ void bpf_prog_free(struct bpf_prog *fp); bool bpf_opcode_in_insntable(u8 code); +void bpf_prog_free_linfo(struct bpf_prog *prog); +void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, + const u32 *insn_to_jit_off); +int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog); +void bpf_prog_free_jited_linfo(struct bpf_prog *prog); +void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog); + struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, gfp_t gfp_extra_flags); @@ -854,7 +850,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, extern int bpf_jit_enable; extern int bpf_jit_harden; extern int bpf_jit_kallsyms; -extern int bpf_jit_limit; +extern long bpf_jit_limit; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); @@ -866,6 +862,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr); void bpf_jit_free(struct bpf_prog *fp); +int bpf_jit_get_func_addr(const struct bpf_prog *prog, + const struct bpf_insn *insn, bool extra_pass, + u64 *func_addr, bool *func_addr_fixed); + struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h index 29ada609de03..ebc55098faee 100644 --- a/include/linux/firmware/imx/sci.h +++ b/include/linux/firmware/imx/sci.h @@ -14,4 +14,5 @@ #include <linux/firmware/imx/types.h> #include <linux/firmware/imx/svc/misc.h> +#include <linux/firmware/imx/svc/pm.h> #endif /* _SC_SCI_H */ diff --git a/include/linux/firmware/imx/svc/pm.h b/include/linux/firmware/imx/svc/pm.h new file mode 100644 index 000000000000..1f6975dd37b0 --- /dev/null +++ b/include/linux/firmware/imx/svc/pm.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2018 NXP + * + * Header file containing the public API for the System Controller (SC) + * Power Management (PM) function. This includes functions for power state + * control, clock control, reset control, and wake-up event control. + * + * PM_SVC (SVC) Power Management Service + * + * Module for the Power Management (PM) service. + */ + +#ifndef _SC_PM_API_H +#define _SC_PM_API_H + +#include <linux/firmware/imx/sci.h> + +/* + * This type is used to indicate RPC PM function calls. 
+ */ +enum imx_sc_pm_func { + IMX_SC_PM_FUNC_UNKNOWN = 0, + IMX_SC_PM_FUNC_SET_SYS_POWER_MODE = 19, + IMX_SC_PM_FUNC_SET_PARTITION_POWER_MODE = 1, + IMX_SC_PM_FUNC_GET_SYS_POWER_MODE = 2, + IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE = 3, + IMX_SC_PM_FUNC_GET_RESOURCE_POWER_MODE = 4, + IMX_SC_PM_FUNC_REQ_LOW_POWER_MODE = 16, + IMX_SC_PM_FUNC_SET_CPU_RESUME_ADDR = 17, + IMX_SC_PM_FUNC_REQ_SYS_IF_POWER_MODE = 18, + IMX_SC_PM_FUNC_SET_CLOCK_RATE = 5, + IMX_SC_PM_FUNC_GET_CLOCK_RATE = 6, + IMX_SC_PM_FUNC_CLOCK_ENABLE = 7, + IMX_SC_PM_FUNC_SET_CLOCK_PARENT = 14, + IMX_SC_PM_FUNC_GET_CLOCK_PARENT = 15, + IMX_SC_PM_FUNC_RESET = 13, + IMX_SC_PM_FUNC_RESET_REASON = 10, + IMX_SC_PM_FUNC_BOOT = 8, + IMX_SC_PM_FUNC_REBOOT = 9, + IMX_SC_PM_FUNC_REBOOT_PARTITION = 12, + IMX_SC_PM_FUNC_CPU_START = 11, +}; + +/* + * Defines for ALL parameters + */ +#define IMX_SC_PM_CLK_ALL UINT8_MAX /* All clocks */ + +/* + * Defines for SC PM Power Mode + */ +#define IMX_SC_PM_PW_MODE_OFF 0 /* Power off */ +#define IMX_SC_PM_PW_MODE_STBY 1 /* Power in standby */ +#define IMX_SC_PM_PW_MODE_LP 2 /* Power in low-power */ +#define IMX_SC_PM_PW_MODE_ON 3 /* Power on */ + +/* + * Defines for SC PM CLK + */ +#define IMX_SC_PM_CLK_SLV_BUS 0 /* Slave bus clock */ +#define IMX_SC_PM_CLK_MST_BUS 1 /* Master bus clock */ +#define IMX_SC_PM_CLK_PER 2 /* Peripheral clock */ +#define IMX_SC_PM_CLK_PHY 3 /* Phy clock */ +#define IMX_SC_PM_CLK_MISC 4 /* Misc clock */ +#define IMX_SC_PM_CLK_MISC0 0 /* Misc 0 clock */ +#define IMX_SC_PM_CLK_MISC1 1 /* Misc 1 clock */ +#define IMX_SC_PM_CLK_MISC2 2 /* Misc 2 clock */ +#define IMX_SC_PM_CLK_MISC3 3 /* Misc 3 clock */ +#define IMX_SC_PM_CLK_MISC4 4 /* Misc 4 clock */ +#define IMX_SC_PM_CLK_CPU 2 /* CPU clock */ +#define IMX_SC_PM_CLK_PLL 4 /* PLL */ +#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */ + +/* + * Defines for SC PM CLK Parent + */ +#define IMX_SC_PM_PARENT_XTAL 0 /* Parent is XTAL. */ +#define IMX_SC_PM_PARENT_PLL0 1 /* Parent is PLL0 */ +#define IMX_SC_PM_PARENT_PLL1 2 /* Parent is PLL1 or PLL0/2 */ +#define IMX_SC_PM_PARENT_PLL2 3 /* Parent in PLL2 or PLL0/4 */ +#define IMX_SC_PM_PARENT_BYPS 4 /* Parent is a bypass clock. */ + +#endif /* _SC_PM_API_H */ diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h index 9cbf0c4a6069..80821100e85f 100644 --- a/include/linux/firmware/imx/types.h +++ b/include/linux/firmware/imx/types.h @@ -10,558 +10,6 @@ #define _SC_TYPES_H /* - * This type is used to indicate a resource. Resources include peripherals - * and bus masters (but not memory regions). Note items from list should - * never be changed or removed (only added to at the end of the list). 
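
For orientation, the PM function IDs and power-mode defines above end up as fields of SCU RPC messages. The sketch below is editorial: it assumes the IPC helpers and message header from <linux/firmware/imx/ipc.h> (imx_scu_get_handle(), imx_scu_call_rpc(), struct imx_sc_rpc_msg, IMX_SC_RPC_VERSION, IMX_SC_RPC_SVC_PM), and the request layout itself is illustrative rather than the exact one used by the upstream drivers.

#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/svc/pm.h>

/* Illustrative request layout; real drivers define their own __packed structs. */
struct example_req_set_rsrc_power_mode {
        struct imx_sc_rpc_msg hdr;
        u16 resource;
        u8 mode;
} __packed;

static int example_power_on(u16 resource)
{
        struct example_req_set_rsrc_power_mode msg;
        struct imx_sc_ipc *ipc;
        int ret;

        ret = imx_scu_get_handle(&ipc);
        if (ret)
                return ret;

        msg.hdr.ver  = IMX_SC_RPC_VERSION;
        msg.hdr.svc  = IMX_SC_RPC_SVC_PM;
        msg.hdr.func = IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE;
        msg.hdr.size = 2;                       /* message size in words, header included */
        msg.resource = resource;
        msg.mode     = IMX_SC_PM_PW_MODE_ON;

        return imx_scu_call_rpc(ipc, &msg, true);
}
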
- */ -enum imx_sc_rsrc { - IMX_SC_R_A53 = 0, - IMX_SC_R_A53_0 = 1, - IMX_SC_R_A53_1 = 2, - IMX_SC_R_A53_2 = 3, - IMX_SC_R_A53_3 = 4, - IMX_SC_R_A72 = 5, - IMX_SC_R_A72_0 = 6, - IMX_SC_R_A72_1 = 7, - IMX_SC_R_A72_2 = 8, - IMX_SC_R_A72_3 = 9, - IMX_SC_R_CCI = 10, - IMX_SC_R_DB = 11, - IMX_SC_R_DRC_0 = 12, - IMX_SC_R_DRC_1 = 13, - IMX_SC_R_GIC_SMMU = 14, - IMX_SC_R_IRQSTR_M4_0 = 15, - IMX_SC_R_IRQSTR_M4_1 = 16, - IMX_SC_R_SMMU = 17, - IMX_SC_R_GIC = 18, - IMX_SC_R_DC_0_BLIT0 = 19, - IMX_SC_R_DC_0_BLIT1 = 20, - IMX_SC_R_DC_0_BLIT2 = 21, - IMX_SC_R_DC_0_BLIT_OUT = 22, - IMX_SC_R_DC_0_CAPTURE0 = 23, - IMX_SC_R_DC_0_CAPTURE1 = 24, - IMX_SC_R_DC_0_WARP = 25, - IMX_SC_R_DC_0_INTEGRAL0 = 26, - IMX_SC_R_DC_0_INTEGRAL1 = 27, - IMX_SC_R_DC_0_VIDEO0 = 28, - IMX_SC_R_DC_0_VIDEO1 = 29, - IMX_SC_R_DC_0_FRAC0 = 30, - IMX_SC_R_DC_0_FRAC1 = 31, - IMX_SC_R_DC_0 = 32, - IMX_SC_R_GPU_2_PID0 = 33, - IMX_SC_R_DC_0_PLL_0 = 34, - IMX_SC_R_DC_0_PLL_1 = 35, - IMX_SC_R_DC_1_BLIT0 = 36, - IMX_SC_R_DC_1_BLIT1 = 37, - IMX_SC_R_DC_1_BLIT2 = 38, - IMX_SC_R_DC_1_BLIT_OUT = 39, - IMX_SC_R_DC_1_CAPTURE0 = 40, - IMX_SC_R_DC_1_CAPTURE1 = 41, - IMX_SC_R_DC_1_WARP = 42, - IMX_SC_R_DC_1_INTEGRAL0 = 43, - IMX_SC_R_DC_1_INTEGRAL1 = 44, - IMX_SC_R_DC_1_VIDEO0 = 45, - IMX_SC_R_DC_1_VIDEO1 = 46, - IMX_SC_R_DC_1_FRAC0 = 47, - IMX_SC_R_DC_1_FRAC1 = 48, - IMX_SC_R_DC_1 = 49, - IMX_SC_R_GPU_3_PID0 = 50, - IMX_SC_R_DC_1_PLL_0 = 51, - IMX_SC_R_DC_1_PLL_1 = 52, - IMX_SC_R_SPI_0 = 53, - IMX_SC_R_SPI_1 = 54, - IMX_SC_R_SPI_2 = 55, - IMX_SC_R_SPI_3 = 56, - IMX_SC_R_UART_0 = 57, - IMX_SC_R_UART_1 = 58, - IMX_SC_R_UART_2 = 59, - IMX_SC_R_UART_3 = 60, - IMX_SC_R_UART_4 = 61, - IMX_SC_R_EMVSIM_0 = 62, - IMX_SC_R_EMVSIM_1 = 63, - IMX_SC_R_DMA_0_CH0 = 64, - IMX_SC_R_DMA_0_CH1 = 65, - IMX_SC_R_DMA_0_CH2 = 66, - IMX_SC_R_DMA_0_CH3 = 67, - IMX_SC_R_DMA_0_CH4 = 68, - IMX_SC_R_DMA_0_CH5 = 69, - IMX_SC_R_DMA_0_CH6 = 70, - IMX_SC_R_DMA_0_CH7 = 71, - IMX_SC_R_DMA_0_CH8 = 72, - IMX_SC_R_DMA_0_CH9 = 73, - IMX_SC_R_DMA_0_CH10 = 74, - IMX_SC_R_DMA_0_CH11 = 75, - IMX_SC_R_DMA_0_CH12 = 76, - IMX_SC_R_DMA_0_CH13 = 77, - IMX_SC_R_DMA_0_CH14 = 78, - IMX_SC_R_DMA_0_CH15 = 79, - IMX_SC_R_DMA_0_CH16 = 80, - IMX_SC_R_DMA_0_CH17 = 81, - IMX_SC_R_DMA_0_CH18 = 82, - IMX_SC_R_DMA_0_CH19 = 83, - IMX_SC_R_DMA_0_CH20 = 84, - IMX_SC_R_DMA_0_CH21 = 85, - IMX_SC_R_DMA_0_CH22 = 86, - IMX_SC_R_DMA_0_CH23 = 87, - IMX_SC_R_DMA_0_CH24 = 88, - IMX_SC_R_DMA_0_CH25 = 89, - IMX_SC_R_DMA_0_CH26 = 90, - IMX_SC_R_DMA_0_CH27 = 91, - IMX_SC_R_DMA_0_CH28 = 92, - IMX_SC_R_DMA_0_CH29 = 93, - IMX_SC_R_DMA_0_CH30 = 94, - IMX_SC_R_DMA_0_CH31 = 95, - IMX_SC_R_I2C_0 = 96, - IMX_SC_R_I2C_1 = 97, - IMX_SC_R_I2C_2 = 98, - IMX_SC_R_I2C_3 = 99, - IMX_SC_R_I2C_4 = 100, - IMX_SC_R_ADC_0 = 101, - IMX_SC_R_ADC_1 = 102, - IMX_SC_R_FTM_0 = 103, - IMX_SC_R_FTM_1 = 104, - IMX_SC_R_CAN_0 = 105, - IMX_SC_R_CAN_1 = 106, - IMX_SC_R_CAN_2 = 107, - IMX_SC_R_DMA_1_CH0 = 108, - IMX_SC_R_DMA_1_CH1 = 109, - IMX_SC_R_DMA_1_CH2 = 110, - IMX_SC_R_DMA_1_CH3 = 111, - IMX_SC_R_DMA_1_CH4 = 112, - IMX_SC_R_DMA_1_CH5 = 113, - IMX_SC_R_DMA_1_CH6 = 114, - IMX_SC_R_DMA_1_CH7 = 115, - IMX_SC_R_DMA_1_CH8 = 116, - IMX_SC_R_DMA_1_CH9 = 117, - IMX_SC_R_DMA_1_CH10 = 118, - IMX_SC_R_DMA_1_CH11 = 119, - IMX_SC_R_DMA_1_CH12 = 120, - IMX_SC_R_DMA_1_CH13 = 121, - IMX_SC_R_DMA_1_CH14 = 122, - IMX_SC_R_DMA_1_CH15 = 123, - IMX_SC_R_DMA_1_CH16 = 124, - IMX_SC_R_DMA_1_CH17 = 125, - IMX_SC_R_DMA_1_CH18 = 126, - IMX_SC_R_DMA_1_CH19 = 127, - IMX_SC_R_DMA_1_CH20 = 128, - IMX_SC_R_DMA_1_CH21 = 129, - IMX_SC_R_DMA_1_CH22 = 130, - IMX_SC_R_DMA_1_CH23 = 131, - 
IMX_SC_R_DMA_1_CH24 = 132, - IMX_SC_R_DMA_1_CH25 = 133, - IMX_SC_R_DMA_1_CH26 = 134, - IMX_SC_R_DMA_1_CH27 = 135, - IMX_SC_R_DMA_1_CH28 = 136, - IMX_SC_R_DMA_1_CH29 = 137, - IMX_SC_R_DMA_1_CH30 = 138, - IMX_SC_R_DMA_1_CH31 = 139, - IMX_SC_R_UNUSED1 = 140, - IMX_SC_R_UNUSED2 = 141, - IMX_SC_R_UNUSED3 = 142, - IMX_SC_R_UNUSED4 = 143, - IMX_SC_R_GPU_0_PID0 = 144, - IMX_SC_R_GPU_0_PID1 = 145, - IMX_SC_R_GPU_0_PID2 = 146, - IMX_SC_R_GPU_0_PID3 = 147, - IMX_SC_R_GPU_1_PID0 = 148, - IMX_SC_R_GPU_1_PID1 = 149, - IMX_SC_R_GPU_1_PID2 = 150, - IMX_SC_R_GPU_1_PID3 = 151, - IMX_SC_R_PCIE_A = 152, - IMX_SC_R_SERDES_0 = 153, - IMX_SC_R_MATCH_0 = 154, - IMX_SC_R_MATCH_1 = 155, - IMX_SC_R_MATCH_2 = 156, - IMX_SC_R_MATCH_3 = 157, - IMX_SC_R_MATCH_4 = 158, - IMX_SC_R_MATCH_5 = 159, - IMX_SC_R_MATCH_6 = 160, - IMX_SC_R_MATCH_7 = 161, - IMX_SC_R_MATCH_8 = 162, - IMX_SC_R_MATCH_9 = 163, - IMX_SC_R_MATCH_10 = 164, - IMX_SC_R_MATCH_11 = 165, - IMX_SC_R_MATCH_12 = 166, - IMX_SC_R_MATCH_13 = 167, - IMX_SC_R_MATCH_14 = 168, - IMX_SC_R_PCIE_B = 169, - IMX_SC_R_SATA_0 = 170, - IMX_SC_R_SERDES_1 = 171, - IMX_SC_R_HSIO_GPIO = 172, - IMX_SC_R_MATCH_15 = 173, - IMX_SC_R_MATCH_16 = 174, - IMX_SC_R_MATCH_17 = 175, - IMX_SC_R_MATCH_18 = 176, - IMX_SC_R_MATCH_19 = 177, - IMX_SC_R_MATCH_20 = 178, - IMX_SC_R_MATCH_21 = 179, - IMX_SC_R_MATCH_22 = 180, - IMX_SC_R_MATCH_23 = 181, - IMX_SC_R_MATCH_24 = 182, - IMX_SC_R_MATCH_25 = 183, - IMX_SC_R_MATCH_26 = 184, - IMX_SC_R_MATCH_27 = 185, - IMX_SC_R_MATCH_28 = 186, - IMX_SC_R_LCD_0 = 187, - IMX_SC_R_LCD_0_PWM_0 = 188, - IMX_SC_R_LCD_0_I2C_0 = 189, - IMX_SC_R_LCD_0_I2C_1 = 190, - IMX_SC_R_PWM_0 = 191, - IMX_SC_R_PWM_1 = 192, - IMX_SC_R_PWM_2 = 193, - IMX_SC_R_PWM_3 = 194, - IMX_SC_R_PWM_4 = 195, - IMX_SC_R_PWM_5 = 196, - IMX_SC_R_PWM_6 = 197, - IMX_SC_R_PWM_7 = 198, - IMX_SC_R_GPIO_0 = 199, - IMX_SC_R_GPIO_1 = 200, - IMX_SC_R_GPIO_2 = 201, - IMX_SC_R_GPIO_3 = 202, - IMX_SC_R_GPIO_4 = 203, - IMX_SC_R_GPIO_5 = 204, - IMX_SC_R_GPIO_6 = 205, - IMX_SC_R_GPIO_7 = 206, - IMX_SC_R_GPT_0 = 207, - IMX_SC_R_GPT_1 = 208, - IMX_SC_R_GPT_2 = 209, - IMX_SC_R_GPT_3 = 210, - IMX_SC_R_GPT_4 = 211, - IMX_SC_R_KPP = 212, - IMX_SC_R_MU_0A = 213, - IMX_SC_R_MU_1A = 214, - IMX_SC_R_MU_2A = 215, - IMX_SC_R_MU_3A = 216, - IMX_SC_R_MU_4A = 217, - IMX_SC_R_MU_5A = 218, - IMX_SC_R_MU_6A = 219, - IMX_SC_R_MU_7A = 220, - IMX_SC_R_MU_8A = 221, - IMX_SC_R_MU_9A = 222, - IMX_SC_R_MU_10A = 223, - IMX_SC_R_MU_11A = 224, - IMX_SC_R_MU_12A = 225, - IMX_SC_R_MU_13A = 226, - IMX_SC_R_MU_5B = 227, - IMX_SC_R_MU_6B = 228, - IMX_SC_R_MU_7B = 229, - IMX_SC_R_MU_8B = 230, - IMX_SC_R_MU_9B = 231, - IMX_SC_R_MU_10B = 232, - IMX_SC_R_MU_11B = 233, - IMX_SC_R_MU_12B = 234, - IMX_SC_R_MU_13B = 235, - IMX_SC_R_ROM_0 = 236, - IMX_SC_R_FSPI_0 = 237, - IMX_SC_R_FSPI_1 = 238, - IMX_SC_R_IEE = 239, - IMX_SC_R_IEE_R0 = 240, - IMX_SC_R_IEE_R1 = 241, - IMX_SC_R_IEE_R2 = 242, - IMX_SC_R_IEE_R3 = 243, - IMX_SC_R_IEE_R4 = 244, - IMX_SC_R_IEE_R5 = 245, - IMX_SC_R_IEE_R6 = 246, - IMX_SC_R_IEE_R7 = 247, - IMX_SC_R_SDHC_0 = 248, - IMX_SC_R_SDHC_1 = 249, - IMX_SC_R_SDHC_2 = 250, - IMX_SC_R_ENET_0 = 251, - IMX_SC_R_ENET_1 = 252, - IMX_SC_R_MLB_0 = 253, - IMX_SC_R_DMA_2_CH0 = 254, - IMX_SC_R_DMA_2_CH1 = 255, - IMX_SC_R_DMA_2_CH2 = 256, - IMX_SC_R_DMA_2_CH3 = 257, - IMX_SC_R_DMA_2_CH4 = 258, - IMX_SC_R_USB_0 = 259, - IMX_SC_R_USB_1 = 260, - IMX_SC_R_USB_0_PHY = 261, - IMX_SC_R_USB_2 = 262, - IMX_SC_R_USB_2_PHY = 263, - IMX_SC_R_DTCP = 264, - IMX_SC_R_NAND = 265, - IMX_SC_R_LVDS_0 = 266, - IMX_SC_R_LVDS_0_PWM_0 = 267, - IMX_SC_R_LVDS_0_I2C_0 = 268, - 
IMX_SC_R_LVDS_0_I2C_1 = 269, - IMX_SC_R_LVDS_1 = 270, - IMX_SC_R_LVDS_1_PWM_0 = 271, - IMX_SC_R_LVDS_1_I2C_0 = 272, - IMX_SC_R_LVDS_1_I2C_1 = 273, - IMX_SC_R_LVDS_2 = 274, - IMX_SC_R_LVDS_2_PWM_0 = 275, - IMX_SC_R_LVDS_2_I2C_0 = 276, - IMX_SC_R_LVDS_2_I2C_1 = 277, - IMX_SC_R_M4_0_PID0 = 278, - IMX_SC_R_M4_0_PID1 = 279, - IMX_SC_R_M4_0_PID2 = 280, - IMX_SC_R_M4_0_PID3 = 281, - IMX_SC_R_M4_0_PID4 = 282, - IMX_SC_R_M4_0_RGPIO = 283, - IMX_SC_R_M4_0_SEMA42 = 284, - IMX_SC_R_M4_0_TPM = 285, - IMX_SC_R_M4_0_PIT = 286, - IMX_SC_R_M4_0_UART = 287, - IMX_SC_R_M4_0_I2C = 288, - IMX_SC_R_M4_0_INTMUX = 289, - IMX_SC_R_M4_0_SIM = 290, - IMX_SC_R_M4_0_WDOG = 291, - IMX_SC_R_M4_0_MU_0B = 292, - IMX_SC_R_M4_0_MU_0A0 = 293, - IMX_SC_R_M4_0_MU_0A1 = 294, - IMX_SC_R_M4_0_MU_0A2 = 295, - IMX_SC_R_M4_0_MU_0A3 = 296, - IMX_SC_R_M4_0_MU_1A = 297, - IMX_SC_R_M4_1_PID0 = 298, - IMX_SC_R_M4_1_PID1 = 299, - IMX_SC_R_M4_1_PID2 = 300, - IMX_SC_R_M4_1_PID3 = 301, - IMX_SC_R_M4_1_PID4 = 302, - IMX_SC_R_M4_1_RGPIO = 303, - IMX_SC_R_M4_1_SEMA42 = 304, - IMX_SC_R_M4_1_TPM = 305, - IMX_SC_R_M4_1_PIT = 306, - IMX_SC_R_M4_1_UART = 307, - IMX_SC_R_M4_1_I2C = 308, - IMX_SC_R_M4_1_INTMUX = 309, - IMX_SC_R_M4_1_SIM = 310, - IMX_SC_R_M4_1_WDOG = 311, - IMX_SC_R_M4_1_MU_0B = 312, - IMX_SC_R_M4_1_MU_0A0 = 313, - IMX_SC_R_M4_1_MU_0A1 = 314, - IMX_SC_R_M4_1_MU_0A2 = 315, - IMX_SC_R_M4_1_MU_0A3 = 316, - IMX_SC_R_M4_1_MU_1A = 317, - IMX_SC_R_SAI_0 = 318, - IMX_SC_R_SAI_1 = 319, - IMX_SC_R_SAI_2 = 320, - IMX_SC_R_IRQSTR_SCU2 = 321, - IMX_SC_R_IRQSTR_DSP = 322, - IMX_SC_R_UNUSED5 = 323, - IMX_SC_R_UNUSED6 = 324, - IMX_SC_R_AUDIO_PLL_0 = 325, - IMX_SC_R_PI_0 = 326, - IMX_SC_R_PI_0_PWM_0 = 327, - IMX_SC_R_PI_0_PWM_1 = 328, - IMX_SC_R_PI_0_I2C_0 = 329, - IMX_SC_R_PI_0_PLL = 330, - IMX_SC_R_PI_1 = 331, - IMX_SC_R_PI_1_PWM_0 = 332, - IMX_SC_R_PI_1_PWM_1 = 333, - IMX_SC_R_PI_1_I2C_0 = 334, - IMX_SC_R_PI_1_PLL = 335, - IMX_SC_R_SC_PID0 = 336, - IMX_SC_R_SC_PID1 = 337, - IMX_SC_R_SC_PID2 = 338, - IMX_SC_R_SC_PID3 = 339, - IMX_SC_R_SC_PID4 = 340, - IMX_SC_R_SC_SEMA42 = 341, - IMX_SC_R_SC_TPM = 342, - IMX_SC_R_SC_PIT = 343, - IMX_SC_R_SC_UART = 344, - IMX_SC_R_SC_I2C = 345, - IMX_SC_R_SC_MU_0B = 346, - IMX_SC_R_SC_MU_0A0 = 347, - IMX_SC_R_SC_MU_0A1 = 348, - IMX_SC_R_SC_MU_0A2 = 349, - IMX_SC_R_SC_MU_0A3 = 350, - IMX_SC_R_SC_MU_1A = 351, - IMX_SC_R_SYSCNT_RD = 352, - IMX_SC_R_SYSCNT_CMP = 353, - IMX_SC_R_DEBUG = 354, - IMX_SC_R_SYSTEM = 355, - IMX_SC_R_SNVS = 356, - IMX_SC_R_OTP = 357, - IMX_SC_R_VPU_PID0 = 358, - IMX_SC_R_VPU_PID1 = 359, - IMX_SC_R_VPU_PID2 = 360, - IMX_SC_R_VPU_PID3 = 361, - IMX_SC_R_VPU_PID4 = 362, - IMX_SC_R_VPU_PID5 = 363, - IMX_SC_R_VPU_PID6 = 364, - IMX_SC_R_VPU_PID7 = 365, - IMX_SC_R_VPU_UART = 366, - IMX_SC_R_VPUCORE = 367, - IMX_SC_R_VPUCORE_0 = 368, - IMX_SC_R_VPUCORE_1 = 369, - IMX_SC_R_VPUCORE_2 = 370, - IMX_SC_R_VPUCORE_3 = 371, - IMX_SC_R_DMA_4_CH0 = 372, - IMX_SC_R_DMA_4_CH1 = 373, - IMX_SC_R_DMA_4_CH2 = 374, - IMX_SC_R_DMA_4_CH3 = 375, - IMX_SC_R_DMA_4_CH4 = 376, - IMX_SC_R_ISI_CH0 = 377, - IMX_SC_R_ISI_CH1 = 378, - IMX_SC_R_ISI_CH2 = 379, - IMX_SC_R_ISI_CH3 = 380, - IMX_SC_R_ISI_CH4 = 381, - IMX_SC_R_ISI_CH5 = 382, - IMX_SC_R_ISI_CH6 = 383, - IMX_SC_R_ISI_CH7 = 384, - IMX_SC_R_MJPEG_DEC_S0 = 385, - IMX_SC_R_MJPEG_DEC_S1 = 386, - IMX_SC_R_MJPEG_DEC_S2 = 387, - IMX_SC_R_MJPEG_DEC_S3 = 388, - IMX_SC_R_MJPEG_ENC_S0 = 389, - IMX_SC_R_MJPEG_ENC_S1 = 390, - IMX_SC_R_MJPEG_ENC_S2 = 391, - IMX_SC_R_MJPEG_ENC_S3 = 392, - IMX_SC_R_MIPI_0 = 393, - IMX_SC_R_MIPI_0_PWM_0 = 394, - IMX_SC_R_MIPI_0_I2C_0 = 395, - 
IMX_SC_R_MIPI_0_I2C_1 = 396, - IMX_SC_R_MIPI_1 = 397, - IMX_SC_R_MIPI_1_PWM_0 = 398, - IMX_SC_R_MIPI_1_I2C_0 = 399, - IMX_SC_R_MIPI_1_I2C_1 = 400, - IMX_SC_R_CSI_0 = 401, - IMX_SC_R_CSI_0_PWM_0 = 402, - IMX_SC_R_CSI_0_I2C_0 = 403, - IMX_SC_R_CSI_1 = 404, - IMX_SC_R_CSI_1_PWM_0 = 405, - IMX_SC_R_CSI_1_I2C_0 = 406, - IMX_SC_R_HDMI = 407, - IMX_SC_R_HDMI_I2S = 408, - IMX_SC_R_HDMI_I2C_0 = 409, - IMX_SC_R_HDMI_PLL_0 = 410, - IMX_SC_R_HDMI_RX = 411, - IMX_SC_R_HDMI_RX_BYPASS = 412, - IMX_SC_R_HDMI_RX_I2C_0 = 413, - IMX_SC_R_ASRC_0 = 414, - IMX_SC_R_ESAI_0 = 415, - IMX_SC_R_SPDIF_0 = 416, - IMX_SC_R_SPDIF_1 = 417, - IMX_SC_R_SAI_3 = 418, - IMX_SC_R_SAI_4 = 419, - IMX_SC_R_SAI_5 = 420, - IMX_SC_R_GPT_5 = 421, - IMX_SC_R_GPT_6 = 422, - IMX_SC_R_GPT_7 = 423, - IMX_SC_R_GPT_8 = 424, - IMX_SC_R_GPT_9 = 425, - IMX_SC_R_GPT_10 = 426, - IMX_SC_R_DMA_2_CH5 = 427, - IMX_SC_R_DMA_2_CH6 = 428, - IMX_SC_R_DMA_2_CH7 = 429, - IMX_SC_R_DMA_2_CH8 = 430, - IMX_SC_R_DMA_2_CH9 = 431, - IMX_SC_R_DMA_2_CH10 = 432, - IMX_SC_R_DMA_2_CH11 = 433, - IMX_SC_R_DMA_2_CH12 = 434, - IMX_SC_R_DMA_2_CH13 = 435, - IMX_SC_R_DMA_2_CH14 = 436, - IMX_SC_R_DMA_2_CH15 = 437, - IMX_SC_R_DMA_2_CH16 = 438, - IMX_SC_R_DMA_2_CH17 = 439, - IMX_SC_R_DMA_2_CH18 = 440, - IMX_SC_R_DMA_2_CH19 = 441, - IMX_SC_R_DMA_2_CH20 = 442, - IMX_SC_R_DMA_2_CH21 = 443, - IMX_SC_R_DMA_2_CH22 = 444, - IMX_SC_R_DMA_2_CH23 = 445, - IMX_SC_R_DMA_2_CH24 = 446, - IMX_SC_R_DMA_2_CH25 = 447, - IMX_SC_R_DMA_2_CH26 = 448, - IMX_SC_R_DMA_2_CH27 = 449, - IMX_SC_R_DMA_2_CH28 = 450, - IMX_SC_R_DMA_2_CH29 = 451, - IMX_SC_R_DMA_2_CH30 = 452, - IMX_SC_R_DMA_2_CH31 = 453, - IMX_SC_R_ASRC_1 = 454, - IMX_SC_R_ESAI_1 = 455, - IMX_SC_R_SAI_6 = 456, - IMX_SC_R_SAI_7 = 457, - IMX_SC_R_AMIX = 458, - IMX_SC_R_MQS_0 = 459, - IMX_SC_R_DMA_3_CH0 = 460, - IMX_SC_R_DMA_3_CH1 = 461, - IMX_SC_R_DMA_3_CH2 = 462, - IMX_SC_R_DMA_3_CH3 = 463, - IMX_SC_R_DMA_3_CH4 = 464, - IMX_SC_R_DMA_3_CH5 = 465, - IMX_SC_R_DMA_3_CH6 = 466, - IMX_SC_R_DMA_3_CH7 = 467, - IMX_SC_R_DMA_3_CH8 = 468, - IMX_SC_R_DMA_3_CH9 = 469, - IMX_SC_R_DMA_3_CH10 = 470, - IMX_SC_R_DMA_3_CH11 = 471, - IMX_SC_R_DMA_3_CH12 = 472, - IMX_SC_R_DMA_3_CH13 = 473, - IMX_SC_R_DMA_3_CH14 = 474, - IMX_SC_R_DMA_3_CH15 = 475, - IMX_SC_R_DMA_3_CH16 = 476, - IMX_SC_R_DMA_3_CH17 = 477, - IMX_SC_R_DMA_3_CH18 = 478, - IMX_SC_R_DMA_3_CH19 = 479, - IMX_SC_R_DMA_3_CH20 = 480, - IMX_SC_R_DMA_3_CH21 = 481, - IMX_SC_R_DMA_3_CH22 = 482, - IMX_SC_R_DMA_3_CH23 = 483, - IMX_SC_R_DMA_3_CH24 = 484, - IMX_SC_R_DMA_3_CH25 = 485, - IMX_SC_R_DMA_3_CH26 = 486, - IMX_SC_R_DMA_3_CH27 = 487, - IMX_SC_R_DMA_3_CH28 = 488, - IMX_SC_R_DMA_3_CH29 = 489, - IMX_SC_R_DMA_3_CH30 = 490, - IMX_SC_R_DMA_3_CH31 = 491, - IMX_SC_R_AUDIO_PLL_1 = 492, - IMX_SC_R_AUDIO_CLK_0 = 493, - IMX_SC_R_AUDIO_CLK_1 = 494, - IMX_SC_R_MCLK_OUT_0 = 495, - IMX_SC_R_MCLK_OUT_1 = 496, - IMX_SC_R_PMIC_0 = 497, - IMX_SC_R_PMIC_1 = 498, - IMX_SC_R_SECO = 499, - IMX_SC_R_CAAM_JR1 = 500, - IMX_SC_R_CAAM_JR2 = 501, - IMX_SC_R_CAAM_JR3 = 502, - IMX_SC_R_SECO_MU_2 = 503, - IMX_SC_R_SECO_MU_3 = 504, - IMX_SC_R_SECO_MU_4 = 505, - IMX_SC_R_HDMI_RX_PWM_0 = 506, - IMX_SC_R_A35 = 507, - IMX_SC_R_A35_0 = 508, - IMX_SC_R_A35_1 = 509, - IMX_SC_R_A35_2 = 510, - IMX_SC_R_A35_3 = 511, - IMX_SC_R_DSP = 512, - IMX_SC_R_DSP_RAM = 513, - IMX_SC_R_CAAM_JR1_OUT = 514, - IMX_SC_R_CAAM_JR2_OUT = 515, - IMX_SC_R_CAAM_JR3_OUT = 516, - IMX_SC_R_VPU_DEC_0 = 517, - IMX_SC_R_VPU_ENC_0 = 518, - IMX_SC_R_CAAM_JR0 = 519, - IMX_SC_R_CAAM_JR0_OUT = 520, - IMX_SC_R_PMIC_2 = 521, - IMX_SC_R_DBLOGIC = 522, - IMX_SC_R_HDMI_PLL_1 = 523, - 
IMX_SC_R_BOARD_R0 = 524, - IMX_SC_R_BOARD_R1 = 525, - IMX_SC_R_BOARD_R2 = 526, - IMX_SC_R_BOARD_R3 = 527, - IMX_SC_R_BOARD_R4 = 528, - IMX_SC_R_BOARD_R5 = 529, - IMX_SC_R_BOARD_R6 = 530, - IMX_SC_R_BOARD_R7 = 531, - IMX_SC_R_MJPEG_DEC_MP = 532, - IMX_SC_R_MJPEG_ENC_MP = 533, - IMX_SC_R_VPU_TS_0 = 534, - IMX_SC_R_VPU_MU_0 = 535, - IMX_SC_R_VPU_MU_1 = 536, - IMX_SC_R_VPU_MU_2 = 537, - IMX_SC_R_VPU_MU_3 = 538, - IMX_SC_R_VPU_ENC_1 = 539, - IMX_SC_R_VPU = 540, - IMX_SC_R_LAST -}; - -/* NOTE - please add by replacing some of the UNUSED from above! */ - -/* * This type is used to indicate a control. */ enum imx_sc_ctrl { diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h new file mode 100644 index 000000000000..5be5dab50b13 --- /dev/null +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2017-2018, Intel Corporation + */ + +#ifndef __STRATIX10_SMC_H +#define __STRATIX10_SMC_H + +#include <linux/arm-smccc.h> +#include <linux/bitops.h> + +/** + * This file defines the Secure Monitor Call (SMC) message protocol used for + * service layer driver in normal world (EL1) to communicate with secure + * monitor software in Secure Monitor Exception Level 3 (EL3). + * + * This file is shared with secure firmware (FW) which is out of kernel tree. + * + * An ARM SMC instruction takes a function identifier and up to 6 64-bit + * register values as arguments, and can return up to 4 64-bit register + * value. The operation of the secure monitor is determined by the parameter + * values passed in through registers. + * + * EL1 and EL3 communicates pointer as physical address rather than the + * virtual address. + * + * Functions specified by ARM SMC Calling convention: + * + * FAST call executes atomic operations, returns when the requested operation + * has completed. + * STD call starts a operation which can be preempted by a non-secure + * interrupt. The call can return before the requested operation has + * completed. + * + * a0..a7 is used as register names in the descriptions below, on arm32 + * that translates to r0..r7 and on arm64 to w0..w7. + */ + +/** + * @func_num: function ID + */ +#define INTEL_SIP_SMC_STD_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_SIP, (func_num)) + +#define INTEL_SIP_SMC_FAST_CALL_VAL(func_num) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_SIP, (func_num)) + +/** + * Return values in INTEL_SIP_SMC_* call + * + * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION: + * Secure monitor software doesn't recognize the request. + * + * INTEL_SIP_SMC_STATUS_OK: + * FPGA configuration completed successfully, + * In case of FPGA configuration write operation, it means secure monitor + * software can accept the next chunk of FPGA configuration data. + * + * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY: + * In case of FPGA configuration write operation, it means secure monitor + * software is still processing previous data & can't accept the next chunk + * of data. Service driver needs to issue + * INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE call to query the + * completed block(s). + * + * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR: + * There is error during the FPGA configuration process. + * + * INTEL_SIP_SMC_REG_ERROR: + * There is error during a read or write operation of the protected registers. + * + * INTEL_SIP_SMC_RSU_ERROR: + * There is error during a remote status update. 
+ */ +#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF +#define INTEL_SIP_SMC_STATUS_OK 0x0 +#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY 0x1 +#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED 0x2 +#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR 0x4 +#define INTEL_SIP_SMC_REG_ERROR 0x5 +#define INTEL_SIP_SMC_RSU_ERROR 0x7 + +/** + * Request INTEL_SIP_SMC_FPGA_CONFIG_START + * + * Sync call used by service driver at EL1 to request the FPGA in EL3 to + * be prepare to receive a new configuration. + * + * Call register usage: + * a0: INTEL_SIP_SMC_FPGA_CONFIG_START. + * a1: flag for full or partial configuration. 0 for full and 1 for partial + * configuration. + * a2-7: not used. + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a1-3: not used. + */ +#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1 +#define INTEL_SIP_SMC_FPGA_CONFIG_START \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START) + +/** + * Request INTEL_SIP_SMC_FPGA_CONFIG_WRITE + * + * Async call used by service driver at EL1 to provide FPGA configuration data + * to secure world. + * + * Call register usage: + * a0: INTEL_SIP_SMC_FPGA_CONFIG_WRITE. + * a1: 64bit physical address of the configuration data memory block + * a2: Size of configuration data block. + * a3-7: not used. + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or + * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a1: 64bit physical address of 1st completed memory block if any completed + * block, otherwise zero value. + * a2: 64bit physical address of 2nd completed memory block if any completed + * block, otherwise zero value. + * a3: 64bit physical address of 3rd completed memory block if any completed + * block, otherwise zero value. + */ +#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE 2 +#define INTEL_SIP_SMC_FPGA_CONFIG_WRITE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_WRITE) + +/** + * Request INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE + * + * Sync call used by service driver at EL1 to track the completed write + * transactions. This request is called after INTEL_SIP_SMC_FPGA_CONFIG_WRITE + * call returns INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY. + * + * Call register usage: + * a0: INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE. + * a1-7: not used. + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or + * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a1: 64bit physical address of 1st completed memory block. + * a2: 64bit physical address of 2nd completed memory block if + * any completed block, otherwise zero value. + * a3: 64bit physical address of 3rd completed memory block if + * any completed block, otherwise zero value. + */ +#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE 3 +#define INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE \ +INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) + +/** + * Request INTEL_SIP_SMC_FPGA_CONFIG_ISDONE + * + * Sync call used by service driver at EL1 to inform secure world that all + * data are sent, to check whether or not the secure world had completed + * the FPGA configuration process. + * + * Call register usage: + * a0: INTEL_SIP_SMC_FPGA_CONFIG_ISDONE. + * a1-7: not used. + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or + * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a1-3: not used. 
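
Taken together, the START/WRITE/COMPLETED_WRITE/ISDONE requests documented above amount to the following driver-side loop. This is a simplified editorial sketch only, using the arm_smccc_smc() helper already pulled in by this header; the real handling (multiple queued buffers, timeouts, workqueue context) lives in the Stratix10 service layer driver.

#include <linux/arm-smccc.h>
#include <linux/firmware/intel/stratix10-smc.h>

static int example_fpga_config(phys_addr_t buf_pa, size_t count)
{
        struct arm_smccc_res res;

        /* 1. Ask EL3 to prepare for a full (a1 = 0) reconfiguration. */
        arm_smccc_smc(INTEL_SIP_SMC_FPGA_CONFIG_START, 0, 0, 0, 0, 0, 0, 0, &res);
        if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
                return -EIO;

        /*
         * 2. Hand one chunk of bitstream data to the secure world.  If EL3 is
         *    still busy with earlier data, reclaim completed buffers and retry
         *    (a real driver bounds this with a timeout).
         */
        for (;;) {
                arm_smccc_smc(INTEL_SIP_SMC_FPGA_CONFIG_WRITE,
                              buf_pa, count, 0, 0, 0, 0, 0, &res);
                if (res.a0 != INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY)
                        break;
                arm_smccc_smc(INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE,
                              0, 0, 0, 0, 0, 0, 0, &res);
        }
        if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
                return -EIO;

        /* 3. Tell EL3 everything was sent, then poll until configuration ends. */
        do {
                arm_smccc_smc(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
                              0, 0, 0, 0, 0, 0, 0, &res);
        } while (res.a0 == INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY);

        return res.a0 == INTEL_SIP_SMC_STATUS_OK ? 0 : -EIO;
}
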
+ */ +#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE 4 +#define INTEL_SIP_SMC_FPGA_CONFIG_ISDONE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE) + +/** + * Request INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM + * + * Sync call used by service driver at EL1 to query the physical address of + * memory block reserved by secure monitor software. + * + * Call register usage: + * a0:INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM. + * a1-7: not used. + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a1: start of physical address of reserved memory block. + * a2: size of reserved memory block. + * a3: not used. + */ +#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM 5 +#define INTEL_SIP_SMC_FPGA_CONFIG_GET_MEM \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_GET_MEM) + +/** + * Request INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK + * + * For SMC loop-back mode only, used for internal integration, debugging + * or troubleshooting. + * + * Call register usage: + * a0: INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK. + * a1-7: not used. + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a1-3: not used. + */ +#define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6 +#define INTEL_SIP_SMC_FPGA_CONFIG_LOOPBACK \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK) + +/* + * Request INTEL_SIP_SMC_REG_READ + * + * Read a protected register at EL3 + * + * Call register usage: + * a0: INTEL_SIP_SMC_REG_READ. + * a1: register address. + * a2-7: not used. + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR. + * a1: value in the register + * a2-3: not used. + */ +#define INTEL_SIP_SMC_FUNCID_REG_READ 7 +#define INTEL_SIP_SMC_REG_READ \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ) + +/* + * Request INTEL_SIP_SMC_REG_WRITE + * + * Write a protected register at EL3 + * + * Call register usage: + * a0: INTEL_SIP_SMC_REG_WRITE. + * a1: register address + * a2: value to program into register. + * a3-7: not used. + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR. + * a1-3: not used. + */ +#define INTEL_SIP_SMC_FUNCID_REG_WRITE 8 +#define INTEL_SIP_SMC_REG_WRITE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE) + +/* + * Request INTEL_SIP_SMC_FUNCID_REG_UPDATE + * + * Update one or more bits in a protected register at EL3 using a + * read-modify-write operation. + * + * Call register usage: + * a0: INTEL_SIP_SMC_REG_UPDATE. + * a1: register address + * a2: write Mask. + * a3: value to write. + * a4-7: not used. + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_REG_ERROR. + * a1-3: Not used. + */ +#define INTEL_SIP_SMC_FUNCID_REG_UPDATE 9 +#define INTEL_SIP_SMC_REG_UPDATE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_UPDATE) + +/* + * Request INTEL_SIP_SMC_RSU_STATUS + * + * Request remote status update boot log, call is synchronous. + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_STATUS + * a1-7 not used + * + * Return status + * a0: Current Image + * a1: Last Failing Image + * a2: Version | State + * a3: Error details | Error location + * + * Or + * + * a0: INTEL_SIP_SMC_RSU_ERROR + */ +#define INTEL_SIP_SMC_FUNCID_RSU_STATUS 11 +#define INTEL_SIP_SMC_RSU_STATUS \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_STATUS) + +/* + * Request INTEL_SIP_SMC_RSU_UPDATE + * + * Request to set the offset of the bitstream to boot after reboot, call + * is synchronous. 
+ * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_UPDATE + * a1 64bit physical address of the configuration data memory in flash + * a2-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + */ +#define INTEL_SIP_SMC_FUNCID_RSU_UPDATE 12 +#define INTEL_SIP_SMC_RSU_UPDATE \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE) +#endif diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h new file mode 100644 index 000000000000..e521f172a47a --- /dev/null +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -0,0 +1,217 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2017-2018, Intel Corporation + */ + +#ifndef __STRATIX10_SVC_CLIENT_H +#define __STRATIX10_SVC_CLIENT_H + +/** + * Service layer driver supports client names + * + * fpga: for FPGA configuration + * rsu: for remote status update + */ +#define SVC_CLIENT_FPGA "fpga" +#define SVC_CLIENT_RSU "rsu" + +/** + * Status of the sent command, in bit number + * + * SVC_COMMAND_STATUS_RECONFIG_REQUEST_OK: + * Secure firmware accepts the request of FPGA reconfiguration. + * + * SVC_STATUS_RECONFIG_BUFFER_SUBMITTED: + * Service client successfully submits FPGA configuration + * data buffer to secure firmware. + * + * SVC_COMMAND_STATUS_RECONFIG_BUFFER_DONE: + * Secure firmware completes data process, ready to accept the + * next WRITE transaction. + * + * SVC_COMMAND_STATUS_RECONFIG_COMPLETED: + * Secure firmware completes FPGA configuration successfully, FPGA should + * be in user mode. + * + * SVC_COMMAND_STATUS_RECONFIG_BUSY: + * FPGA configuration is still in process. + * + * SVC_COMMAND_STATUS_RECONFIG_ERROR: + * Error encountered during FPGA configuration. + * + * SVC_STATUS_RSU_OK: + * Secure firmware accepts the request of remote status update (RSU). + */ +#define SVC_STATUS_RECONFIG_REQUEST_OK 0 +#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1 +#define SVC_STATUS_RECONFIG_BUFFER_DONE 2 +#define SVC_STATUS_RECONFIG_COMPLETED 3 +#define SVC_STATUS_RECONFIG_BUSY 4 +#define SVC_STATUS_RECONFIG_ERROR 5 +#define SVC_STATUS_RSU_OK 6 +#define SVC_STATUS_RSU_ERROR 7 +/** + * Flag bit for COMMAND_RECONFIG + * + * COMMAND_RECONFIG_FLAG_PARTIAL: + * Set to FPGA configuration type (full or partial), the default + * is full reconfig. + */ +#define COMMAND_RECONFIG_FLAG_PARTIAL 0 + +/** + * Timeout settings for service clients: + * timeout value used in Stratix10 FPGA manager driver. 
+ * timeout value used in RSU driver + */ +#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 100 +#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 240 +#define SVC_RSU_REQUEST_TIMEOUT_MS 300 + +struct stratix10_svc_chan; + +/** + * enum stratix10_svc_command_code - supported service commands + * + * @COMMAND_NOOP: do 'dummy' request for integration/debug/trouble-shooting + * + * @COMMAND_RECONFIG: ask for FPGA configuration preparation, return status + * is SVC_STATUS_RECONFIG_REQUEST_OK + * + * @COMMAND_RECONFIG_DATA_SUBMIT: submit buffer(s) of bit-stream data for the + * FPGA configuration, return status is SVC_STATUS_RECONFIG_BUFFER_SUBMITTED, + * or SVC_STATUS_RECONFIG_ERROR + * + * @COMMAND_RECONFIG_DATA_CLAIM: check the status of the configuration, return + * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or + * SVC_STATUS_RECONFIG_ERROR + * + * @COMMAND_RECONFIG_STATUS: check the status of the configuration, return + * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or + * SVC_STATUS_RECONFIG_ERROR + * + * @COMMAND_RSU_STATUS: request remote system update boot log, return status + * is log data or SVC_STATUS_RSU_ERROR + * + * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot, + * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR + */ +enum stratix10_svc_command_code { + COMMAND_NOOP = 0, + COMMAND_RECONFIG, + COMMAND_RECONFIG_DATA_SUBMIT, + COMMAND_RECONFIG_DATA_CLAIM, + COMMAND_RECONFIG_STATUS, + COMMAND_RSU_STATUS, + COMMAND_RSU_UPDATE +}; + +/** + * struct stratix10_svc_client_msg - message sent by client to service + * @payload: starting address of data need be processed + * @payload_length: data size in bytes + * @command: service command + * @arg: args to be passed via registers and not physically mapped buffers + */ +struct stratix10_svc_client_msg { + void *payload; + size_t payload_length; + enum stratix10_svc_command_code command; + u64 arg[3]; +}; + +/** + * struct stratix10_svc_command_config_type - config type + * @flags: flag bit for the type of FPGA configuration + */ +struct stratix10_svc_command_config_type { + u32 flags; +}; + +/** + * struct stratix10_svc_cb_data - callback data structure from service layer + * @status: the status of sent command + * @kaddr1: address of 1st completed data block + * @kaddr2: address of 2nd completed data block + * @kaddr3: address of 3rd completed data block + */ +struct stratix10_svc_cb_data { + u32 status; + void *kaddr1; + void *kaddr2; + void *kaddr3; +}; + +/** + * struct stratix10_svc_client - service client structure + * @dev: the client device + * @receive_cb: callback to provide service client the received data + * @priv: client private data + */ +struct stratix10_svc_client { + struct device *dev; + void (*receive_cb)(struct stratix10_svc_client *client, + struct stratix10_svc_cb_data *cb_data); + void *priv; +}; + +/** + * stratix10_svc_request_channel_byname() - request service channel + * @client: identity of the client requesting the channel + * @name: supporting client name defined above + * + * Return: a pointer to channel assigned to the client on success, + * or ERR_PTR() on error. + */ +struct stratix10_svc_chan +*stratix10_svc_request_channel_byname(struct stratix10_svc_client *client, + const char *name); + +/** + * stratix10_svc_free_channel() - free service channel. 
+ * @chan: service channel to be freed + */ +void stratix10_svc_free_channel(struct stratix10_svc_chan *chan); + +/** + * stratix10_svc_allocate_memory() - allocate the momory + * @chan: service channel assigned to the client + * @size: number of bytes client requests + * + * Service layer allocates the requested number of bytes from the memory + * pool for the client. + * + * Return: the starting address of allocated memory on success, or + * ERR_PTR() on error. + */ +void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan, + size_t size); + +/** + * stratix10_svc_free_memory() - free allocated memory + * @chan: service channel assigned to the client + * @kaddr: starting address of memory to be free back to pool + */ +void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr); + +/** + * stratix10_svc_send() - send a message to the remote + * @chan: service channel assigned to the client + * @msg: message data to be sent, in the format of + * struct stratix10_svc_client_msg + * + * Return: 0 for success, -ENOMEM or -ENOBUFS on error. + */ +int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg); + +/** + * intel_svc_done() - complete service request + * @chan: service channel assigned to the client + * + * This function is used by service client to inform service layer that + * client's service requests are completed, or there is an error in the + * request process. + */ +void stratix10_svc_done(struct stratix10_svc_chan *chan); +#endif + diff --git a/include/linux/font.h b/include/linux/font.h index d6821769dd1e..51b91c8b69d5 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -32,6 +32,7 @@ struct font_desc { #define ACORN8x8_IDX 8 #define MINI4x6_IDX 9 #define FONT6x10_IDX 10 +#define TER16x32_IDX 11 extern const struct font_desc font_vga_8x8, font_vga_8x16, @@ -43,7 +44,8 @@ extern const struct font_desc font_vga_8x8, font_sun_12x22, font_acorn_8x8, font_mini_4x6, - font_6x10; + font_6x10, + font_ter_16x32; /* Find a font with a specific name */ diff --git a/include/linux/fs.h b/include/linux/fs.h index c95c0807471f..811c77743dad 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1044,10 +1044,15 @@ bool opens_in_grace(struct net *); * Obviously, the last two criteria only matter for POSIX locks. 
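
Stepping back to the Stratix10 service-client interface introduced above, a minimal consumer could look roughly like the sketch below. It is editorial and simplified: only the buffer-submission step is shown (a full reconfiguration would also send COMMAND_RECONFIG and poll COMMAND_RECONFIG_STATUS), the callback body is left empty, and all "example_*" names are hypothetical.

#include <linux/err.h>
#include <linux/string.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

static void example_receive_cb(struct stratix10_svc_client *client,
                               struct stratix10_svc_cb_data *data)
{
        /* e.g. record data->status and complete a completion the sender waits on */
}

static int example_submit_bitstream(struct device *dev,
                                    const void *bitstream, size_t size)
{
        struct stratix10_svc_client client = {
                .dev = dev,
                .receive_cb = example_receive_cb,
        };
        struct stratix10_svc_client_msg msg;
        struct stratix10_svc_chan *chan;
        void *buf;
        int ret;

        chan = stratix10_svc_request_channel_byname(&client, SVC_CLIENT_FPGA);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        buf = stratix10_svc_allocate_memory(chan, size);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto free_chan;
        }
        memcpy(buf, bitstream, size);

        msg.command = COMMAND_RECONFIG_DATA_SUBMIT;
        msg.payload = buf;
        msg.payload_length = size;
        ret = stratix10_svc_send(chan, &msg);
        /* ... wait for example_receive_cb() to report SVC_STATUS_RECONFIG_BUFFER_DONE ... */

        stratix10_svc_done(chan);
        stratix10_svc_free_memory(chan, buf);
free_chan:
        stratix10_svc_free_channel(chan);
        return ret;
}
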
*/ struct file_lock { - struct file_lock *fl_next; /* singly linked list for this inode */ + struct file_lock *fl_blocker; /* The lock, that is blocking us */ struct list_head fl_list; /* link into file_lock_context */ struct hlist_node fl_link; /* node in global lists */ - struct list_head fl_block; /* circular list of blocked processes */ + struct list_head fl_blocked_requests; /* list of requests with + * ->fl_blocker pointing here + */ + struct list_head fl_blocked_member; /* node in + * ->fl_blocker->fl_blocked_requests + */ fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; @@ -1119,7 +1124,7 @@ extern void locks_remove_file(struct file *); extern void locks_release_private(struct file_lock *); extern void posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); -extern int posix_unblock_lock(struct file_lock *); +extern int locks_delete_block(struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); @@ -1209,7 +1214,7 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl, return -ENOLCK; } -static inline int posix_unblock_lock(struct file_lock *waiter) +static inline int locks_delete_block(struct file_lock *waiter) { return -ENOENT; } @@ -2021,7 +2026,7 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) .ki_filp = filp, .ki_flags = iocb_flags(filp), .ki_hint = ki_hint_validate(file_write_hint(filp)), - .ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0), + .ki_ioprio = get_current_ioprio(), }; } @@ -3264,8 +3269,12 @@ extern int generic_check_addressable(unsigned, u64); extern int buffer_migrate_page(struct address_space *, struct page *, struct page *, enum migrate_mode); +extern int buffer_migrate_page_norefs(struct address_space *, + struct page *, struct page *, + enum migrate_mode); #else #define buffer_migrate_page NULL +#define buffer_migrate_page_norefs NULL #endif extern int setattr_prepare(struct dentry *, struct iattr *); diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 34cf0fdd7dc7..610815e3f1aa 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) static inline void fscache_retrieval_complete(struct fscache_retrieval *op, int n_pages) { - atomic_sub(n_pages, &op->n_pages); - if (atomic_read(&op->n_pages) <= 0) + if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0) fscache_op_complete(&op->op, false); } diff --git a/include/linux/fsi-occ.h b/include/linux/fsi-occ.h new file mode 100644 index 000000000000..d4cdc2aa6e33 --- /dev/null +++ b/include/linux/fsi-occ.h @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef LINUX_FSI_OCC_H +#define LINUX_FSI_OCC_H + +struct device; + +#define OCC_RESP_CMD_IN_PRG 0xFF +#define OCC_RESP_SUCCESS 0 +#define OCC_RESP_CMD_INVAL 0x11 +#define OCC_RESP_CMD_LEN_INVAL 0x12 +#define OCC_RESP_DATA_INVAL 0x13 +#define OCC_RESP_CHKSUM_ERR 0x14 +#define OCC_RESP_INT_ERR 0x15 +#define OCC_RESP_BAD_STATE 0x16 +#define OCC_RESP_CRIT_EXCEPT 0xE0 +#define OCC_RESP_CRIT_INIT 0xE1 +#define OCC_RESP_CRIT_WATCHDOG 0xE2 +#define OCC_RESP_CRIT_OCB 0xE3 +#define OCC_RESP_CRIT_HW 0xE4 + +int fsi_occ_submit(struct device *dev, const void *request, size_t req_len, + 
void *response, size_t *resp_len); + +#endif /* LINUX_FSI_OCC_H */ diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index 9d3f668df7df..741f567253ef 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -210,8 +210,8 @@ struct mc_cmd_header { }; struct fsl_mc_command { - u64 header; - u64 params[MC_CMD_NUM_OF_PARAMS]; + __le64 header; + __le64 params[MC_CMD_NUM_OF_PARAMS]; }; enum mc_cmd_status { @@ -238,11 +238,11 @@ enum mc_cmd_status { /* Command completion flag */ #define MC_CMD_FLAG_INTR_DIS 0x01 -static inline u64 mc_encode_cmd_header(u16 cmd_id, - u32 cmd_flags, - u16 token) +static inline __le64 mc_encode_cmd_header(u16 cmd_id, + u32 cmd_flags, + u16 token) { - u64 header = 0; + __le64 header = 0; struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header; hdr->cmd_id = cpu_to_le16(cmd_id); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index fd1ce10553bf..2ccb08cb5d6a 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -26,30 +26,46 @@ static inline int fsnotify_parent(const struct path *path, struct dentry *dentry return __fsnotify_parent(path, dentry, mask); } -/* simple call site for access decisions */ +/* + * Simple wrapper to consolidate calls fsnotify_parent()/fsnotify() when + * an event is on a path. + */ +static inline int fsnotify_path(struct inode *inode, const struct path *path, + __u32 mask) +{ + int ret = fsnotify_parent(path, NULL, mask); + + if (ret) + return ret; + return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); +} + +/* Simple call site for access decisions */ static inline int fsnotify_perm(struct file *file, int mask) { + int ret; const struct path *path = &file->f_path; struct inode *inode = file_inode(file); __u32 fsnotify_mask = 0; - int ret; if (file->f_mode & FMODE_NONOTIFY) return 0; if (!(mask & (MAY_READ | MAY_OPEN))) return 0; - if (mask & MAY_OPEN) + if (mask & MAY_OPEN) { fsnotify_mask = FS_OPEN_PERM; - else if (mask & MAY_READ) - fsnotify_mask = FS_ACCESS_PERM; - else - BUG(); - ret = fsnotify_parent(path, NULL, fsnotify_mask); - if (ret) - return ret; + if (file->f_flags & __FMODE_EXEC) { + ret = fsnotify_path(inode, path, FS_OPEN_EXEC_PERM); - return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + if (ret) + return ret; + } + } else if (mask & MAY_READ) { + fsnotify_mask = FS_ACCESS_PERM; + } + + return fsnotify_path(inode, path, fsnotify_mask); } /* @@ -180,10 +196,8 @@ static inline void fsnotify_access(struct file *file) if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; - if (!(file->f_mode & FMODE_NONOTIFY)) { - fsnotify_parent(path, NULL, mask); - fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); - } + if (!(file->f_mode & FMODE_NONOTIFY)) + fsnotify_path(inode, path, mask); } /* @@ -198,10 +212,8 @@ static inline void fsnotify_modify(struct file *file) if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; - if (!(file->f_mode & FMODE_NONOTIFY)) { - fsnotify_parent(path, NULL, mask); - fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); - } + if (!(file->f_mode & FMODE_NONOTIFY)) + fsnotify_path(inode, path, mask); } /* @@ -215,9 +227,10 @@ static inline void fsnotify_open(struct file *file) if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; + if (file->f_flags & __FMODE_EXEC) + mask |= FS_OPEN_EXEC; - fsnotify_parent(path, NULL, mask); - fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + fsnotify_path(inode, path, mask); } /* @@ -233,10 +246,8 @@ static inline void fsnotify_close(struct file *file) if (S_ISDIR(inode->i_mode)) 
mask |= FS_ISDIR; - if (!(file->f_mode & FMODE_NONOTIFY)) { - fsnotify_parent(path, NULL, mask); - fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); - } + if (!(file->f_mode & FMODE_NONOTIFY)) + fsnotify_path(inode, path, mask); } /* diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 135b973e44d1..7639774e7475 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -38,6 +38,7 @@ #define FS_DELETE 0x00000200 /* Subfile was deleted */ #define FS_DELETE_SELF 0x00000400 /* Self was deleted */ #define FS_MOVE_SELF 0x00000800 /* Self was moved */ +#define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */ #define FS_UNMOUNT 0x00002000 /* inode on umount fs */ #define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ @@ -45,6 +46,7 @@ #define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ +#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ #define FS_ISDIR 0x40000000 /* event occurred against dir */ @@ -62,11 +64,13 @@ #define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ - FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM) + FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM | \ + FS_OPEN_EXEC | FS_OPEN_EXEC_PERM) #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) -#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM) +#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ + FS_OPEN_EXEC_PERM) /* Events that can be reported to backends */ #define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ @@ -74,7 +78,8 @@ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ - FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME) + FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME | \ + FS_OPEN_EXEC | FS_OPEN_EXEC_PERM) /* Extra flags that may be reported with event or control handling of events */ #define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index a397907e8d72..730876187344 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -389,6 +389,7 @@ enum { FTRACE_UPDATE_TRACE_FUNC = (1 << 2), FTRACE_START_FUNC_RET = (1 << 3), FTRACE_STOP_FUNC_RET = (1 << 4), + FTRACE_MAY_SLEEP = (1 << 5), }; /* @@ -420,6 +421,9 @@ enum { }; void arch_ftrace_update_code(int command); +void arch_ftrace_update_trampoline(struct ftrace_ops *ops); +void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec); +void arch_ftrace_trampoline_free(struct ftrace_ops *ops); struct ftrace_rec_iter; @@ -749,6 +753,11 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER +struct fgraph_ops { + trace_func_graph_ent_t entryfunc; + trace_func_graph_ret_t retfunc; +}; + /* * Stack of return addresses for functions * of a thread. 
@@ -777,8 +786,11 @@ struct ftrace_ret_stack { extern void return_to_handler(void); extern int -ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, - unsigned long frame_pointer, unsigned long *retp); +function_graph_enter(unsigned long ret, unsigned long func, + unsigned long frame_pointer, unsigned long *retp); + +struct ftrace_ret_stack * +ftrace_graph_get_ret_stack(struct task_struct *task, int idx); unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, unsigned long *retp); @@ -790,11 +802,11 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, */ #define __notrace_funcgraph notrace -#define FTRACE_NOTRACE_DEPTH 65536 #define FTRACE_RETFUNC_DEPTH 50 #define FTRACE_RETSTACK_ALLOC_SIZE 32 -extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, - trace_func_graph_ent_t entryfunc); + +extern int register_ftrace_graph(struct fgraph_ops *ops); +extern void unregister_ftrace_graph(struct fgraph_ops *ops); extern bool ftrace_graph_is_dead(void); extern void ftrace_graph_stop(void); @@ -803,17 +815,10 @@ extern void ftrace_graph_stop(void); extern trace_func_graph_ret_t ftrace_graph_return; extern trace_func_graph_ent_t ftrace_graph_entry; -extern void unregister_ftrace_graph(void); - extern void ftrace_graph_init_task(struct task_struct *t); extern void ftrace_graph_exit_task(struct task_struct *t); extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); -static inline int task_curr_ret_stack(struct task_struct *t) -{ - return t->curr_ret_stack; -} - static inline void pause_graph_tracing(void) { atomic_inc(¤t->tracing_graph_pause); @@ -831,17 +836,9 @@ static inline void ftrace_graph_init_task(struct task_struct *t) { } static inline void ftrace_graph_exit_task(struct task_struct *t) { } static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } -static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, - trace_func_graph_ent_t entryfunc) -{ - return -1; -} -static inline void unregister_ftrace_graph(void) { } - -static inline int task_curr_ret_stack(struct task_struct *tsk) -{ - return -1; -} +/* Define as macros as fgraph_ops may not be defined */ +#define register_ftrace_graph(ops) ({ -1; }) +#define unregister_ftrace_graph(ops) do { } while (0) static inline unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, diff --git a/include/linux/futex.h b/include/linux/futex.h index 821ae502d3d8..ccaef0097785 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -9,9 +9,6 @@ struct inode; struct mm_struct; struct task_struct; -extern int -handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); - /* * Futexes are matched on equal values of this key. * The key type depends on whether it's a shared or private mapping. 
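The hunk above replaces the old entry/return callback pair with a single struct fgraph_ops handle for register_ftrace_graph()/unregister_ftrace_graph(). A minimal sketch of how an in-kernel user might adapt; the callback names and bodies are placeholders, not code from this series.

#include <linux/ftrace.h>
#include <linux/init.h>

static int example_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* non-zero: trace this function */
}

static void example_graph_return(struct ftrace_graph_ret *trace)
{
	/* consume trace->func and timing data here */
}

static struct fgraph_ops example_fgraph_ops = {
	.entryfunc	= example_graph_entry,
	.retfunc	= example_graph_return,
};

/*
 * Old style: register_ftrace_graph(example_graph_return, example_graph_entry);
 * New style: the same ops structure is passed to register and unregister.
 */
static int __init example_init(void)
{
	return register_ftrace_graph(&example_fgraph_ops);
}

static void __exit example_exit(void)
{
	unregister_ftrace_graph(&example_fgraph_ops);
}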
@@ -55,11 +52,6 @@ extern void exit_robust_list(struct task_struct *curr); long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3); -#ifdef CONFIG_HAVE_FUTEX_CMPXCHG -#define futex_cmpxchg_enabled 1 -#else -extern int futex_cmpxchg_enabled; -#endif #else static inline void exit_robust_list(struct task_struct *curr) { diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 872f930f1b06..dd0a452373e7 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -51,7 +51,8 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool); + void *data, struct gen_pool *pool, + unsigned long start_addr); /* * General purpose special memory pool descriptor. @@ -131,24 +132,24 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool); + struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool); + void *data, struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool); + void *data, struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, - void *data, struct gen_pool *pool); + void *data, struct gen_pool *pool, unsigned long start_addr); extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, - struct gen_pool *pool); + struct gen_pool *pool, unsigned long start_addr); extern struct gen_pool *devm_gen_pool_create(struct device *dev, diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 70fc838e6773..06c0fd594097 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -17,6 +17,7 @@ #include <linux/percpu-refcount.h> #include <linux/uuid.h> #include <linux/blk_types.h> +#include <asm/local.h> #ifdef CONFIG_BLOCK @@ -89,6 +90,7 @@ struct disk_stats { unsigned long merges[NR_STAT_GROUPS]; unsigned long io_ticks; unsigned long time_in_queue; + local_t in_flight[2]; }; #define PARTITION_META_INFO_VOLNAMELTH 64 @@ -122,14 +124,13 @@ struct hd_struct { int make_it_fail; #endif unsigned long stamp; - atomic_t in_flight[2]; #ifdef CONFIG_SMP struct disk_stats __percpu *dkstats; #else struct disk_stats dkstats; #endif struct percpu_ref ref; - struct rcu_head rcu_head; + struct rcu_work rcu_work; }; #define GENHD_FL_REMOVABLE 1 @@ -295,8 +296,11 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, #define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) #define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) -#define __part_stat_add(cpu, part, field, addnd) \ - (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd)) +#define part_stat_get_cpu(part, field, cpu) \ + (per_cpu_ptr((part)->dkstats, (cpu))->field) + +#define part_stat_get(part, field) \ + part_stat_get_cpu(part, field, smp_processor_id()) #define part_stat_read(part, field) \ ({ \ @@ -333,10 +337,9 @@ static inline void free_part_stats(struct hd_struct *part) 
#define part_stat_lock() ({ rcu_read_lock(); 0; }) #define part_stat_unlock() rcu_read_unlock() -#define __part_stat_add(cpu, part, field, addnd) \ - ((part)->dkstats.field += addnd) - -#define part_stat_read(part, field) ((part)->dkstats.field) +#define part_stat_get(part, field) ((part)->dkstats.field) +#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field) +#define part_stat_read(part, field) part_stat_get(part, field) static inline void part_stat_set_all(struct hd_struct *part, int value) { @@ -362,22 +365,33 @@ static inline void free_part_stats(struct hd_struct *part) part_stat_read(part, field[STAT_WRITE]) + \ part_stat_read(part, field[STAT_DISCARD])) -#define part_stat_add(cpu, part, field, addnd) do { \ - __part_stat_add((cpu), (part), field, addnd); \ +#define __part_stat_add(part, field, addnd) \ + (part_stat_get(part, field) += (addnd)) + +#define part_stat_add(part, field, addnd) do { \ + __part_stat_add((part), field, addnd); \ if ((part)->partno) \ - __part_stat_add((cpu), &part_to_disk((part))->part0, \ + __part_stat_add(&part_to_disk((part))->part0, \ field, addnd); \ } while (0) -#define part_stat_dec(cpu, gendiskp, field) \ - part_stat_add(cpu, gendiskp, field, -1) -#define part_stat_inc(cpu, gendiskp, field) \ - part_stat_add(cpu, gendiskp, field, 1) -#define part_stat_sub(cpu, gendiskp, field, subnd) \ - part_stat_add(cpu, gendiskp, field, -subnd) - -void part_in_flight(struct request_queue *q, struct hd_struct *part, - unsigned int inflight[2]); +#define part_stat_dec(gendiskp, field) \ + part_stat_add(gendiskp, field, -1) +#define part_stat_inc(gendiskp, field) \ + part_stat_add(gendiskp, field, 1) +#define part_stat_sub(gendiskp, field, subnd) \ + part_stat_add(gendiskp, field, -subnd) + +#define part_stat_local_dec(gendiskp, field) \ + local_dec(&(part_stat_get(gendiskp, field))) +#define part_stat_local_inc(gendiskp, field) \ + local_inc(&(part_stat_get(gendiskp, field))) +#define part_stat_local_read(gendiskp, field) \ + local_read(&(part_stat_get(gendiskp, field))) +#define part_stat_local_read_cpu(gendiskp, field, cpu) \ + local_read(&(part_stat_get_cpu(gendiskp, field, cpu))) + +unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part); void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, unsigned int inflight[2]); void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, @@ -398,8 +412,7 @@ static inline void free_part_info(struct hd_struct *part) kfree(part->info); } -/* block/blk-core.c */ -extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part); +void update_io_ticks(struct hd_struct *part, unsigned long now); /* block/genhd.c */ extern void device_add_disk(struct device *parent, struct gendisk *disk, diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h index 5972e4969197..eeae59d3ceb7 100644 --- a/include/linux/genl_magic_struct.h +++ b/include/linux/genl_magic_struct.h @@ -191,6 +191,7 @@ static inline void ct_assert_unique_operations(void) { switch (0) { #include GENL_MAGIC_INCLUDE_FILE + case 0: ; } } @@ -209,6 +210,7 @@ static inline void ct_assert_unique_top_level_attributes(void) { switch (0) { #include GENL_MAGIC_INCLUDE_FILE + case 0: ; } } @@ -218,7 +220,8 @@ static inline void ct_assert_unique_top_level_attributes(void) static inline void ct_assert_unique_ ## s_name ## _attributes(void) \ { \ switch (0) { \ - s_fields \ + s_fields \ + case 0: \ ; \ } \ } diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 
76f8db0b0e71..5f5e25fd6149 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -81,7 +81,7 @@ struct vm_area_struct; * * %__GFP_HARDWALL enforces the cpuset memory allocation policy. * - * %__GFP_THISNODE forces the allocation to be satisified from the requested + * %__GFP_THISNODE forces the allocation to be satisfied from the requested * node with no fallbacks or placement policy enforcements. * * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. @@ -510,18 +510,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order) } extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, - int node); + int node, bool hugepage); +#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ + alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) #else #define alloc_pages(gfp_mask, order) \ alloc_pages_node(numa_node_id(), gfp_mask, order) -#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\ +#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ + alloc_pages(gfp_mask, order) +#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages(gfp_mask, order) #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) #define alloc_page_vma(gfp_mask, vma, addr) \ - alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ - alloc_pages_vma(gfp_mask, 0, vma, addr, node) + alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index f2f887795d43..9ddcf50a3c59 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -104,6 +104,7 @@ struct gpio_descs *__must_check devm_gpiod_get_array_optional(struct device *dev, const char *con_id, enum gpiod_flags flags); void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); +void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc); void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs); int gpiod_get_direction(struct gpio_desc *desc); @@ -162,7 +163,7 @@ int gpiod_is_active_low(const struct gpio_desc *desc); int gpiod_cansleep(const struct gpio_desc *desc); int gpiod_to_irq(const struct gpio_desc *desc); -void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name); +int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name); /* Convert between the old gpio_ and new gpiod_ interfaces */ struct gpio_desc *gpio_to_desc(unsigned gpio); @@ -172,6 +173,10 @@ int desc_to_gpio(const struct gpio_desc *desc); struct device_node; struct fwnode_handle; +struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, struct device_node *node, const char *propname, int index, @@ -245,6 +250,15 @@ static inline void gpiod_put(struct gpio_desc *desc) WARN_ON(1); } +static inline void devm_gpiod_unhinge(struct device *dev, + struct gpio_desc *desc) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + static inline void gpiod_put_array(struct gpio_descs *descs) { might_sleep(); @@ -495,15 +509,17 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc) return -EINVAL; } -static inline void 
gpiod_set_consumer_name(struct gpio_desc *desc, const char *name) +static inline int gpiod_set_consumer_name(struct gpio_desc *desc, + const char *name) { /* GPIO can never have been requested */ WARN_ON(1); + return -EINVAL; } static inline struct gpio_desc *gpio_to_desc(unsigned gpio) { - return ERR_PTR(-EINVAL); + return NULL; } static inline int desc_to_gpio(const struct gpio_desc *desc) @@ -518,6 +534,15 @@ struct device_node; struct fwnode_handle; static inline +struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, struct device_node *node, const char *propname, int index, diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 2db62b550b95..07cddbf45186 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -17,6 +17,7 @@ struct device_node; struct seq_file; struct gpio_device; struct module; +enum gpiod_flags; #ifdef CONFIG_GPIOLIB @@ -166,11 +167,6 @@ struct gpio_irq_chip { */ void (*irq_disable)(struct irq_data *data); }; - -static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip) -{ - return container_of(chip, struct gpio_irq_chip, chip); -} #endif /** @@ -422,7 +418,6 @@ static inline int gpiochip_add(struct gpio_chip *chip) extern void gpiochip_remove(struct gpio_chip *chip); extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip, void *data); -extern void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip); extern struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *chip, void *data)); @@ -610,7 +605,8 @@ gpiochip_remove_pin_ranges(struct gpio_chip *chip) #endif /* CONFIG_PINCTRL */ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum, - const char *label); + const char *label, + enum gpiod_flags flags); void gpiochip_free_own_desc(struct gpio_desc *desc); #else /* CONFIG_GPIOLIB */ diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 4f3febc0f971..d2bacf502429 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -163,6 +163,9 @@ struct hdmi_avi_infoframe { int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame); ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, size_t size); +ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame, + void *buffer, size_t size); +int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame); enum hdmi_spd_sdi { HDMI_SPD_SDI_UNKNOWN, @@ -194,6 +197,9 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame, const char *vendor, const char *product); ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer, size_t size); +ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame, + void *buffer, size_t size); +int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame); enum hdmi_audio_coding_type { HDMI_AUDIO_CODING_TYPE_STREAM, @@ -272,6 +278,9 @@ struct hdmi_audio_infoframe { int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame); ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, void *buffer, size_t size); +ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame, + void *buffer, size_t size); +int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame); enum hdmi_3d_structure { HDMI_3D_STRUCTURE_INVALID = 
-1, @@ -299,6 +308,9 @@ struct hdmi_vendor_infoframe { int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame); ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, void *buffer, size_t size); +ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame, + void *buffer, size_t size); +int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame); union hdmi_vendor_any_infoframe { struct { @@ -330,10 +342,14 @@ union hdmi_infoframe { struct hdmi_audio_infoframe audio; }; -ssize_t -hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size); -int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer); +ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, + size_t size); +ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame, + void *buffer, size_t size); +int hdmi_infoframe_check(union hdmi_infoframe *frame); +int hdmi_infoframe_unpack(union hdmi_infoframe *frame, + const void *buffer, size_t size); void hdmi_infoframe_log(const char *level, struct device *dev, - union hdmi_infoframe *frame); + const union hdmi_infoframe *frame); #endif /* _DRM_HDMI_H */ diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 331dc377c275..dc12f5c4b076 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, * @attr_usage_id: Attribute usage id as per spec * @report_id: Report id to look for * @flag: Synchronous or asynchronous read +* @is_signed: If true then fields < 32 bits will be sign-extended * * Issues a synchronous or asynchronous read request for an input attribute. * Returns data upto 32 bits. @@ -190,7 +191,8 @@ enum sensor_hub_read_flags { int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, u32 usage_id, u32 attr_usage_id, u32 report_id, - enum sensor_hub_read_flags flag + enum sensor_hub_read_flags flag, + bool is_signed ); /** diff --git a/include/linux/hid.h b/include/linux/hid.h index 2827b87590d8..d99287327ef2 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -219,6 +219,7 @@ struct hid_item { #define HID_GD_VBRZ 0x00010045 #define HID_GD_VNO 0x00010046 #define HID_GD_FEATURE 0x00010047 +#define HID_GD_RESOLUTION_MULTIPLIER 0x00010048 #define HID_GD_SYSTEM_CONTROL 0x00010080 #define HID_GD_UP 0x00010090 #define HID_GD_DOWN 0x00010091 @@ -232,12 +233,14 @@ struct hid_item { #define HID_DC_BATTERYSTRENGTH 0x00060020 #define HID_CP_CONSUMER_CONTROL 0x000c0001 +#define HID_CP_AC_PAN 0x000c0238 #define HID_DG_DIGITIZER 0x000d0001 #define HID_DG_PEN 0x000d0002 #define HID_DG_LIGHTPEN 0x000d0003 #define HID_DG_TOUCHSCREEN 0x000d0004 #define HID_DG_TOUCHPAD 0x000d0005 +#define HID_DG_WHITEBOARD 0x000d0006 #define HID_DG_STYLUS 0x000d0020 #define HID_DG_PUCK 0x000d0021 #define HID_DG_FINGER 0x000d0022 @@ -427,6 +430,7 @@ struct hid_local { */ struct hid_collection { + struct hid_collection *parent; unsigned type; unsigned usage; unsigned level; @@ -436,12 +440,16 @@ struct hid_usage { unsigned hid; /* hid usage code */ unsigned collection_index; /* index into collection array */ unsigned usage_index; /* index into usage array */ + __s8 resolution_multiplier;/* Effective Resolution Multiplier + (HUT v1.12, 4.3.1), default: 1 */ /* hidinput data */ + __s8 wheel_factor; /* 120/resolution_multiplier */ __u16 code; /* input driver code */ __u8 type; /* input driver type */ __s8 hat_min; /* hat switch fun */ 
__s8 hat_max; /* ditto */ __s8 hat_dir; /* ditto */ + __s16 wheel_accumulated; /* hi-res wheel */ }; struct hid_input; @@ -650,6 +658,7 @@ struct hid_parser { unsigned int *collection_stack; unsigned int collection_stack_ptr; unsigned int collection_stack_size; + struct hid_collection *active_collection; struct hid_device *device; unsigned int scan_flags; }; @@ -722,8 +731,8 @@ struct hid_usage_id { * input will not be passed to raw_event unless hid_device_io_start is * called. * - * raw_event and event should return 0 on no action performed, 1 when no - * further processing should be done and negative on error + * raw_event and event should return negative on error, any other value will + * pass the event on to .event() typically return 0 for success. * * input_mapping shall return a negative value to completely ignore this usage * (e.g. doubled or invalid usage), zero to continue with parsing of this @@ -836,7 +845,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev, /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ /* We ignore a few input applications that are not widely used */ -#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006))) +#define IS_INPUT_APPLICATION(a) \ + (((a >= HID_UP_GENDESK) && (a <= HID_GD_MULTIAXIS)) \ + || ((a >= HID_DG_PEN) && (a <= HID_DG_WHITEBOARD)) \ + || (a == HID_GD_SYSTEM_CONTROL) || (a == HID_CP_CONSUMER_CONTROL) \ + || (a == HID_GD_WIRELESS_RADIO_CTLS)) /* HID core API */ @@ -892,6 +905,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid, unsigned int type, unsigned int id, unsigned int field_index, unsigned int report_counts); + +void hid_setup_resolution_multiplier(struct hid_device *hid); int hid_open_report(struct hid_device *device); int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); @@ -1139,34 +1154,6 @@ static inline u32 hid_report_len(struct hid_report *report) int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt); - -/** - * struct hid_scroll_counter - Utility class for processing high-resolution - * scroll events. - * @dev: the input device for which events should be reported. - * @microns_per_hi_res_unit: the amount moved by the user's finger for each - * high-resolution unit reported by the mouse, in - * microns. - * @resolution_multiplier: the wheel's resolution in high-resolution mode as a - * multiple of its lower resolution. For example, if - * moving the wheel by one "notch" would result in a - * value of 1 in low-resolution mode but 8 in - * high-resolution, the multiplier is 8. - * @remainder: counts the number of high-resolution units moved since the last - * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should - * only be used by class methods. 
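The new wheel_factor (documented as 120/resolution_multiplier) and wheel_accumulated fields take over from the hid_scroll_counter helper being removed here. As a simplified model of the arithmetic only, not the actual hid-input code, each high-resolution movement adds value * wheel_factor to the accumulator and one legacy notch is emitted for every 120 units gathered:

#include <linux/hid.h>

/* Illustrative helper; the real logic lives in hid-input, not this header. */
static void example_accumulate_hi_res(struct hid_usage *usage, int value,
				      int *notches)
{
	usage->wheel_accumulated += value * usage->wheel_factor;

	*notches = usage->wheel_accumulated / 120;	/* whole low-res notches */
	usage->wheel_accumulated -= *notches * 120;	/* keep the remainder */
}

With resolution_multiplier == 8, for example, wheel_factor is 15, so eight hi-res ticks of value 1 add up to exactly one low-resolution (REL_WHEEL-style) notch.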
- */ -struct hid_scroll_counter { - struct input_dev *dev; - int microns_per_hi_res_unit; - int resolution_multiplier; - - int remainder; -}; - -void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter, - int hi_res_value); - /* HID quirks API */ unsigned long hid_lookup_quirk(const struct hid_device *hdev); int hid_quirks_init(char **quirks_param, __u16 bus, int count); diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 0690679832d4..ea5cdbd8c2c3 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -36,7 +36,31 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size) /* declarations for linux/mm/highmem.c */ unsigned int nr_free_highpages(void); -extern unsigned long totalhigh_pages; +extern atomic_long_t _totalhigh_pages; +static inline unsigned long totalhigh_pages(void) +{ + return (unsigned long)atomic_long_read(&_totalhigh_pages); +} + +static inline void totalhigh_pages_inc(void) +{ + atomic_long_inc(&_totalhigh_pages); +} + +static inline void totalhigh_pages_dec(void) +{ + atomic_long_dec(&_totalhigh_pages); +} + +static inline void totalhigh_pages_add(long count) +{ + atomic_long_add(count, &_totalhigh_pages); +} + +static inline void totalhigh_pages_set(long val) +{ + atomic_long_set(&_totalhigh_pages, val); +} void kmap_flush_unused(void); @@ -51,7 +75,7 @@ static inline struct page *kmap_to_page(void *addr) return virt_to_page(addr); } -#define totalhigh_pages 0UL +static inline unsigned long totalhigh_pages(void) { return 0UL; } #ifndef ARCH_HAS_KMAP static inline void *kmap(struct page *page) diff --git a/include/linux/hmm.h b/include/linux/hmm.h index c6fb869a81c0..66f9ebbb1df3 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -69,6 +69,7 @@ #define LINUX_HMM_H #include <linux/kconfig.h> +#include <asm/pgtable.h> #if IS_ENABLED(CONFIG_HMM) @@ -486,6 +487,7 @@ struct hmm_devmem_ops { * @device: device to bind resource to * @ops: memory operations callback * @ref: per CPU refcount + * @page_fault: callback when CPU fault on an unaddressable device page * * This an helper structure for device drivers that do not wish to implement * the gory details related to hotplugging new memoy and allocating struct @@ -493,7 +495,28 @@ struct hmm_devmem_ops { * * Device drivers can directly use ZONE_DEVICE memory on their own if they * wish to do so. + * + * The page_fault() callback must migrate page back, from device memory to + * system memory, so that the CPU can access it. This might fail for various + * reasons (device issues, device have been unplugged, ...). When such error + * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and + * set the CPU page table entry to "poisoned". + * + * Note that because memory cgroup charges are transferred to the device memory, + * this should never fail due to memory restrictions. However, allocation + * of a regular system page might still fail because we are out of memory. If + * that happens, the page_fault() callback must return VM_FAULT_OOM. + * + * The page_fault() callback can also try to migrate back multiple pages in one + * chunk, as an optimization. It must, however, prioritize the faulting address + * over all the others. 
*/ +typedef int (*dev_page_fault_t)(struct vm_area_struct *vma, + unsigned long addr, + const struct page *page, + unsigned int flags, + pmd_t *pmdp); + struct hmm_devmem { struct completion completion; unsigned long pfn_first; @@ -503,6 +526,7 @@ struct hmm_devmem { struct dev_pagemap pagemap; const struct hmm_devmem_ops *ops; struct percpu_ref ref; + dev_page_fault_t page_fault; }; /* @@ -512,8 +536,7 @@ struct hmm_devmem { * enough and allocate struct page for it. * * The device driver can wrap the hmm_devmem struct inside a private device - * driver struct. The device driver must call hmm_devmem_remove() before the - * device goes away and before freeing the hmm_devmem struct memory. + * driver struct. */ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, struct device *device, @@ -521,7 +544,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops, struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops, struct device *device, struct resource *res); -void hmm_devmem_remove(struct hmm_devmem *devmem); /* * hmm_devmem_page_set_drvdata - set per-page driver data field diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 3892e9c8b2de..2e8957eac4d4 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -1,6 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * include/linux/hrtimer.h - * * hrtimers - High-resolution kernel timers * * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> @@ -9,8 +8,6 @@ * data type definitions, declarations, prototypes * * Started by: Thomas Gleixner and Ingo Molnar - * - * For licencing details see kernel-base/COPYING */ #ifndef _LINUX_HRTIMER_H #define _LINUX_HRTIMER_H diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 4663ee96cf59..381e872bfde0 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -93,7 +93,11 @@ extern bool is_vma_temporary_stack(struct vm_area_struct *vma); extern unsigned long transparent_hugepage_flags; -static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) +/* + * to be used on vmas which are known to support THP. 
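The dev_page_fault_t contract spelled out above (migrate the page back to system memory, return VM_FAULT_SIGBUS on failure, VM_FAULT_OOM only when a system page cannot be allocated) suggests a callback of roughly the following shape. The migration helper is a stand-in for whatever device-specific path a driver uses; nothing here is defined by this series.

#include <linux/mm.h>

/* Placeholder for the driver's real device-to-system migration path. */
static int example_migrate_to_ram(struct vm_area_struct *vma,
				  unsigned long addr, const struct page *page,
				  unsigned int flags, pmd_t *pmdp)
{
	return 0;
}

static int example_devmem_fault(struct vm_area_struct *vma,
				unsigned long addr,
				const struct page *page,
				unsigned int flags,
				pmd_t *pmdp)
{
	int ret;

	ret = example_migrate_to_ram(vma, addr, page, flags, pmdp);
	if (ret == -ENOMEM)
		return VM_FAULT_OOM;	/* no system page could be allocated */
	if (ret)
		return VM_FAULT_SIGBUS;	/* device gone, I/O error, ... */

	return 0;
}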
+ * Use transparent_hugepage_enabled otherwise + */ +static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) { if (vma->vm_flags & VM_NOHUGEPAGE) return false; @@ -117,6 +121,8 @@ static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) return false; } +bool transparent_hugepage_enabled(struct vm_area_struct *vma); + #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) @@ -257,6 +263,11 @@ static inline bool thp_migration_supported(void) #define hpage_nr_pages(x) 1 +static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) +{ + return false; +} + static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma) { return false; diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h index 1c7b89ae6bdc..473897bbd898 100644 --- a/include/linux/hwmon-sysfs.h +++ b/include/linux/hwmon-sysfs.h @@ -33,10 +33,28 @@ struct sensor_device_attribute{ { .dev_attr = __ATTR(_name, _mode, _show, _store), \ .index = _index } +#define SENSOR_ATTR_RO(_name, _func, _index) \ + SENSOR_ATTR(_name, 0444, _func##_show, NULL, _index) + +#define SENSOR_ATTR_RW(_name, _func, _index) \ + SENSOR_ATTR(_name, 0644, _func##_show, _func##_store, _index) + +#define SENSOR_ATTR_WO(_name, _func, _index) \ + SENSOR_ATTR(_name, 0200, NULL, _func##_store, _index) + #define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \ struct sensor_device_attribute sensor_dev_attr_##_name \ = SENSOR_ATTR(_name, _mode, _show, _store, _index) +#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index) \ + SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index) + +#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index) \ + SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index) + +#define SENSOR_DEVICE_ATTR_WO(_name, _func, _index) \ + SENSOR_DEVICE_ATTR(_name, 0200, NULL, _func##_store, _index) + struct sensor_device_attribute_2 { struct device_attribute dev_attr; u8 index; @@ -50,8 +68,29 @@ struct sensor_device_attribute_2 { .index = _index, \ .nr = _nr } +#define SENSOR_ATTR_2_RO(_name, _func, _nr, _index) \ + SENSOR_ATTR_2(_name, 0444, _func##_show, NULL, _nr, _index) + +#define SENSOR_ATTR_2_RW(_name, _func, _nr, _index) \ + SENSOR_ATTR_2(_name, 0644, _func##_show, _func##_store, _nr, _index) + +#define SENSOR_ATTR_2_WO(_name, _func, _nr, _index) \ + SENSOR_ATTR_2(_name, 0200, NULL, _func##_store, _nr, _index) + #define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \ struct sensor_device_attribute_2 sensor_dev_attr_##_name \ = SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) +#define SENSOR_DEVICE_ATTR_2_RO(_name, _func, _nr, _index) \ + SENSOR_DEVICE_ATTR_2(_name, 0444, _func##_show, NULL, \ + _nr, _index) + +#define SENSOR_DEVICE_ATTR_2_RW(_name, _func, _nr, _index) \ + SENSOR_DEVICE_ATTR_2(_name, 0644, _func##_show, _func##_store, \ + _nr, _index) + +#define SENSOR_DEVICE_ATTR_2_WO(_name, _func, _nr, _index) \ + SENSOR_DEVICE_ATTR_2(_name, 0200, NULL, _func##_store, \ + _nr, _index) + #endif /* _LINUX_HWMON_SYSFS_H */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index b3e24368930a..f0885cc01db6 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -831,15 +831,6 @@ struct vmbus_channel { */ struct list_head sc_list; /* - * Current number of sub-channels. - */ - int num_sc; - /* - * Number of a sub-channel (position within sc_list) which is supposed - * to be used as the next outgoing channel. 
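The new SENSOR_DEVICE_ATTR_RO/RW/WO helpers expect show/store callbacks named <func>_show and <func>_store. A minimal read-only example (the attribute name and returned value are made up for illustration):

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>

static ssize_t temp1_input_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int index = to_sensor_dev_attr(attr)->index;

	/* A real driver would read the sensor selected by 'index' here. */
	return sprintf(buf, "%d\n", 25000 + index);
}

/* Expands to sensor_dev_attr_temp1_input with 0444 permissions. */
static SENSOR_DEVICE_ATTR_RO(temp1_input, temp1_input, 0);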
- */ - int next_oc; - /* * The primary channel this sub-channel belongs to. * This will be NULL for the primary channel. */ @@ -905,6 +896,13 @@ struct vmbus_channel { bool probe_done; + /* + * We must offload the handling of the primary/sub channels + * from the single-threaded vmbus_connection.work_queue to + * two different workqueue, otherwise we can block + * vmbus_connection.work_queue and hang: see vmbus_process_offer(). + */ + struct work_struct add_channel_work; }; static inline bool is_hvsock_channel(const struct vmbus_channel *c) @@ -966,14 +964,6 @@ void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, void (*chn_rescind_cb)(struct vmbus_channel *)); /* - * Retrieve the (sub) channel on which to send an outgoing request. - * When a primary channel has multiple sub-channels, we choose a - * channel whose VCPU binding is closest to the VCPU on which - * this call is being made. - */ -struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary); - -/* * Check if sub-channels have already been offerred. This API will be useful * when the driver is unloaded after establishing sub-channels. In this case, * when the driver is re-loaded, the driver would have to check if the diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h new file mode 100644 index 000000000000..73b0982cc519 --- /dev/null +++ b/include/linux/i3c/ccc.h @@ -0,0 +1,385 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Cadence Design Systems Inc. + * + * Author: Boris Brezillon <boris.brezillon@bootlin.com> + */ + +#ifndef I3C_CCC_H +#define I3C_CCC_H + +#include <linux/bitops.h> +#include <linux/i3c/device.h> + +/* I3C CCC (Common Command Codes) related definitions */ +#define I3C_CCC_DIRECT BIT(7) + +#define I3C_CCC_ID(id, broadcast) \ + ((id) | ((broadcast) ? 0 : I3C_CCC_DIRECT)) + +/* Commands valid in both broadcast and unicast modes */ +#define I3C_CCC_ENEC(broadcast) I3C_CCC_ID(0x0, broadcast) +#define I3C_CCC_DISEC(broadcast) I3C_CCC_ID(0x1, broadcast) +#define I3C_CCC_ENTAS(as, broadcast) I3C_CCC_ID(0x2 + (as), broadcast) +#define I3C_CCC_RSTDAA(broadcast) I3C_CCC_ID(0x6, broadcast) +#define I3C_CCC_SETMWL(broadcast) I3C_CCC_ID(0x9, broadcast) +#define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast) +#define I3C_CCC_SETXTIME(broadcast) ((broadcast) ? 0x28 : 0x98) +#define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 0x61 : 0xe0)) + +/* Broadcast-only commands */ +#define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true) +#define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true) +#define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true) +#define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true) + +/* Unicast-only commands */ +#define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false) +#define I3C_CCC_SETNEWDA I3C_CCC_ID(0x8, false) +#define I3C_CCC_GETMWL I3C_CCC_ID(0xb, false) +#define I3C_CCC_GETMRL I3C_CCC_ID(0xc, false) +#define I3C_CCC_GETPID I3C_CCC_ID(0xd, false) +#define I3C_CCC_GETBCR I3C_CCC_ID(0xe, false) +#define I3C_CCC_GETDCR I3C_CCC_ID(0xf, false) +#define I3C_CCC_GETSTATUS I3C_CCC_ID(0x10, false) +#define I3C_CCC_GETACCMST I3C_CCC_ID(0x11, false) +#define I3C_CCC_SETBRGTGT I3C_CCC_ID(0x13, false) +#define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false) +#define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false) +#define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false) + +#define I3C_CCC_EVENT_SIR BIT(0) +#define I3C_CCC_EVENT_MR BIT(1) +#define I3C_CCC_EVENT_HJ BIT(3) + +/** + * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC + * + * @events: bitmask of I3C_CCC_EVENT_xxx events. 
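A quick check of the I3C_CCC_ID() arithmetic above: the broadcast form keeps the raw id, while the directed (unicast) form sets bit 7 (I3C_CCC_DIRECT). The snippet is only illustrative and matches the defines that follow.

#include <linux/i3c/ccc.h>

static void example_ccc_codes(void)
{
	/* ENEC: 0x00 when broadcast, 0x80 when sent to a single device. */
	u8 enec_bcast = I3C_CCC_ENEC(true);	/* 0x0 | 0        == 0x00 */
	u8 enec_direct = I3C_CCC_ENEC(false);	/* 0x0 | BIT(7)   == 0x80 */

	/* ENTHDR0 is broadcast-only: 0x20 + 0. */
	u8 enthdr0 = I3C_CCC_ENTHDR(0);		/* == 0x20 */

	(void)enec_bcast;
	(void)enec_direct;
	(void)enthdr0;
}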
+ * + * Depending on the CCC command, the specific events coming from all devices + * (broadcast version) or a specific device (unicast version) will be + * enabled (ENEC) or disabled (DISEC). + */ +struct i3c_ccc_events { + u8 events; +}; + +/** + * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC + * + * @len: maximum write length in bytes + * + * The maximum write length is only applicable to SDR private messages or + * extended Write CCCs (like SETXTIME). + */ +struct i3c_ccc_mwl { + __be16 len; +}; + +/** + * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC + * + * @len: maximum read length in bytes + * @ibi_len: maximum IBI payload length + * + * The maximum read length is only applicable to SDR private messages or + * extended Read CCCs (like GETXTIME). + * The IBI length is only valid if the I3C slave is IBI capable + * (%I3C_BCR_IBI_REQ_CAP is set). + */ +struct i3c_ccc_mrl { + __be16 read_len; + u8 ibi_len; +} __packed; + +/** + * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS + * + * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is + * describing an I2C slave. + * @dcr: DCR value (not applicable to entries describing I2C devices) + * @lvr: LVR value (not applicable to entries describing I3C devices) + * @bcr: BCR value or 0 if this entry is describing an I2C slave + * @static_addr: static address or 0 if the device does not have a static + * address + * + * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc + * descriptors (one entry per I3C/I2C dev controlled by the master). + */ +struct i3c_ccc_dev_desc { + u8 dyn_addr; + union { + u8 dcr; + u8 lvr; + }; + u8 bcr; + u8 static_addr; +}; + +/** + * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC + * + * @count: number of dev descriptors + * @master: descriptor describing the current master + * @slaves: array of descriptors describing slaves controlled by the + * current master + * + * Information passed to the broadcast DEFSLVS to propagate device + * information to all masters currently acting as slaves on the bus. + * This is only meaningful if you have more than one master. + */ +struct i3c_ccc_defslvs { + u8 count; + struct i3c_ccc_dev_desc master; + struct i3c_ccc_dev_desc slaves[0]; +} __packed; + +/** + * enum i3c_ccc_test_mode - enum listing all available test modes + * + * @I3C_CCC_EXIT_TEST_MODE: exit test mode + * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode + */ +enum i3c_ccc_test_mode { + I3C_CCC_EXIT_TEST_MODE, + I3C_CCC_VENDOR_TEST_MODE, +}; + +/** + * struct i3c_ccc_enttm - payload passed to ENTTM CCC + * + * @mode: one of the &enum i3c_ccc_test_mode modes + * + * Information passed to the ENTTM CCC to instruct an I3C device to enter a + * specific test mode. + */ +struct i3c_ccc_enttm { + u8 mode; +}; + +/** + * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs + * + * @addr: dynamic address to assign to an I3C device + * + * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the + * dynamic address of an I3C device. 
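As a tiny illustration of the ENEC/DISEC payload documented at the top of this file, enabling in-band interrupts and hot-join while leaving mastership requests untouched would fill struct i3c_ccc_events as below; how the payload is attached to a command is shown further down with struct i3c_ccc_cmd.

#include <linux/i3c/ccc.h>

static void example_fill_enec_payload(struct i3c_ccc_events *events)
{
	/* Enable SIR (IBI) and Hot-Join; MR is not touched by this ENEC. */
	events->events = I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_HJ;
}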
+ */ +struct i3c_ccc_setda { + u8 addr; +}; + +/** + * struct i3c_ccc_getpid - payload passed to GETPID CCC + * + * @pid: 48 bits PID in big endian + */ +struct i3c_ccc_getpid { + u8 pid[6]; +}; + +/** + * struct i3c_ccc_getbcr - payload passed to GETBCR CCC + * + * @bcr: BCR (Bus Characteristic Register) value + */ +struct i3c_ccc_getbcr { + u8 bcr; +}; + +/** + * struct i3c_ccc_getdcr - payload passed to GETDCR CCC + * + * @dcr: DCR (Device Characteristic Register) value + */ +struct i3c_ccc_getdcr { + u8 dcr; +}; + +#define I3C_CCC_STATUS_PENDING_INT(status) ((status) & GENMASK(3, 0)) +#define I3C_CCC_STATUS_PROTOCOL_ERROR BIT(5) +#define I3C_CCC_STATUS_ACTIVITY_MODE(status) \ + (((status) & GENMASK(7, 6)) >> 6) + +/** + * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC + * + * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more + * information). + */ +struct i3c_ccc_getstatus { + __be16 status; +}; + +/** + * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC + * + * @newmaster: address of the master taking bus ownership + */ +struct i3c_ccc_getaccmst { + u8 newmaster; +}; + +/** + * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor + * + * @addr: dynamic address of the bridged device + * @id: ID of the slave device behind the bridge + */ +struct i3c_ccc_bridged_slave_desc { + u8 addr; + __be16 id; +} __packed; + +/** + * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC + * + * @count: number of bridged slaves + * @bslaves: bridged slave descriptors + */ +struct i3c_ccc_setbrgtgt { + u8 count; + struct i3c_ccc_bridged_slave_desc bslaves[0]; +} __packed; + +/** + * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers + */ +enum i3c_sdr_max_data_rate { + I3C_SDR0_FSCL_MAX, + I3C_SDR1_FSCL_8MHZ, + I3C_SDR2_FSCL_6MHZ, + I3C_SDR3_FSCL_4MHZ, + I3C_SDR4_FSCL_2MHZ, +}; + +/** + * enum i3c_tsco - clock to data turn-around + */ +enum i3c_tsco { + I3C_TSCO_8NS, + I3C_TSCO_9NS, + I3C_TSCO_10NS, + I3C_TSCO_11NS, + I3C_TSCO_12NS, +}; + +#define I3C_CCC_MAX_SDR_FSCL_MASK GENMASK(2, 0) +#define I3C_CCC_MAX_SDR_FSCL(x) ((x) & I3C_CCC_MAX_SDR_FSCL_MASK) + +/** + * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC + * + * @maxwr: write limitations + * @maxrd: read limitations + * @maxrdturn: maximum read turn-around expressed micro-seconds and + * little-endian formatted + */ +struct i3c_ccc_getmxds { + u8 maxwr; + u8 maxrd; + u8 maxrdturn[3]; +} __packed; + +#define I3C_CCC_HDR_MODE(mode) BIT(mode) + +/** + * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC + * + * @modes: bitmap of supported HDR modes + */ +struct i3c_ccc_gethdrcap { + u8 modes; +} __packed; + +/** + * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands + */ +enum i3c_ccc_setxtime_subcmd { + I3C_CCC_SETXTIME_ST = 0x7f, + I3C_CCC_SETXTIME_DT = 0xbf, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb, + I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd, + I3C_CCC_SETXTIME_TPH = 0x3f, + I3C_CCC_SETXTIME_TU = 0x9f, + I3C_CCC_SETXTIME_ODR = 0x8f, +}; + +/** + * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC + * + * @subcmd: one of the sub-commands ddefined in &enum i3c_ccc_setxtime_subcmd + * @data: sub-command payload. 
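The GETSTATUS payload defined above is big-endian, so a consumer converts it before applying the status macros. A hedged sketch; the function name and the pr_debug() reporting are illustrative only.

#include <asm/byteorder.h>
#include <linux/i3c/ccc.h>
#include <linux/printk.h>

static void example_decode_getstatus(const struct i3c_ccc_getstatus *payload)
{
	u16 status = be16_to_cpu(payload->status);

	if (I3C_CCC_STATUS_PENDING_INT(status))
		pr_debug("pending interrupt number %lu\n",
			 (unsigned long)I3C_CCC_STATUS_PENDING_INT(status));

	if (status & I3C_CCC_STATUS_PROTOCOL_ERROR)
		pr_debug("device reported a protocol error\n");

	pr_debug("activity mode: %lu\n",
		 (unsigned long)I3C_CCC_STATUS_ACTIVITY_MODE(status));
}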
Amount of data is determined by + * &i3c_ccc_setxtime->subcmd + */ +struct i3c_ccc_setxtime { + u8 subcmd; + u8 data[0]; +} __packed; + +#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0) +#define I3C_CCC_GETXTIME_ASYNC_MODE(x) BIT((x) + 1) +#define I3C_CCC_GETXTIME_OVERFLOW BIT(7) + +/** + * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC + * + * @supported_modes: bitmap describing supported XTIME modes + * @state: current status (enabled mode and overflow status) + * @frequency: slave's internal oscillator frequency in 500KHz steps + * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps + */ +struct i3c_ccc_getxtime { + u8 supported_modes; + u8 state; + u8 frequency; + u8 inaccuracy; +} __packed; + +/** + * struct i3c_ccc_cmd_payload - CCC payload + * + * @len: payload length + * @data: payload data. This buffer must be DMA-able + */ +struct i3c_ccc_cmd_payload { + u16 len; + void *data; +}; + +/** + * struct i3c_ccc_cmd_dest - CCC command destination + * + * @addr: can be an I3C device address or the broadcast address if this is a + * broadcast CCC + * @payload: payload to be sent to this device or broadcasted + */ +struct i3c_ccc_cmd_dest { + u8 addr; + struct i3c_ccc_cmd_payload payload; +}; + +/** + * struct i3c_ccc_cmd - CCC command + * + * @rnw: true if the CCC should retrieve data from the device. Only valid for + * unicast commands + * @id: CCC command id + * @ndests: number of destinations. Should always be one for broadcast commands + * @dests: array of destinations and associated payload for this CCC. Most of + * the time, only one destination is provided + * @err: I3C error code + */ +struct i3c_ccc_cmd { + u8 rnw; + u8 id; + unsigned int ndests; + struct i3c_ccc_cmd_dest *dests; + enum i3c_error_code err; +}; + +#endif /* I3C_CCC_H */ diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h new file mode 100644 index 000000000000..5ecb055fd375 --- /dev/null +++ b/include/linux/i3c/device.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Cadence Design Systems Inc. + * + * Author: Boris Brezillon <boris.brezillon@bootlin.com> + */ + +#ifndef I3C_DEV_H +#define I3C_DEV_H + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/i2c.h> +#include <linux/kconfig.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> + +/** + * enum i3c_error_code - I3C error codes + * + * These are the standard error codes as defined by the I3C specification. + * When -EIO is returned by the i3c_device_do_priv_xfers() or + * i3c_device_send_hdr_cmds() one can check the error code in + * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of + * what went wrong. + * + * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C + * related + * @I3C_ERROR_M0: M0 error + * @I3C_ERROR_M1: M1 error + * @I3C_ERROR_M2: M2 error + */ +enum i3c_error_code { + I3C_ERROR_UNKNOWN = 0, + I3C_ERROR_M0 = 1, + I3C_ERROR_M1, + I3C_ERROR_M2, +}; + +/** + * enum i3c_hdr_mode - HDR mode ids + * @I3C_HDR_DDR: DDR mode + * @I3C_HDR_TSP: TSP mode + * @I3C_HDR_TSL: TSL mode + */ +enum i3c_hdr_mode { + I3C_HDR_DDR, + I3C_HDR_TSP, + I3C_HDR_TSL, +}; + +/** + * struct i3c_priv_xfer - I3C SDR private transfer + * @rnw: encodes the transfer direction. true for a read, false for a write + * @len: transfer length in bytes of the transfer + * @data: input/output buffer + * @data.in: input buffer. Must point to a DMA-able buffer + * @data.out: output buffer. 
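Putting the descriptor structures above together, a directed GETPID query to one device could be assembled as below. This is a sketch of what the core might hand to a controller driver, not code from this series, and in a real driver the payload buffer has to be DMA-able.

#include <linux/i3c/ccc.h>

static void example_build_getpid(struct i3c_ccc_cmd *cmd,
				 struct i3c_ccc_cmd_dest *dest,
				 struct i3c_ccc_getpid *pid, u8 dyn_addr)
{
	dest->addr = dyn_addr;		/* directed: the device's dynamic address */
	dest->payload.len = sizeof(*pid);
	dest->payload.data = pid;	/* must be DMA-able */

	cmd->rnw = true;		/* GETPID reads data back */
	cmd->id = I3C_CCC_GETPID;	/* 0x8d: unicast-only command */
	cmd->ndests = 1;
	cmd->dests = dest;
	cmd->err = I3C_ERROR_UNKNOWN;
}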
Must point to a DMA-able buffer + * @err: I3C error code + */ +struct i3c_priv_xfer { + u8 rnw; + u16 len; + union { + void *in; + const void *out; + } data; + enum i3c_error_code err; +}; + +/** + * enum i3c_dcr - I3C DCR values + * @I3C_DCR_GENERIC_DEVICE: generic I3C device + */ +enum i3c_dcr { + I3C_DCR_GENERIC_DEVICE = 0, +}; + +#define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33) +#define I3C_PID_RND_LOWER_32BITS(pid) (!!((pid) & BIT_ULL(32))) +#define I3C_PID_RND_VAL(pid) ((pid) & GENMASK_ULL(31, 0)) +#define I3C_PID_PART_ID(pid) (((pid) & GENMASK_ULL(31, 16)) >> 16) +#define I3C_PID_INSTANCE_ID(pid) (((pid) & GENMASK_ULL(15, 12)) >> 12) +#define I3C_PID_EXTRA_INFO(pid) ((pid) & GENMASK_ULL(11, 0)) + +#define I3C_BCR_DEVICE_ROLE(bcr) ((bcr) & GENMASK(7, 6)) +#define I3C_BCR_I3C_SLAVE (0 << 6) +#define I3C_BCR_I3C_MASTER (1 << 6) +#define I3C_BCR_HDR_CAP BIT(5) +#define I3C_BCR_BRIDGE BIT(4) +#define I3C_BCR_OFFLINE_CAP BIT(3) +#define I3C_BCR_IBI_PAYLOAD BIT(2) +#define I3C_BCR_IBI_REQ_CAP BIT(1) +#define I3C_BCR_MAX_DATA_SPEED_LIM BIT(0) + +/** + * struct i3c_device_info - I3C device information + * @pid: Provisional ID + * @bcr: Bus Characteristic Register + * @dcr: Device Characteristic Register + * @static_addr: static/I2C address + * @dyn_addr: dynamic address + * @hdr_cap: supported HDR modes + * @max_read_ds: max read speed information + * @max_write_ds: max write speed information + * @max_ibi_len: max IBI payload length + * @max_read_turnaround: max read turn-around time in micro-seconds + * @max_read_len: max private SDR read length in bytes + * @max_write_len: max private SDR write length in bytes + * + * These are all basic information that should be advertised by an I3C device. + * Some of them are optional depending on the device type and device + * capabilities. + * For each I3C slave attached to a master with + * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC command + * to retrieve these data. + */ +struct i3c_device_info { + u64 pid; + u8 bcr; + u8 dcr; + u8 static_addr; + u8 dyn_addr; + u8 hdr_cap; + u8 max_read_ds; + u8 max_write_ds; + u8 max_ibi_len; + u32 max_read_turnaround; + u16 max_read_len; + u16 max_write_len; +}; + +/* + * I3C device internals are kept hidden from I3C device users. It's just + * simpler to refactor things when everything goes through getter/setters, and + * I3C device drivers should not have to worry about internal representation + * anyway. + */ +struct i3c_device; + +/* These macros should be used to i3c_device_id entries. */ +#define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART) + +#define I3C_DEVICE(_manufid, _partid, _drvdata) \ + { \ + .match_flags = I3C_MATCH_MANUF_AND_PART, \ + .manuf_id = _manufid, \ + .part_id = _partid, \ + .data = _drvdata, \ + } + +#define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata) \ + { \ + .match_flags = I3C_MATCH_MANUF_AND_PART | \ + I3C_MATCH_EXTRA_INFO, \ + .manuf_id = _manufid, \ + .part_id = _partid, \ + .extra_info = _info, \ + .data = _drvdata, \ + } + +#define I3C_CLASS(_dcr, _drvdata) \ + { \ + .match_flags = I3C_MATCH_DCR, \ + .dcr = _dcr, \ + } + +/** + * struct i3c_driver - I3C device driver + * @driver: inherit from device_driver + * @probe: I3C device probe method + * @remove: I3C device remove method + * @id_table: I3C device match table. 
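The PID/BCR helper macros above make it straightforward to pull apart the identification data carried in struct i3c_device_info. A small sketch; the function name and log messages are made up.

#include <linux/i3c/device.h>
#include <linux/printk.h>

static void example_dump_i3c_info(const struct i3c_device_info *info)
{
	pr_info("manufacturer 0x%llx, part 0x%llx, instance 0x%llx\n",
		(unsigned long long)I3C_PID_MANUF_ID(info->pid),
		(unsigned long long)I3C_PID_PART_ID(info->pid),
		(unsigned long long)I3C_PID_INSTANCE_ID(info->pid));

	if (I3C_BCR_DEVICE_ROLE(info->bcr) == I3C_BCR_I3C_MASTER)
		pr_info("device is master capable\n");

	if (info->bcr & I3C_BCR_IBI_REQ_CAP)
		pr_info("device can request in-band interrupts\n");
}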
Will be used by the framework to decide + * which device to bind to this driver + */ +struct i3c_driver { + struct device_driver driver; + int (*probe)(struct i3c_device *dev); + int (*remove)(struct i3c_device *dev); + const struct i3c_device_id *id_table; +}; + +static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv) +{ + return container_of(drv, struct i3c_driver, driver); +} + +struct device *i3cdev_to_dev(struct i3c_device *i3cdev); +struct i3c_device *dev_to_i3cdev(struct device *dev); + +static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev, + void *data) +{ + struct device *dev = i3cdev_to_dev(i3cdev); + + dev_set_drvdata(dev, data); +} + +static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev) +{ + struct device *dev = i3cdev_to_dev(i3cdev); + + return dev_get_drvdata(dev); +} + +int i3c_driver_register_with_owner(struct i3c_driver *drv, + struct module *owner); +void i3c_driver_unregister(struct i3c_driver *drv); + +#define i3c_driver_register(__drv) \ + i3c_driver_register_with_owner(__drv, THIS_MODULE) + +/** + * module_i3c_driver() - Register a module providing an I3C driver + * @__drv: the I3C driver to register + * + * Provide generic init/exit functions that simply register/unregister an I3C + * driver. + * Should be used by any driver that does not require extra init/cleanup steps. + */ +#define module_i3c_driver(__drv) \ + module_driver(__drv, i3c_driver_register, i3c_driver_unregister) + +/** + * i3c_i2c_driver_register() - Register an i2c and an i3c driver + * @i3cdrv: the I3C driver to register + * @i2cdrv: the I2C driver to register + * + * This function registers both @i2cdev and @i3cdev, and fails if one of these + * registrations fails. This is mainly useful for devices that support both I2C + * and I3C modes. + * Note that when CONFIG_I3C is not enabled, this function only registers the + * I2C driver. + * + * Return: 0 if both registrations succeeds, a negative error code otherwise. + */ +static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv, + struct i2c_driver *i2cdrv) +{ + int ret; + + ret = i2c_add_driver(i2cdrv); + if (ret || !IS_ENABLED(CONFIG_I3C)) + return ret; + + ret = i3c_driver_register(i3cdrv); + if (ret) + i2c_del_driver(i2cdrv); + + return ret; +} + +/** + * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver + * @i3cdrv: the I3C driver to register + * @i2cdrv: the I2C driver to register + * + * This function unregisters both @i3cdrv and @i2cdrv. + * Note that when CONFIG_I3C is not enabled, this function only unregisters the + * @i2cdrv. + */ +static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv, + struct i2c_driver *i2cdrv) +{ + if (IS_ENABLED(CONFIG_I3C)) + i3c_driver_unregister(i3cdrv); + + i2c_del_driver(i2cdrv); +} + +/** + * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C + * driver + * @__i3cdrv: the I3C driver to register + * @__i2cdrv: the I3C driver to register + * + * Provide generic init/exit functions that simply register/unregister an I3C + * and an I2C driver. + * This macro can be used even if CONFIG_I3C is disabled, in this case, only + * the I2C driver will be registered. + * Should be used by any driver that does not require extra init/cleanup steps. 
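Tying the driver-model pieces together, a minimal client driver combines the I3C_DEVICE() table shown earlier with struct i3c_driver and module_i3c_driver(). The IDs, names and callbacks below are placeholders.

#include <linux/i3c/device.h>
#include <linux/module.h>

static int example_probe(struct i3c_device *i3cdev)
{
	/* Query the device, allocate state, i3cdev_set_drvdata(), ... */
	return 0;
}

static int example_remove(struct i3c_device *i3cdev)
{
	return 0;
}

static const struct i3c_device_id example_ids[] = {
	I3C_DEVICE(0x123, 0x456, NULL),	/* made-up manufacturer/part IDs */
	{ /* sentinel */ },
};

static struct i3c_driver example_driver = {
	.driver = {
		.name = "example-i3c-client",
	},
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_ids,
};

module_i3c_driver(example_driver);

MODULE_LICENSE("GPL v2");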
+ */ +#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \ + module_driver(__i3cdrv, \ + i3c_i2c_driver_register, \ + i3c_i2c_driver_unregister) + +int i3c_device_do_priv_xfers(struct i3c_device *dev, + struct i3c_priv_xfer *xfers, + int nxfers); + +void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info); + +struct i3c_ibi_payload { + unsigned int len; + const void *data; +}; + +/** + * struct i3c_ibi_setup - IBI setup object + * @max_payload_len: maximum length of the payload associated to an IBI. If one + * IBI appears to have a payload that is bigger than this + * number, the IBI will be rejected. + * @num_slots: number of pre-allocated IBI slots. This should be chosen so that + * the system never runs out of IBI slots, otherwise you'll lose + * IBIs. + * @handler: IBI handler, every time an IBI is received. This handler is called + * in a workqueue context. It is allowed to sleep and send new + * messages on the bus, though it's recommended to keep the + * processing done there as fast as possible to avoid delaying + * processing of other queued on the same workqueue. + * + * Temporary structure used to pass information to i3c_device_request_ibi(). + * This object can be allocated on the stack since i3c_device_request_ibi() + * copies every bit of information and do not use it after + * i3c_device_request_ibi() has returned. + */ +struct i3c_ibi_setup { + unsigned int max_payload_len; + unsigned int num_slots; + void (*handler)(struct i3c_device *dev, + const struct i3c_ibi_payload *payload); +}; + +int i3c_device_request_ibi(struct i3c_device *dev, + const struct i3c_ibi_setup *setup); +void i3c_device_free_ibi(struct i3c_device *dev); +int i3c_device_enable_ibi(struct i3c_device *dev); +int i3c_device_disable_ibi(struct i3c_device *dev); + +#endif /* I3C_DEV_H */ diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h new file mode 100644 index 000000000000..f13fd8b1dd79 --- /dev/null +++ b/include/linux/i3c/master.h @@ -0,0 +1,648 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Cadence Design Systems Inc. + * + * Author: Boris Brezillon <boris.brezillon@bootlin.com> + */ + +#ifndef I3C_MASTER_H +#define I3C_MASTER_H + +#include <asm/bitsperlong.h> + +#include <linux/bitops.h> +#include <linux/i2c.h> +#include <linux/i3c/ccc.h> +#include <linux/i3c/device.h> +#include <linux/rwsem.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#define I3C_HOT_JOIN_ADDR 0x2 +#define I3C_BROADCAST_ADDR 0x7e +#define I3C_MAX_ADDR GENMASK(6, 0) + +struct i3c_master_controller; +struct i3c_bus; +struct i2c_device; +struct i3c_device; + +/** + * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor + * @node: node element used to insert the slot into the I2C or I3C device + * list + * @master: I3C master that instantiated this device. Will be used to do + * I2C/I3C transfers + * @master_priv: master private data assigned to the device. Can be used to + * add master specific information + * + * This structure is describing common I3C/I2C dev information. 
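[Editor's sketch] As a usage note for i3c_device_do_priv_xfers() and the i3c_priv_xfer descriptor declared above, the following hedged sketch issues a register-style write followed by a read. The helper name and the one-byte lengths are illustrative assumptions; per the @data description, the buffers are expected to be DMA-able.

/* @reg and @val must point to DMA-able (e.g. kmalloc'd) memory. */
static int foo_read_reg(struct i3c_device *i3cdev, u8 *reg, u8 *val)
{
	struct i3c_priv_xfer xfers[2] = {
		{
			.rnw = false,		/* write the register address */
			.len = 1,
			.data.out = reg,
		},
		{
			.rnw = true,		/* then read one byte back */
			.len = 1,
			.data.in = val,
		},
	};

	return i3c_device_do_priv_xfers(i3cdev, xfers, ARRAY_SIZE(xfers));
}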
+ */ +struct i3c_i2c_dev_desc { + struct list_head node; + struct i3c_master_controller *master; + void *master_priv; +}; + +#define I3C_LVR_I2C_INDEX_MASK GENMASK(7, 5) +#define I3C_LVR_I2C_INDEX(x) ((x) << 5) +#define I3C_LVR_I2C_FM_MODE BIT(4) + +#define I2C_MAX_ADDR GENMASK(9, 0) + +/** + * struct i2c_dev_boardinfo - I2C device board information + * @node: used to insert the boardinfo object in the I2C boardinfo list + * @base: regular I2C board information + * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about + * the I2C device limitations + * + * This structure is used to attach board-level information to an I2C device. + * Each I2C device connected on the I3C bus should have one. + */ +struct i2c_dev_boardinfo { + struct list_head node; + struct i2c_board_info base; + u8 lvr; +}; + +/** + * struct i2c_dev_desc - I2C device descriptor + * @common: common part of the I2C device descriptor + * @boardinfo: pointer to the boardinfo attached to this I2C device + * @dev: I2C device object registered to the I2C framework + * + * Each I2C device connected on the bus will have an i2c_dev_desc. + * This object is created by the core and later attached to the controller + * using &struct_i3c_master_controller->ops->attach_i2c_dev(). + * + * &struct_i2c_dev_desc is the internal representation of an I2C device + * connected on an I3C bus. This object is also passed to all + * &struct_i3c_master_controller_ops hooks. + */ +struct i2c_dev_desc { + struct i3c_i2c_dev_desc common; + const struct i2c_dev_boardinfo *boardinfo; + struct i2c_client *dev; +}; + +/** + * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot + * @work: work associated to this slot. The IBI handler will be called from + * there + * @dev: the I3C device that has generated this IBI + * @len: length of the payload associated to this IBI + * @data: payload buffer + * + * An IBI slot is an object pre-allocated by the controller and used when an + * IBI comes in. + * Every time an IBI comes in, the I3C master driver should find a free IBI + * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using + * i3c_master_queue_ibi(). + * + * How IBI slots are allocated is left to the I3C master driver, though, for + * simple kmalloc-based allocation, the generic IBI slot pool can be used. + */ +struct i3c_ibi_slot { + struct work_struct work; + struct i3c_dev_desc *dev; + unsigned int len; + void *data; +}; + +/** + * struct i3c_device_ibi_info - IBI information attached to a specific device + * @all_ibis_handled: used to be informed when no more IBIs are waiting to be + * processed. Used by i3c_device_disable_ibi() to wait for + * all IBIs to be dequeued + * @pending_ibis: count the number of pending IBIs. Each pending IBI has its + * work element queued to the controller workqueue + * @max_payload_len: maximum payload length for an IBI coming from this device. + * this value is specified when calling + * i3c_device_request_ibi() and should not change at run + * time. All messages IBIs exceeding this limit should be + * rejected by the master + * @num_slots: number of IBI slots reserved for this device + * @enabled: reflect the IBI status + * @handler: IBI handler specified at i3c_device_request_ibi() call time. This + * handler will be called from the controller workqueue, and as such + * is allowed to sleep (though it is recommended to process the IBI + * as fast as possible to not stall processing of other IBIs queued + * on the same workqueue). 
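[Editor's sketch] To make the IBI flow described here concrete from the consumer side, a hedged sketch of a device driver registering a handler with i3c_device_request_ibi() (declared in i3c/device.h earlier in this patch) and then enabling the IBI. foo_ibi_handler() and the payload/slot numbers are illustrative only.

static void foo_ibi_handler(struct i3c_device *dev,
			    const struct i3c_ibi_payload *payload)
{
	/* Runs from the master's workqueue; sleeping and new transfers are allowed. */
	dev_dbg(i3cdev_to_dev(dev), "IBI with %u byte(s) of payload\n",
		payload->len);
}

static int foo_enable_events(struct i3c_device *i3cdev)
{
	struct i3c_ibi_setup ibireq = {
		.max_payload_len = 2,	/* assumption: 2-byte event reports */
		.num_slots = 4,
		.handler = foo_ibi_handler,
	};
	int ret;

	/* The setup object is copied by the core, so it can live on the stack. */
	ret = i3c_device_request_ibi(i3cdev, &ibireq);
	if (ret)
		return ret;

	ret = i3c_device_enable_ibi(i3cdev);
	if (ret)
		i3c_device_free_ibi(i3cdev);

	return ret;
}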
+ * New I3C messages can be sent from the IBI handler + * + * The &struct_i3c_device_ibi_info object is allocated when + * i3c_device_request_ibi() is called and attached to a specific device. This + * object is here to manage IBIs coming from a specific I3C device. + * + * Note that this structure is the generic view of the IBI management + * infrastructure. I3C master drivers may have their own internal + * representation which they can associate to the device using + * controller-private data. + */ +struct i3c_device_ibi_info { + struct completion all_ibis_handled; + atomic_t pending_ibis; + unsigned int max_payload_len; + unsigned int num_slots; + unsigned int enabled; + void (*handler)(struct i3c_device *dev, + const struct i3c_ibi_payload *payload); +}; + +/** + * struct i3c_dev_boardinfo - I3C device board information + * @node: used to insert the boardinfo object in the I3C boardinfo list + * @init_dyn_addr: initial dynamic address requested by the FW. We provide no + * guarantee that the device will end up using this address, + * but try our best to assign this specific address to the + * device + * @static_addr: static address the I3C device listen on before it's been + * assigned a dynamic address by the master. Will be used during + * bus initialization to assign it a specific dynamic address + * before starting DAA (Dynamic Address Assignment) + * @pid: I3C Provisional ID exposed by the device. This is a unique identifier + * that may be used to attach boardinfo to i3c_dev_desc when the device + * does not have a static address + * @of_node: optional DT node in case the device has been described in the DT + * + * This structure is used to attach board-level information to an I3C device. + * Not all I3C devices connected on the bus will have a boardinfo. It's only + * needed if you want to attach extra resources to a device or assign it a + * specific dynamic address. + */ +struct i3c_dev_boardinfo { + struct list_head node; + u8 init_dyn_addr; + u8 static_addr; + u64 pid; + struct device_node *of_node; +}; + +/** + * struct i3c_dev_desc - I3C device descriptor + * @common: common part of the I3C device descriptor + * @info: I3C device information. Will be automatically filled when you create + * your device with i3c_master_add_i3c_dev_locked() + * @ibi_lock: lock used to protect the &struct_i3c_device->ibi + * @ibi: IBI info attached to a device. Should be NULL until + * i3c_device_request_ibi() is called + * @dev: pointer to the I3C device object exposed to I3C device drivers. This + * should never be accessed from I3C master controller drivers. Only core + * code should manipulate it in when updating the dev <-> desc link or + * when propagating IBI events to the driver + * @boardinfo: pointer to the boardinfo attached to this I3C device + * + * Internal representation of an I3C device. This object is only used by the + * core and passed to I3C master controller drivers when they're requested to + * do some operations on the device. + * The core maintains the link between the internal I3C dev descriptor and the + * object exposed to the I3C device drivers (&struct_i3c_device). + */ +struct i3c_dev_desc { + struct i3c_i2c_dev_desc common; + struct i3c_device_info info; + struct mutex ibi_lock; + struct i3c_device_ibi_info *ibi; + struct i3c_device *dev; + const struct i3c_dev_boardinfo *boardinfo; +}; + +/** + * struct i3c_device - I3C device object + * @dev: device object to register the I3C dev to the device model + * @desc: pointer to an i3c device descriptor object. 
This link is updated + * every time the I3C device is rediscovered with a different dynamic + * address assigned + * @bus: I3C bus this device is attached to + * + * I3C device object exposed to I3C device drivers. The takes care of linking + * this object to the relevant &struct_i3c_dev_desc one. + * All I3C devs on the I3C bus are represented, including I3C masters. For each + * of them, we have an instance of &struct i3c_device. + */ +struct i3c_device { + struct device dev; + struct i3c_dev_desc *desc; + struct i3c_bus *bus; +}; + +/* + * The I3C specification says the maximum number of devices connected on the + * bus is 11, but this number depends on external parameters like trace length, + * capacitive load per Device, and the types of Devices present on the Bus. + * I3C master can also have limitations, so this number is just here as a + * reference and should be adjusted on a per-controller/per-board basis. + */ +#define I3C_BUS_MAX_DEVS 11 + +#define I3C_BUS_MAX_I3C_SCL_RATE 12900000 +#define I3C_BUS_TYP_I3C_SCL_RATE 12500000 +#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000 +#define I3C_BUS_I2C_FM_SCL_RATE 400000 +#define I3C_BUS_TLOW_OD_MIN_NS 200 + +/** + * enum i3c_bus_mode - I3C bus mode + * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation + * expected + * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on + * the bus. The only impact in this mode is that the + * high SCL pulse has to stay below 50ns to trick I2C + * devices when transmitting I3C frames + * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present + * on the bus + */ +enum i3c_bus_mode { + I3C_BUS_MODE_PURE, + I3C_BUS_MODE_MIXED_FAST, + I3C_BUS_MODE_MIXED_SLOW, +}; + +/** + * enum i3c_addr_slot_status - I3C address slot status + * @I3C_ADDR_SLOT_FREE: address is free + * @I3C_ADDR_SLOT_RSVD: address is reserved + * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device + * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device + * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask + * + * On an I3C bus, addresses are assigned dynamically, and we need to know which + * addresses are free to use and which ones are already assigned. + * + * Addresses marked as reserved are those reserved by the I3C protocol + * (broadcast address, ...). + */ +enum i3c_addr_slot_status { + I3C_ADDR_SLOT_FREE, + I3C_ADDR_SLOT_RSVD, + I3C_ADDR_SLOT_I2C_DEV, + I3C_ADDR_SLOT_I3C_DEV, + I3C_ADDR_SLOT_STATUS_MASK = 3, +}; + +/** + * struct i3c_bus - I3C bus object + * @cur_master: I3C master currently driving the bus. Since I3C is multi-master + * this can change over the time. Will be used to let a master + * know whether it needs to request bus ownership before sending + * a frame or not + * @id: bus ID. 
Assigned by the framework when register the bus + * @addrslots: a bitmap with 2-bits per-slot to encode the address status and + * ease the DAA (Dynamic Address Assignment) procedure (see + * &enum i3c_addr_slot_status) + * @mode: bus mode (see &enum i3c_bus_mode) + * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv + * transfers + * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers + * @scl_rate: SCL signal rate for I3C and I2C mode + * @devs.i3c: contains a list of I3C device descriptors representing I3C + * devices connected on the bus and successfully attached to the + * I3C master + * @devs.i2c: contains a list of I2C device descriptors representing I2C + * devices connected on the bus and successfully attached to the + * I3C master + * @devs: 2 lists containing all I3C/I2C devices connected to the bus + * @lock: read/write lock on the bus. This is needed to protect against + * operations that have an impact on the whole bus and the devices + * connected to it. For example, when asking slaves to drop their + * dynamic address (RSTDAA CCC), we need to make sure no one is trying + * to send I3C frames to these devices. + * Note that this lock does not protect against concurrency between + * devices: several drivers can send different I3C/I2C frames through + * the same master in parallel. This is the responsibility of the + * master to guarantee that frames are actually sent sequentially and + * not interlaced + * + * The I3C bus is represented with its own object and not implicitly described + * by the I3C master to cope with the multi-master functionality, where one bus + * can be shared amongst several masters, each of them requesting bus ownership + * when they need to. + */ +struct i3c_bus { + struct i3c_dev_desc *cur_master; + int id; + unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG]; + enum i3c_bus_mode mode; + struct { + unsigned long i3c; + unsigned long i2c; + } scl_rate; + struct { + struct list_head i3c; + struct list_head i2c; + } devs; + struct rw_semaphore lock; +}; + +/** + * struct i3c_master_controller_ops - I3C master methods + * @bus_init: hook responsible for the I3C bus initialization. You should at + * least call master_set_info() from there and set the bus mode. + * You can also put controller specific initialization in there. + * This method is mandatory. + * @bus_cleanup: cleanup everything done in + * &i3c_master_controller_ops->bus_init(). + * This method is optional. + * @attach_i3c_dev: called every time an I3C device is attached to the bus. It + * can be after a DAA or when a device is statically declared + * by the FW, in which case it will only have a static address + * and the dynamic address will be 0. + * When this function is called, device information have not + * been retrieved yet. + * This is a good place to attach master controller specific + * data to I3C devices. + * This method is optional. + * @reattach_i3c_dev: called every time an I3C device has its addressed + * changed. It can be because the device has been powered + * down and has lost its address, or it can happen when a + * device had a static address and has been assigned a + * dynamic address with SETDASA. + * This method is optional. + * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually + * happens when the master device is unregistered. + * This method is optional. + * @do_daa: do a DAA (Dynamic Address Assignment) procedure. 
This procedure + should send an ENTDAA CCC command and then add all devices + discovered during the DAA using i3c_master_add_i3c_dev_locked(). + All devices added with i3c_master_add_i3c_dev_locked() will then be + attached or re-attached to the controller. + This method is mandatory. + * @supports_ccc_cmd: should return true if the CCC command is supported, false + otherwise. + This method is optional; if not provided, the core assumes + all CCC commands are supported. + * @send_ccc_cmd: send a CCC command. + This method is mandatory. + * @priv_xfers: do one or several private I3C SDR transfers. + This method is mandatory. + * @attach_i2c_dev: called every time an I2C device is attached to the bus. + This is a good place to attach master controller specific + data to I2C devices. + This method is optional. + * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually + happens when the master device is unregistered. + This method is optional. + * @i2c_xfers: do one or several I2C transfers. Note that, unlike I3C + transfers, the core does not guarantee that buffers attached to + the transfers are DMA-safe. If drivers want to have DMA-safe + buffers, they should use the i2c_get_dma_safe_msg_buf() + and i2c_put_dma_safe_msg_buf() helpers provided by the I2C + framework. + This method is mandatory. + * @i2c_funcs: expose the supported I2C functionalities. + This method is mandatory. + * @request_ibi: attach an IBI handler to an I3C device. This implies defining + an IBI handler and the constraints of the IBI (maximum payload + length and number of pre-allocated slots). + Some controllers support fewer IBI-capable devices than regular + devices, so this method might return -%EBUSY if there's no + more space for an extra IBI registration. + This method is optional. + * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI + should have been disabled with ->disable_ibi() prior to that. + This method is mandatory only if ->request_ibi is not NULL. + * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called + prior to ->enable_ibi(). The controller should first enable + the IBI on the controller end (for example, unmask the hardware + IRQ) and then send the ENEC CCC command (with the IBI flag set) + to the I3C device. + This method is mandatory only if ->request_ibi is not NULL. + * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI + flag set and then deactivate the hardware IRQ on the + controller end. + This method is mandatory only if ->request_ibi is not NULL. + * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been + processed by its handler. The IBI slot should be put back + in the IBI slot pool so that the controller can re-use it + for a future IBI. + This method is mandatory only if ->request_ibi is not + NULL.
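[Editor's sketch] A hedged sketch of how a controller driver might structure the ->do_daa() hook described above: the hardware-specific part performs ENTDAA and picks addresses, then every discovered address is handed to i3c_master_add_i3c_dev_locked() so the core can attach the device. bar_hw_entdaa() and bar_hw_get_daa_addr() are hypothetical controller helpers, not part of this API.

static int bar_do_daa(struct i3c_master_controller *m)
{
	int ret, addr;

	/* Hardware-assisted ENTDAA: assigns dynamic addresses on the wire. */
	ret = bar_hw_entdaa(m);
	if (ret)
		return ret;

	/* Register every address the controller handed out with the core. */
	for (addr = bar_hw_get_daa_addr(m, 0); addr > 0;
	     addr = bar_hw_get_daa_addr(m, addr)) {
		ret = i3c_master_add_i3c_dev_locked(m, addr);
		if (ret)
			return ret;
	}

	return 0;
}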
+ */ +struct i3c_master_controller_ops { + int (*bus_init)(struct i3c_master_controller *master); + void (*bus_cleanup)(struct i3c_master_controller *master); + int (*attach_i3c_dev)(struct i3c_dev_desc *dev); + int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr); + void (*detach_i3c_dev)(struct i3c_dev_desc *dev); + int (*do_daa)(struct i3c_master_controller *master); + bool (*supports_ccc_cmd)(struct i3c_master_controller *master, + const struct i3c_ccc_cmd *cmd); + int (*send_ccc_cmd)(struct i3c_master_controller *master, + struct i3c_ccc_cmd *cmd); + int (*priv_xfers)(struct i3c_dev_desc *dev, + struct i3c_priv_xfer *xfers, + int nxfers); + int (*attach_i2c_dev)(struct i2c_dev_desc *dev); + void (*detach_i2c_dev)(struct i2c_dev_desc *dev); + int (*i2c_xfers)(struct i2c_dev_desc *dev, + const struct i2c_msg *xfers, int nxfers); + u32 (*i2c_funcs)(struct i3c_master_controller *master); + int (*request_ibi)(struct i3c_dev_desc *dev, + const struct i3c_ibi_setup *req); + void (*free_ibi)(struct i3c_dev_desc *dev); + int (*enable_ibi)(struct i3c_dev_desc *dev); + int (*disable_ibi)(struct i3c_dev_desc *dev); + void (*recycle_ibi_slot)(struct i3c_dev_desc *dev, + struct i3c_ibi_slot *slot); +}; + +/** + * struct i3c_master_controller - I3C master controller object + * @dev: device to be registered to the device-model + * @this: an I3C device object representing this master. This device will be + * added to the list of I3C devs available on the bus + * @i2c: I2C adapter used for backward compatibility. This adapter is + * registered to the I2C subsystem to be as transparent as possible to + * existing I2C drivers + * @ops: master operations. See &struct i3c_master_controller_ops + * @secondary: true if the master is a secondary master + * @init_done: true when the bus initialization is done + * @boardinfo.i3c: list of I3C boardinfo objects + * @boardinfo.i2c: list of I2C boardinfo objects + * @boardinfo: board-level information attached to devices connected on the bus + * @bus: I3C bus exposed by this master + * @wq: workqueue used to execute IBI handlers. Can also be used by master + * drivers if they need to postpone operations that need to take place + * in a thread context. Typical examples are Hot Join processing which + * requires taking the bus lock in maintenance, which in turn, can only + * be done from a sleep-able context + * + * A &struct i3c_master_controller has to be registered to the I3C subsystem + * through i3c_master_register(). None of &struct i3c_master_controller fields + * should be set manually, just pass appropriate values to + * i3c_master_register(). + */ +struct i3c_master_controller { + struct device dev; + struct i3c_dev_desc *this; + struct i2c_adapter i2c; + const struct i3c_master_controller_ops *ops; + unsigned int secondary : 1; + unsigned int init_done : 1; + struct { + struct list_head i3c; + struct list_head i2c; + } boardinfo; + struct i3c_bus bus; + struct workqueue_struct *wq; +}; + +/** + * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus + * @bus: the I3C bus + * @dev: an I2C device descriptor pointer updated to point to the current slot + * at each iteration of the loop + * + * Iterate over all I2C devs present on the bus. 
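[Editor's sketch] A hedged sketch of the device-list walk enabled by the iterators documented here (and defined just below): a controller driver revisiting every attached I3C and I2C device, for instance after a resume. bar_program_i3c_dev() and bar_program_i2c_dev() are hypothetical controller-specific helpers.

static void bar_reprogram_devs(struct i3c_master_controller *m)
{
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_dev_desc *i3cdev;
	struct i2c_dev_desc *i2cdev;

	i3c_bus_for_each_i3cdev(bus, i3cdev)
		bar_program_i3c_dev(m, i3cdev);

	i3c_bus_for_each_i2cdev(bus, i2cdev)
		bar_program_i2c_dev(m, i2cdev);
}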
+ */ +#define i3c_bus_for_each_i2cdev(bus, dev) \ + list_for_each_entry(dev, &(bus)->devs.i2c, common.node) + +/** + * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus + * @bus: the I3C bus + * @dev: an I3C device descriptor pointer updated to point to the current slot + * at each iteration of the loop + * + * Iterate over all I3C devs present on the bus. + */ +#define i3c_bus_for_each_i3cdev(bus, dev) \ + list_for_each_entry(dev, &(bus)->devs.i3c, common.node) + +int i3c_master_do_i2c_xfers(struct i3c_master_controller *master, + const struct i2c_msg *xfers, + int nxfers); + +int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr, + u8 evts); +int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr, + u8 evts); +int i3c_master_entdaa_locked(struct i3c_master_controller *master); +int i3c_master_defslvs_locked(struct i3c_master_controller *master); + +int i3c_master_get_free_addr(struct i3c_master_controller *master, + u8 start_addr); + +int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, + u8 addr); +int i3c_master_do_daa(struct i3c_master_controller *master); + +int i3c_master_set_info(struct i3c_master_controller *master, + const struct i3c_device_info *info); + +int i3c_master_register(struct i3c_master_controller *master, + struct device *parent, + const struct i3c_master_controller_ops *ops, + bool secondary); +int i3c_master_unregister(struct i3c_master_controller *master); + +/** + * i3c_dev_get_master_data() - get master private data attached to an I3C + * device descriptor + * @dev: the I3C device descriptor to get private data from + * + * Return: the private data previously attached with i3c_dev_set_master_data() + * or NULL if no data has been attached to the device. + */ +static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev) +{ + return dev->common.master_priv; +} + +/** + * i3c_dev_set_master_data() - attach master private data to an I3C device + * descriptor + * @dev: the I3C device descriptor to attach private data to + * @data: private data + * + * This function allows a master controller to attach per-device private data + * which can then be retrieved with i3c_dev_get_master_data(). + */ +static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev, + void *data) +{ + dev->common.master_priv = data; +} + +/** + * i2c_dev_get_master_data() - get master private data attached to an I2C + * device descriptor + * @dev: the I2C device descriptor to get private data from + * + * Return: the private data previously attached with i2c_dev_set_master_data() + * or NULL if no data has been attached to the device. + */ +static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev) +{ + return dev->common.master_priv; +} + +/** + * i2c_dev_set_master_data() - attach master private data to an I2C device + * descriptor + * @dev: the I2C device descriptor to attach private data to + * @data: private data + * + * This function allows a master controller to attach per-device private data + * which can then be retrieved with i2c_dev_get_master_data().
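[Editor's sketch] A hedged sketch of the registration path declared above, as a hypothetical platform driver would use it: only the mandatory &struct i3c_master_controller_ops hooks are wired up and i3c_master_register() is called with secondary set to false. All bar_* symbols and the platform_device plumbing are placeholders, not part of this patch.

#include <linux/platform_device.h>

struct bar_master {
	struct i3c_master_controller base;
	void __iomem *regs;
};

static const struct i3c_master_controller_ops bar_ops = {
	.bus_init	= bar_bus_init,
	.do_daa		= bar_do_daa,
	.send_ccc_cmd	= bar_send_ccc_cmd,
	.priv_xfers	= bar_priv_xfers,
	.i2c_xfers	= bar_i2c_xfers,
	.i2c_funcs	= bar_i2c_funcs,
};

static int bar_probe(struct platform_device *pdev)
{
	struct bar_master *master;

	master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	/* ... ioremap registers, request the IRQ, enable clocks ... */

	platform_set_drvdata(pdev, master);

	return i3c_master_register(&master->base, &pdev->dev, &bar_ops,
				   false);
}

static int bar_remove(struct platform_device *pdev)
{
	struct bar_master *master = platform_get_drvdata(pdev);

	return i3c_master_unregister(&master->base);
}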
+ */ +static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev, + void *data) +{ + dev->common.master_priv = data; +} + +/** + * i3c_dev_get_master() - get master used to communicate with a device + * @dev: I3C dev + * + * Return: the master controller driving @dev + */ +static inline struct i3c_master_controller * +i3c_dev_get_master(struct i3c_dev_desc *dev) +{ + return dev->common.master; +} + +/** + * i2c_dev_get_master() - get master used to communicate with a device + * @dev: I2C dev + * + * Return: the master controller driving @dev + */ +static inline struct i3c_master_controller * +i2c_dev_get_master(struct i2c_dev_desc *dev) +{ + return dev->common.master; +} + +/** + * i3c_master_get_bus() - get the bus attached to a master + * @master: master object + * + * Return: the I3C bus @master is connected to + */ +static inline struct i3c_bus * +i3c_master_get_bus(struct i3c_master_controller *master) +{ + return &master->bus; +} + +struct i3c_generic_ibi_pool; + +struct i3c_generic_ibi_pool * +i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev, + const struct i3c_ibi_setup *req); +void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool); + +struct i3c_ibi_slot * +i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool); +void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool, + struct i3c_ibi_slot *slot); + +void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot); + +struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev); + +#endif /* I3C_MASTER_H */ diff --git a/include/linux/i8253.h b/include/linux/i8253.h index e6bb36a97519..8336b2f6f834 100644 --- a/include/linux/i8253.h +++ b/include/linux/i8253.h @@ -21,6 +21,7 @@ #define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) extern raw_spinlock_t i8253_lock; +extern bool i8253_clear_counter_on_shutdown; extern struct clock_event_device i8253_clockevent; extern void clockevent_i8253_init(bool oneshot); diff --git a/include/linux/ide.h b/include/linux/ide.h index c74b0321922a..e7d29ae633cd 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -10,7 +10,7 @@ #include <linux/init.h> #include <linux/ioport.h> #include <linux/ata.h> -#include <linux/blkdev.h> +#include <linux/blk-mq.h> #include <linux/proc_fs.h> #include <linux/interrupt.h> #include <linux/bitops.h> @@ -50,6 +50,7 @@ struct ide_request { struct scsi_request sreq; u8 sense[SCSI_SENSE_BUFFERSIZE]; u8 type; + void *special; }; static inline struct ide_request *ide_req(struct request *rq) @@ -529,6 +530,10 @@ struct ide_drive_s { struct request_queue *queue; /* request queue */ + bool (*prep_rq)(struct ide_drive_s *, struct request *); + + struct blk_mq_tag_set tag_set; + struct request *rq; /* current request */ void *driver_data; /* extra driver data */ u16 *id; /* identification info */ @@ -612,6 +617,10 @@ struct ide_drive_s { bool sense_rq_armed; struct request *sense_rq; struct request_sense sense_data; + + /* async sense insertion */ + struct work_struct rq_work; + struct list_head rq_list; }; typedef struct ide_drive_s ide_drive_t; @@ -1089,6 +1098,7 @@ extern int ide_pci_clk; int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int); void ide_kill_rq(ide_drive_t *, struct request *); +void ide_insert_request_head(ide_drive_t *, struct request *); void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); @@ -1208,7 +1218,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long 
timeout); extern void ide_timer_expiry(struct timer_list *t); extern irqreturn_t ide_intr(int irq, void *dev_id); -extern void do_ide_request(struct request_queue *); +extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); void ide_init_disk(struct gendisk *, ide_drive_t *); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 0ef67f837ae1..3b04e72315e1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -812,6 +812,8 @@ enum mesh_config_capab_flags { IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40, }; +#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1 + /** * mesh channel switch parameters element's flag indicator * @@ -1617,7 +1619,7 @@ struct ieee80211_he_mcs_nss_supp { * struct ieee80211_he_operation - HE capabilities element * * This structure is the "HE operation element" fields as - * described in P802.11ax_D2.0 section 9.4.2.238 + * described in P802.11ax_D3.0 section 9.4.2.238 */ struct ieee80211_he_operation { __le32 he_oper_params; @@ -2009,17 +2011,17 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) } /* HE Operation defines */ -#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f -#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0 -#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6 -#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200 -#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00 -#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10 -#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x00100000 -#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00200000 -#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000 -#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000 -#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000 +#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003 +#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 +#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 +#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4 +#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000 +#define IEEE80211_HE_OPERATION_CO_LOCATED_BSS 0x00008000 +#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000 +#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000 +#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24 +#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 +#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 /* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size @@ -2044,7 +2046,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) he_oper_params = le32_to_cpu(he_oper->he_oper_params); if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) oper_len += 3; - if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP) + if (he_oper_params & IEEE80211_HE_OPERATION_CO_LOCATED_BSS) oper_len++; /* Add the first byte (extension ID) to the total length */ @@ -2685,6 +2687,10 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7) +/* Defines support for TWT Requester and TWT Responder */ +#define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5) +#define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6) + /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index c20c7e197d07..627b788ba0ff 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -119,6 +119,8 @@ static 
inline int br_vlan_get_info(const struct net_device *dev, u16 vid, struct net_device *br_fdb_find_port(const struct net_device *br_dev, const unsigned char *addr, __u16 vid); +void br_fdb_clear_offload(const struct net_device *dev, u16 vid); +bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag); #else static inline struct net_device * br_fdb_find_port(const struct net_device *br_dev, @@ -127,6 +129,16 @@ br_fdb_find_port(const struct net_device *br_dev, { return NULL; } + +static inline void br_fdb_clear_offload(const struct net_device *dev, u16 vid) +{ +} + +static inline bool +br_port_flag_is_set(const struct net_device *dev, unsigned long flag) +{ + return false; +} #endif #endif diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 83ea4df6ab81..4cca4da7a6de 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -65,8 +65,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) #define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ #define VLAN_PRIO_SHIFT 13 -#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ -#define VLAN_TAG_PRESENT VLAN_CFI_MASK +#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */ #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ #define VLAN_N_VID 4096 @@ -78,10 +77,11 @@ static inline bool is_vlan_dev(const struct net_device *dev) return dev->priv_flags & IFF_802_1Q_VLAN; } -#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) -#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) +#define skb_vlan_tag_present(__skb) ((__skb)->vlan_present) +#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci) #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) -#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK) +#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK)) +#define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT) static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev) { @@ -133,6 +133,9 @@ struct vlan_pcpu_stats { extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, __be16 vlan_proto, u16 vlan_id); +extern int vlan_for_each(struct net_device *dev, + int (*action)(struct net_device *dev, int vid, + void *arg), void *arg); extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern __be16 vlan_dev_vlan_proto(const struct net_device *dev); @@ -236,6 +239,14 @@ __vlan_find_dev_deep_rcu(struct net_device *real_dev, return NULL; } +static inline int +vlan_for_each(struct net_device *dev, + int (*action)(struct net_device *dev, int vid, void *arg), + void *arg) +{ + return 0; +} + static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) { BUG(); @@ -461,6 +472,31 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, return skb; } +/** + * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info + * @skb: skbuff to clear + * + * Clears the VLAN information from @skb + */ +static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb) +{ + skb->vlan_present = 0; +} + +/** + * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb + * @dst: skbuff to copy to + * @src: skbuff to copy from + * + * Copies VLAN information from @src to @dst (for branchless code) + */ +static inline void __vlan_hwaccel_copy_tag(struct 
sk_buff *dst, const struct sk_buff *src) +{ + dst->vlan_present = src->vlan_present; + dst->vlan_proto = src->vlan_proto; + dst->vlan_tci = src->vlan_tci; +} + /* * __vlan_hwaccel_push_inside - pushes vlan tag to the payload * @skb: skbuff to tag @@ -475,7 +511,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, skb_vlan_tag_get(skb)); if (likely(skb)) - skb->vlan_tci = 0; + __vlan_hwaccel_clear_tag(skb); return skb; } @@ -491,7 +527,8 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { skb->vlan_proto = vlan_proto; - skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; + skb->vlan_tci = vlan_tci; + skb->vlan_present = 1; } /** @@ -531,8 +568,6 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, } } -#define HAVE_VLAN_GET_TAG - /** * vlan_get_tag - get the VLAN ID from the skb * @skb: skbuff to query diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 730ead1a46df..7e84351fa2c0 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -39,6 +39,8 @@ struct iio_dev; * if there is just one read-only sample data shift register. * @addr_shift: Shift of the register address in the communications register. * @read_mask: Mask for the communications register having the read bit set. + * @data_reg: Address of the data register, if 0 the default address of 0x3 will + * be used. */ struct ad_sigma_delta_info { int (*set_channel)(struct ad_sigma_delta *, unsigned int channel); @@ -47,6 +49,7 @@ struct ad_sigma_delta_info { bool has_registers; unsigned int addr_shift; unsigned int read_mask; + unsigned int data_reg; }; /** diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index f9bd6e8ab138..8092b8e7f37e 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -40,7 +40,7 @@ #define ST_SENSORS_DEFAULT_STAT_ADDR 0x27 #define ST_SENSORS_MAX_NAME 17 -#define ST_SENSORS_MAX_4WAI 7 +#define ST_SENSORS_MAX_4WAI 8 #define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \ ch2, s, endian, rbits, sbits, addr) \ diff --git a/include/linux/ima.h b/include/linux/ima.h index 97914a2833d1..b5e16b8c50b7 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -30,6 +30,21 @@ extern void ima_post_path_mknod(struct dentry *dentry); extern void ima_add_kexec_buffer(struct kimage *image); #endif +#if defined(CONFIG_X86) && defined(CONFIG_EFI) +extern bool arch_ima_get_secureboot(void); +extern const char * const *arch_get_ima_policy(void); +#else +static inline bool arch_ima_get_secureboot(void) +{ + return false; +} + +static inline const char * const *arch_get_ima_policy(void) +{ + return NULL; +} +#endif + #else static inline int ima_bprm_check(struct linux_binprm *bprm) { diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h new file mode 100644 index 000000000000..00d7e8e919c6 --- /dev/null +++ b/include/linux/indirect_call_wrapper.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H +#define _LINUX_INDIRECT_CALL_WRAPPER_H + +#ifdef CONFIG_RETPOLINE + +/* + * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtin + * @f: function pointer + * @f$NR: builtin functions names, up to $NR of them + * @__VA_ARGS__: arguments for @f + * + * Avoid retpoline overhead for known builtin, checking @f vs each of 
them and + * eventually invoking directly the builtin function. The functions are check + * in the given order. Fallback to the indirect call. + */ +#define INDIRECT_CALL_1(f, f1, ...) \ + ({ \ + likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \ + }) +#define INDIRECT_CALL_2(f, f2, f1, ...) \ + ({ \ + likely(f == f2) ? f2(__VA_ARGS__) : \ + INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ + }) + +#define INDIRECT_CALLABLE_DECLARE(f) f +#define INDIRECT_CALLABLE_SCOPE + +#else +#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALLABLE_DECLARE(f) +#define INDIRECT_CALLABLE_SCOPE static +#endif + +/* + * We can use INDIRECT_CALL_$NR for ipv6 related functions only if ipv6 is + * builtin, this macro simplify dealing with indirect calls with only ipv4/ipv6 + * alternatives + */ +#if IS_BUILTIN(CONFIG_IPV6) +#define INDIRECT_CALL_INET(f, f2, f1, ...) \ + INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__) +#elif IS_ENABLED(CONFIG_INET) +#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__) +#else +#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__) +#endif + +#endif diff --git a/include/linux/init.h b/include/linux/init.h index 9c2aba1dbabf..5255069f5a9f 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -146,7 +146,6 @@ extern unsigned int reset_devices; /* used by init/main.c */ void setup_arch(char **); void prepare_namespace(void); -void __init load_default_modules(void); int __init init_rootfs(void); #if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX) diff --git a/include/linux/initrd.h b/include/linux/initrd.h index 84b423044088..14beaff9b445 100644 --- a/include/linux/initrd.h +++ b/include/linux/initrd.h @@ -21,4 +21,7 @@ extern int initrd_below_start_ok; extern unsigned long initrd_start, initrd_end; extern void free_initrd_mem(unsigned long, unsigned long); +extern phys_addr_t phys_initrd_start; +extern unsigned long phys_initrd_size; + extern unsigned int real_root_dev; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index b0ae25837361..0605f3bf6e79 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -26,7 +26,6 @@ #include <linux/iova.h> #include <linux/io.h> #include <linux/idr.h> -#include <linux/dma_remapping.h> #include <linux/mmu_notifier.h> #include <linux/list.h> #include <linux/iommu.h> @@ -37,9 +36,29 @@ #include <asm/iommu.h> /* - * Intel IOMMU register specification per version 1.0 public spec. + * VT-d hardware uses 4KiB page size regardless of host page size. */ +#define VTD_PAGE_SHIFT (12) +#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) +#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) +#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) + +#define VTD_STRIDE_SHIFT (9) +#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT) + +#define DMA_PTE_READ (1) +#define DMA_PTE_WRITE (2) +#define DMA_PTE_LARGE_PAGE (1 << 7) +#define DMA_PTE_SNP (1 << 11) +#define CONTEXT_TT_MULTI_LEVEL 0 +#define CONTEXT_TT_DEV_IOTLB 1 +#define CONTEXT_TT_PASS_THROUGH 2 +#define CONTEXT_PASIDE BIT_ULL(3) + +/* + * Intel IOMMU register specification per version 1.0 public spec. 
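[Editor's sketch] Going back to the INDIRECT_CALL_*() helpers introduced in indirect_call_wrapper.h above, a hedged sketch of the calling pattern they enable. The hash_fn_t type and the hash_v4()/hash_v6() candidates are invented for illustration and would be defined elsewhere in the same unit.

#include <linux/indirect_call_wrapper.h>

typedef u32 (*hash_fn_t)(const void *data, u32 len);

INDIRECT_CALLABLE_DECLARE(u32 hash_v6(const void *data, u32 len));
INDIRECT_CALLABLE_DECLARE(u32 hash_v4(const void *data, u32 len));

static u32 run_hash(hash_fn_t fn, const void *data, u32 len)
{
	/*
	 * With CONFIG_RETPOLINE this expands to "if (fn == hash_v6) ...
	 * else if (fn == hash_v4) ... else fn(...)", skipping the retpoline
	 * thunk for the two known candidates; otherwise it is a plain
	 * indirect call.
	 */
	return INDIRECT_CALL_2(fn, hash_v6, hash_v4, data, len);
}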
+ */ #define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ #define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ #define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ @@ -151,6 +170,10 @@ * Extended Capability Register */ +#define ecap_smpwc(e) (((e) >> 48) & 0x1) +#define ecap_flts(e) (((e) >> 47) & 0x1) +#define ecap_slts(e) (((e) >> 46) & 0x1) +#define ecap_smts(e) (((e) >> 43) & 0x1) #define ecap_dit(e) ((e >> 41) & 0x1) #define ecap_pasid(e) ((e >> 40) & 0x1) #define ecap_pss(e) ((e >> 35) & 0x1f) @@ -229,6 +252,7 @@ /* DMA_RTADDR_REG */ #define DMA_RTADDR_RTT (((u64)1) << 11) +#define DMA_RTADDR_SMT (((u64)1) << 10) /* CCMD_REG */ #define DMA_CCMD_ICC (((u64)1) << 63) @@ -374,13 +398,18 @@ enum { #define QI_GRAN_NONG_PASID 2 #define QI_GRAN_PSI_PASID 3 +#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap)) + struct qi_desc { - u64 low, high; + u64 qw0; + u64 qw1; + u64 qw2; + u64 qw3; }; struct q_inval { raw_spinlock_t q_lock; - struct qi_desc *desc; /* invalidation queue */ + void *desc; /* invalidation queue */ int *desc_status; /* desc status */ int free_head; /* first free entry */ int free_tail; /* last free entry */ @@ -512,15 +541,8 @@ struct intel_iommu { struct iommu_flush flush; #endif #ifdef CONFIG_INTEL_IOMMU_SVM - /* These are large and need to be contiguous, so we allocate just - * one for now. We'll maybe want to rethink that if we truly give - * devices away to userspace processes (e.g. for DPDK) and don't - * want to trust that userspace will use *only* the PASID it was - * told to. But while it's all driver-arbitrated, we're fine. */ - struct pasid_state_entry *pasid_state_table; struct page_req_dsc *prq; unsigned char prq_name[16]; /* Name for PRQ interrupt */ - u32 pasid_max; #endif struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ @@ -563,6 +585,49 @@ static inline void __iommu_flush_cache( clflush_cache_range(addr, size); } +/* + * 0: readable + * 1: writable + * 2-6: reserved + * 7: super page + * 8-10: available + * 11: snoop behavior + * 12-63: Host physcial address + */ +struct dma_pte { + u64 val; +}; + +static inline void dma_clear_pte(struct dma_pte *pte) +{ + pte->val = 0; +} + +static inline u64 dma_pte_addr(struct dma_pte *pte) +{ +#ifdef CONFIG_64BIT + return pte->val & VTD_PAGE_MASK; +#else + /* Must have a full atomic 64-bit read */ + return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK; +#endif +} + +static inline bool dma_pte_present(struct dma_pte *pte) +{ + return (pte->val & 3) != 0; +} + +static inline bool dma_pte_superpage(struct dma_pte *pte) +{ + return (pte->val & DMA_PTE_LARGE_PAGE); +} + +static inline int first_pte_in_page(struct dma_pte *pte) +{ + return !((unsigned long)pte & ~VTD_PAGE_MASK); +} + extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); extern int dmar_find_matched_atsr_unit(struct pci_dev *dev); @@ -587,10 +652,10 @@ void free_pgtable_page(void *vaddr); struct intel_iommu *domain_get_iommu(struct dmar_domain *domain); int for_each_device_domain(int (*fn)(struct device_domain_info *info, void *data), void *data); +void iommu_flush_write_buffer(struct intel_iommu *iommu); #ifdef CONFIG_INTEL_IOMMU_SVM int intel_svm_init(struct intel_iommu *iommu); -int intel_svm_exit(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu); extern int intel_svm_finish_prq(struct intel_iommu *iommu); @@ -632,4 +697,23 @@ bool context_present(struct 
context_entry *context); struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, u8 devfn, int alloc); +#ifdef CONFIG_INTEL_IOMMU +extern int iommu_calculate_agaw(struct intel_iommu *iommu); +extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); +extern int dmar_disabled; +extern int intel_iommu_enabled; +extern int intel_iommu_tboot_noforce; +#else +static inline int iommu_calculate_agaw(struct intel_iommu *iommu) +{ + return 0; +} +static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) +{ + return 0; +} +#define dmar_disabled (1) +#define intel_iommu_enabled (0) +#endif + #endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 1d6711c28271..c672f34235e7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -247,10 +247,23 @@ struct irq_affinity_notify { * the MSI(-X) vector space * @post_vectors: Don't apply affinity to @post_vectors at end of * the MSI(-X) vector space + * @nr_sets: Length of passed in *sets array + * @sets: Number of affinitized sets */ struct irq_affinity { int pre_vectors; int post_vectors; + int nr_sets; + int *sets; +}; + +/** + * struct irq_affinity_desc - Interrupt affinity descriptor + * @mask: cpumask to hold the affinity assignment + */ +struct irq_affinity_desc { + struct cpumask mask; + unsigned int is_managed : 1; }; #if defined(CONFIG_SMP) @@ -299,7 +312,9 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); -struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); +struct irq_affinity_desc * +irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); + int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd); #else /* CONFIG_SMP */ @@ -333,7 +348,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) return 0; } -static inline struct cpumask * +static inline struct irq_affinity_desc * irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) { return NULL; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index a1d28f42cb77..e90da6b6f3d1 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -168,8 +168,8 @@ struct iommu_resv_region { * @map: map a physically contiguous memory region to an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain - * @tlb_range_add: Add a given iova range to the flush queue for this domain - * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush + * @iotlb_range_add: Add a given iova range to the flush queue for this domain + * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush * queue * @iova_to_phys: translate iova to physical address * @add_device: add device to iommu grouping @@ -398,6 +398,20 @@ void iommu_fwspec_free(struct device *dev); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); +static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) +{ + return dev->iommu_fwspec; +} + +static inline void dev_iommu_fwspec_set(struct device *dev, + struct iommu_fwspec *fwspec) +{ + dev->iommu_fwspec = fwspec; +} + +int iommu_probe_device(struct device *dev); +void iommu_release_device(struct device *dev); + #else /* 
CONFIG_IOMMU_API */ struct iommu_ops {}; diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index 9e30ed6443db..e9bfe6972aed 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -71,6 +71,19 @@ static inline int task_nice_ioclass(struct task_struct *task) } /* + * If the calling process has set an I/O priority, use that. Otherwise, return + * the default I/O priority. + */ +static inline int get_current_ioprio(void) +{ + struct io_context *ioc = current->io_context; + + if (ioc) + return ioc->ioprio; + return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); +} + +/* * For inheritance, return the highest of the two given priorities */ extern int ioprio_best(unsigned short aprio, unsigned short bprio); diff --git a/include/linux/irq.h b/include/linux/irq.h index c9bffda04a45..def2b2aac8b1 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -27,6 +27,7 @@ struct seq_file; struct module; struct msi_msg; +struct irq_affinity_desc; enum irqchip_irq_state; /* @@ -834,11 +835,12 @@ struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) unsigned int arch_dynirq_lower_bound(unsigned int from); int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, - struct module *owner, const struct cpumask *affinity); + struct module *owner, + const struct irq_affinity_desc *affinity); int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, unsigned int cnt, int node, struct module *owner, - const struct cpumask *affinity); + const struct irq_affinity_desc *affinity); /* use macros to avoid needing export.h for THIS_MODULE */ #define irq_alloc_descs(irq, from, cnt, node) \ diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h index 630a57e55db6..4500d453a63e 100644 --- a/include/linux/irq_sim.h +++ b/include/linux/irq_sim.h @@ -16,7 +16,7 @@ struct irq_sim_work_ctx { struct irq_work work; - int irq; + unsigned long *pending; }; struct irq_sim_irq_ctx { diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 89c34b200671..950e4b2458f0 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -19,7 +19,7 @@ * the association between their DT compatible string and their * initialization function. * - * @name: name that must be unique accross all IRQCHIP_DECLARE of the + * @name: name that must be unique across all IRQCHIP_DECLARE of the * same file. * @compstr: compatible string of the irqchip driver * @fn: initialization function @@ -30,7 +30,7 @@ * This macro must be used by the different irqchip drivers to declare * the association between their version and their initialization function. * - * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the + * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE of the * same file. * @subtable: Subtable to be identified in MADT * @validate: Function to be called on that subtable to check its validity. diff --git a/include/linux/irqchip/irq-madera.h b/include/linux/irqchip/irq-madera.h new file mode 100644 index 000000000000..1160fa3769ae --- /dev/null +++ b/include/linux/irqchip/irq-madera.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Interrupt support for Cirrus Logic Madera codecs + * + * Copyright (C) 2016-2018 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. 
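[Editor's sketch] As a usage note for the get_current_ioprio() helper added to ioprio.h above: a request submitter can seed a new command's priority from the current task, falling back to the IOPRIO_CLASS_NONE value the helper already encodes. struct bar_cmd and the urgent flag are hypothetical.

static void bar_init_cmd_prio(struct bar_cmd *cmd)
{
	/* Task ioprio if set, otherwise IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0). */
	cmd->ioprio = get_current_ioprio();

	if (IOPRIO_PRIO_CLASS(cmd->ioprio) == IOPRIO_CLASS_RT)
		cmd->flags |= BAR_CMD_URGENT;	/* hypothetical fast-path flag */
}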
+ */ + +#ifndef IRQCHIP_MADERA_H +#define IRQCHIP_MADERA_H + +#include <linux/interrupt.h> +#include <linux/mfd/madera/core.h> + +#define MADERA_IRQ_FLL1_LOCK 0 +#define MADERA_IRQ_FLL2_LOCK 1 +#define MADERA_IRQ_FLL3_LOCK 2 +#define MADERA_IRQ_FLLAO_LOCK 3 +#define MADERA_IRQ_CLK_SYS_ERR 4 +#define MADERA_IRQ_CLK_ASYNC_ERR 5 +#define MADERA_IRQ_CLK_DSP_ERR 6 +#define MADERA_IRQ_HPDET 7 +#define MADERA_IRQ_MICDET1 8 +#define MADERA_IRQ_MICDET2 9 +#define MADERA_IRQ_JD1_RISE 10 +#define MADERA_IRQ_JD1_FALL 11 +#define MADERA_IRQ_JD2_RISE 12 +#define MADERA_IRQ_JD2_FALL 13 +#define MADERA_IRQ_MICD_CLAMP_RISE 14 +#define MADERA_IRQ_MICD_CLAMP_FALL 15 +#define MADERA_IRQ_DRC2_SIG_DET 16 +#define MADERA_IRQ_DRC1_SIG_DET 17 +#define MADERA_IRQ_ASRC1_IN1_LOCK 18 +#define MADERA_IRQ_ASRC1_IN2_LOCK 19 +#define MADERA_IRQ_ASRC2_IN1_LOCK 20 +#define MADERA_IRQ_ASRC2_IN2_LOCK 21 +#define MADERA_IRQ_DSP_IRQ1 22 +#define MADERA_IRQ_DSP_IRQ2 23 +#define MADERA_IRQ_DSP_IRQ3 24 +#define MADERA_IRQ_DSP_IRQ4 25 +#define MADERA_IRQ_DSP_IRQ5 26 +#define MADERA_IRQ_DSP_IRQ6 27 +#define MADERA_IRQ_DSP_IRQ7 28 +#define MADERA_IRQ_DSP_IRQ8 29 +#define MADERA_IRQ_DSP_IRQ9 30 +#define MADERA_IRQ_DSP_IRQ10 31 +#define MADERA_IRQ_DSP_IRQ11 32 +#define MADERA_IRQ_DSP_IRQ12 33 +#define MADERA_IRQ_DSP_IRQ13 34 +#define MADERA_IRQ_DSP_IRQ14 35 +#define MADERA_IRQ_DSP_IRQ15 36 +#define MADERA_IRQ_DSP_IRQ16 37 +#define MADERA_IRQ_HP1L_SC 38 +#define MADERA_IRQ_HP1R_SC 39 +#define MADERA_IRQ_HP2L_SC 40 +#define MADERA_IRQ_HP2R_SC 41 +#define MADERA_IRQ_HP3L_SC 42 +#define MADERA_IRQ_HP3R_SC 43 +#define MADERA_IRQ_SPKOUTL_SC 44 +#define MADERA_IRQ_SPKOUTR_SC 45 +#define MADERA_IRQ_HP1L_ENABLE_DONE 46 +#define MADERA_IRQ_HP1R_ENABLE_DONE 47 +#define MADERA_IRQ_HP2L_ENABLE_DONE 48 +#define MADERA_IRQ_HP2R_ENABLE_DONE 49 +#define MADERA_IRQ_HP3L_ENABLE_DONE 50 +#define MADERA_IRQ_HP3R_ENABLE_DONE 51 +#define MADERA_IRQ_SPKOUTL_ENABLE_DONE 52 +#define MADERA_IRQ_SPKOUTR_ENABLE_DONE 53 +#define MADERA_IRQ_SPK_SHUTDOWN 54 +#define MADERA_IRQ_SPK_OVERHEAT 55 +#define MADERA_IRQ_SPK_OVERHEAT_WARN 56 +#define MADERA_IRQ_GPIO1 57 +#define MADERA_IRQ_GPIO2 58 +#define MADERA_IRQ_GPIO3 59 +#define MADERA_IRQ_GPIO4 60 +#define MADERA_IRQ_GPIO5 61 +#define MADERA_IRQ_GPIO6 62 +#define MADERA_IRQ_GPIO7 63 +#define MADERA_IRQ_GPIO8 64 +#define MADERA_IRQ_DSP1_BUS_ERR 65 +#define MADERA_IRQ_DSP2_BUS_ERR 66 +#define MADERA_IRQ_DSP3_BUS_ERR 67 +#define MADERA_IRQ_DSP4_BUS_ERR 68 +#define MADERA_IRQ_DSP5_BUS_ERR 69 +#define MADERA_IRQ_DSP6_BUS_ERR 70 +#define MADERA_IRQ_DSP7_BUS_ERR 71 + +#define MADERA_NUM_IRQ 72 + +/* + * These wrapper functions are for use by other child drivers of the + * same parent MFD. 
+ */ +static inline int madera_get_irq_mapping(struct madera *madera, int irq) +{ + if (!madera->irq_dev) + return -ENODEV; + + return regmap_irq_get_virq(madera->irq_data, irq); +} + +static inline int madera_request_irq(struct madera *madera, int irq, + const char *name, + irq_handler_t handler, void *data) +{ + irq = madera_get_irq_mapping(madera, irq); + if (irq < 0) + return irq; + + return request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT, name, + data); +} + +static inline void madera_free_irq(struct madera *madera, int irq, void *data) +{ + irq = madera_get_irq_mapping(madera, irq); + if (irq < 0) + return; + + free_irq(irq, data); +} + +static inline int madera_set_irq_wake(struct madera *madera, int irq, int on) +{ + irq = madera_get_irq_mapping(madera, irq); + if (irq < 0) + return irq; + + return irq_set_irq_wake(irq, on); +} + +#endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 068aa46f0d55..35965f41d7be 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -43,6 +43,7 @@ struct irq_chip; struct irq_data; struct cpumask; struct seq_file; +struct irq_affinity_desc; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 @@ -266,7 +267,7 @@ extern bool irq_domain_check_msi_remap(void); extern void irq_set_default_host(struct irq_domain *host); extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node, - const struct cpumask *affinity); + const struct irq_affinity_desc *affinity); static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) { @@ -449,7 +450,8 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, - bool realloc, const struct cpumask *affinity); + bool realloc, + const struct irq_affinity_desc *affinity); extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early); extern void irq_domain_deactivate_irq(struct irq_data *irq_data); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index b708e5169d1d..0f919d5fe84f 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -575,6 +575,7 @@ struct transaction_s enum { T_RUNNING, T_LOCKED, + T_SWITCH, T_FLUSH, T_COMMIT, T_COMMIT_DFLUSH, @@ -662,13 +663,13 @@ struct transaction_s /* * Number of outstanding updates running on this transaction - * [t_handle_lock] + * [none] */ atomic_t t_updates; /* * Number of buffers reserved for use by all handles in this transaction - * handle but not yet modified. [t_handle_lock] + * handle but not yet modified. [none] */ atomic_t t_outstanding_credits; @@ -690,7 +691,7 @@ struct transaction_s ktime_t t_start_time; /* - * How many handles used this transaction? [t_handle_lock] + * How many handles used this transaction? 
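[Editor's sketch] Looping back to the irq-madera.h wrappers added above, a hedged sketch of how a sibling Madera MFD child driver could hook the headphone-detect interrupt. bar_hpdet_thread() and the calling context are illustrative only.

static irqreturn_t bar_hpdet_thread(int irq, void *data)
{
	/* Threaded handler: read the HPDET status registers here. */
	return IRQ_HANDLED;
}

static int bar_enable_hpdet(struct madera *madera, void *ctx)
{
	int ret;

	ret = madera_request_irq(madera, MADERA_IRQ_HPDET, "HPDET",
				 bar_hpdet_thread, ctx);
	if (ret)
		return ret;

	ret = madera_set_irq_wake(madera, MADERA_IRQ_HPDET, 1);
	if (ret)
		madera_free_irq(madera, MADERA_IRQ_HPDET, ctx);

	return ret;
}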
[none] */ atomic_t t_handle_count; diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 46aae129917c..b40ea104dd36 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -14,13 +14,13 @@ struct task_struct; #include <asm/kasan.h> #include <asm/pgtable.h> -extern unsigned char kasan_zero_page[PAGE_SIZE]; -extern pte_t kasan_zero_pte[PTRS_PER_PTE]; -extern pmd_t kasan_zero_pmd[PTRS_PER_PMD]; -extern pud_t kasan_zero_pud[PTRS_PER_PUD]; -extern p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D]; +extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; +extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE]; +extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD]; +extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD]; +extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D]; -int kasan_populate_zero_shadow(const void *shadow_start, +int kasan_populate_early_shadow(const void *shadow_start, const void *shadow_end); static inline void *kasan_mem_to_shadow(const void *addr) @@ -45,22 +45,24 @@ void kasan_free_pages(struct page *page, unsigned int order); void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags); -void kasan_cache_shrink(struct kmem_cache *cache); -void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_poison_slab(struct page *page); void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); void kasan_poison_object_data(struct kmem_cache *cache, void *object); -void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); +void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, + const void *object); -void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); +void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, + gfp_t flags); void kasan_kfree_large(void *ptr, unsigned long ip); void kasan_poison_kfree(void *ptr, unsigned long ip); -void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, - gfp_t flags); -void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); +void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object, + size_t size, gfp_t flags); +void * __must_check kasan_krealloc(const void *object, size_t new_size, + gfp_t flags); -void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); +void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object, + gfp_t flags); bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); struct kasan_cache { @@ -97,27 +99,40 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {} static inline void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags) {} -static inline void kasan_cache_shrink(struct kmem_cache *cache) {} -static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} static inline void kasan_poison_slab(struct page *page) {} static inline void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) {} static inline void kasan_poison_object_data(struct kmem_cache *cache, void *object) {} -static inline void kasan_init_slab_obj(struct kmem_cache *cache, - const void *object) {} +static inline void *kasan_init_slab_obj(struct kmem_cache *cache, + const void *object) +{ + return (void *)object; +} -static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} +static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) +{ + return ptr; +} static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} static inline void 
kasan_poison_kfree(void *ptr, unsigned long ip) {} -static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, - size_t size, gfp_t flags) {} -static inline void kasan_krealloc(const void *object, size_t new_size, - gfp_t flags) {} +static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object, + size_t size, gfp_t flags) +{ + return (void *)object; +} +static inline void *kasan_krealloc(const void *object, size_t new_size, + gfp_t flags) +{ + return (void *)object; +} -static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, - gfp_t flags) {} +static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object, + gfp_t flags) +{ + return object; +} static inline bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip) { @@ -140,4 +155,40 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } #endif /* CONFIG_KASAN */ +#ifdef CONFIG_KASAN_GENERIC + +#define KASAN_SHADOW_INIT 0 + +void kasan_cache_shrink(struct kmem_cache *cache); +void kasan_cache_shutdown(struct kmem_cache *cache); + +#else /* CONFIG_KASAN_GENERIC */ + +static inline void kasan_cache_shrink(struct kmem_cache *cache) {} +static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} + +#endif /* CONFIG_KASAN_GENERIC */ + +#ifdef CONFIG_KASAN_SW_TAGS + +#define KASAN_SHADOW_INIT 0xFF + +void kasan_init_tags(void); + +void *kasan_reset_tag(const void *addr); + +void kasan_report(unsigned long addr, size_t size, + bool is_write, unsigned long ip); + +#else /* CONFIG_KASAN_SW_TAGS */ + +static inline void kasan_init_tags(void) { } + +static inline void *kasan_reset_tag(const void *addr) +{ + return (void *)addr; +} + +#endif /* CONFIG_KASAN_SW_TAGS */ + #endif /* LINUX_KASAN_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index d6aac75b51ba..8f0e68e250a7 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -527,6 +527,7 @@ static inline u32 int_sqrt64(u64 x) extern void bust_spinlocks(int yes); extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ extern int panic_timeout; +extern unsigned long panic_print; extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 9e4e638fb505..b9b1bc5f9669 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -143,6 +143,15 @@ extern const struct kexec_file_ops * const kexec_file_loaders[]; int kexec_image_probe_default(struct kimage *image, void *buf, unsigned long buf_len); +int kexec_image_post_load_cleanup_default(struct kimage *image); + +/* + * If kexec_buf.mem is set to this value, kexec_locate_mem_hole() + * will try to allocate free memory. Arch may overwrite it. 
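KEXEC_BUF_MEM_UNKNOWN, defined just below, is the sentinel a kexec_file image loader leaves in kexec_buf.mem when it wants the core code to find a free memory hole on its own. A rough sketch of that pattern, with a made-up my_load_segment() helper:

static int my_load_segment(struct kimage *image, void *buf, unsigned long len)
{
	struct kexec_buf kbuf = {
		.image     = image,
		.buffer    = buf,
		.bufsz     = len,
		.memsz     = len,
		.buf_align = PAGE_SIZE,
		.mem       = KEXEC_BUF_MEM_UNKNOWN, /* let the core pick a hole */
	};

	/* kexec_add_buffer() calls kexec_locate_mem_hole() internally */
	return kexec_add_buffer(&kbuf);
}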
+ */ +#ifndef KEXEC_BUF_MEM_UNKNOWN +#define KEXEC_BUF_MEM_UNKNOWN 0 +#endif /** * struct kexec_buf - parameters for finding a place for a buffer in memory @@ -174,6 +183,7 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); +void * __weak arch_kexec_kernel_image_load(struct kimage *image); int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, const Elf_Shdr *relsec, @@ -183,8 +193,6 @@ int __weak arch_kexec_apply_relocations(struct purgatory_info *pi, const Elf_Shdr *relsec, const Elf_Shdr *symtab); -int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, - int (*func)(struct resource *, void *)); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); diff --git a/include/linux/key.h b/include/linux/key.h index e58ee10f6e58..7099985e35a9 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -346,6 +346,9 @@ static inline key_serial_t key_serial(const struct key *key) extern void key_set_timeout(struct key *, unsigned); +extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, + key_perm_t perm); + /* * The permissions required on a key that we're looking up. */ diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index e465bb15912d..fbf144aaa749 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -177,22 +177,28 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code, struct pt_regs *regs); /** + * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU + * @ignored: This parameter is only here to match the prototype. + * + * If you're using the default implementation of kgdb_roundup_cpus() + * this function will be called per CPU. If you don't implement + * kgdb_call_nmi_hook() a default will be used. + */ + +extern void kgdb_call_nmi_hook(void *ignored); + +/** * kgdb_roundup_cpus - Get other CPUs into a holding pattern - * @flags: Current IRQ state * * On SMP systems, we need to get the attention of the other CPUs * and get them into a known state. This should do what is needed * to get the other CPUs to call kgdb_wait(). Note that on some arches, - * the NMI approach is not used for rounding up all the CPUs. For example, - * in case of MIPS, smp_call_function() is used to roundup CPUs. In - * this case, we have to make sure that interrupts are enabled before - * calling smp_call_function(). The argument to this function is - * the flags that will be used when restoring the interrupts. There is - * local_irq_save() call before kgdb_roundup_cpus(). + * the NMI approach is not used for rounding up all the CPUs. Normally + * those architectures can just not implement this and get the default. * * On non-SMP systems, this is not called. 
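In practice this means most architectures can simply drop their kgdb_roundup_cpus() implementation and inherit the generic one; an architecture that keeps its own IPI/NMI broadcast only has to make sure every CPU it reaches ends up in kgdb_nmicallback(). A hedged sketch of such arch code, where my_arch_send_debug_ipi() and its receive path are assumed arch-specific helpers:

void kgdb_roundup_cpus(void)
{
	/* raise a debug IPI/NMI on all CPUs except this one (assumed helper) */
	my_arch_send_debug_ipi(cpu_online_mask);
}

/* receive side of that IPI: park the CPU in the debugger */
static void my_arch_debug_ipi_handler(struct pt_regs *regs)
{
	kgdb_nmicallback(raw_smp_processor_id(), regs);
}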
*/ -extern void kgdb_roundup_cpus(unsigned long flags); +extern void kgdb_roundup_cpus(void); /** * kgdb_arch_set_pc - Generic call back to the program counter @@ -281,7 +287,7 @@ struct kgdb_io { int is_console; }; -extern struct kgdb_arch arch_kgdb_ops; +extern const struct kgdb_arch arch_kgdb_ops; extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index e909413e4e38..e07e91daaacc 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -242,10 +242,13 @@ extern int arch_init_kprobes(void); extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); +extern int arch_populate_kprobe_blacklist(void); extern bool arch_kprobe_on_func_entry(unsigned long offset); extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); extern bool within_kprobe_blacklist(unsigned long addr); +extern int kprobe_add_ksym_blacklist(unsigned long entry); +extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end); struct kprobe_insn_cache { struct mutex mutex; @@ -379,6 +382,9 @@ int enable_kprobe(struct kprobe *kp); void dump_kprobe(struct kprobe *kp); +void *alloc_insn_page(void); +void free_insn_page(void *page); + #else /* !CONFIG_KPROBES: */ static inline int kprobes_built_in(void) diff --git a/include/linux/kref.h b/include/linux/kref.h index 29220724bf1c..cb00a0268061 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -53,10 +53,7 @@ static inline void kref_get(struct kref *kref) * @release: pointer to the function that will clean up the object when the * last reference to the object is released. * This pointer is required, and it is not acceptable to pass kfree - * in as this function. If the caller does pass kfree to this - * function, you will be publicly mocked mercilessly by the kref - * maintainer, and anyone else who happens to notice it. You have - * been warned. + * in as this function. * * Decrement the refcount, and if 0, call release(). * Return 1 if the object was removed, otherwise return 0. 
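Even with the colourful warning gone, the rule stands: the release() callback passed to kref_put() must be a function that knows how to tear the whole object down, not bare kfree(). A small sketch with a hypothetical my_obj type that embeds the kref:

struct my_obj {
	struct kref refcount;
	/* ... payload ... */
};

static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, refcount);

	/* undo any other setup here, then free the object */
	kfree(obj);
}

static void my_obj_put(struct my_obj *obj)
{
	/* my_obj_release() runs when the last reference is dropped */
	kref_put(&obj->refcount, my_obj_release);
}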
Beware, if this diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c926698040e0..c38cc5eb7e73 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -449,6 +449,7 @@ struct kvm { #endif long tlbs_dirty; struct list_head devices; + bool manual_dirty_log_protect; struct dentry *debugfs_dentry; struct kvm_stat_data **debugfs_stat_data; struct srcu_struct srcu; @@ -694,7 +695,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - void *data, int offset, unsigned long len); + void *data, unsigned int offset, + unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); @@ -753,7 +755,9 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty); int kvm_get_dirty_log_protect(struct kvm *kvm, - struct kvm_dirty_log *log, bool *is_dirty); + struct kvm_dirty_log *log, bool *flush); +int kvm_clear_dirty_log_protect(struct kvm *kvm, + struct kvm_clear_dirty_log *log, bool *flush); void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, @@ -762,9 +766,13 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); +int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, + struct kvm_clear_dirty_log *log); int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status); +int kvm_vm_ioctl_enable_cap(struct kvm *kvm, + struct kvm_enable_cap *cap); long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); diff --git a/include/linux/lantiq.h b/include/linux/lantiq.h new file mode 100644 index 000000000000..67921169d84d --- /dev/null +++ b/include/linux/lantiq.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_LANTIQ_H +#define __LINUX_LANTIQ_H + +#ifdef CONFIG_LANTIQ +#include <lantiq_soc.h> +#else + +#ifndef LTQ_EARLY_ASC +#define LTQ_EARLY_ASC 0 +#endif + +#ifndef CPHYSADDR +#define CPHYSADDR(a) 0 +#endif + +static inline struct clk *clk_get_fpi(void) +{ + return NULL; +} +#endif /* CONFIG_LANTIQ */ +#endif /* __LINUX_LANTIQ_H */ diff --git a/include/linux/leds.h b/include/linux/leds.h index 7393a316d9fa..5263f87e1d2c 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -51,6 +51,7 @@ struct led_classdev { #define LED_PANIC_INDICATOR BIT(20) #define LED_BRIGHT_HW_CHANGED BIT(21) #define LED_RETAIN_AT_SHUTDOWN BIT(22) +#define LED_INIT_DEFAULT_TRIGGER BIT(23) /* set_brightness_work / blink_timer flags, atomic, private. 
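Returning to the new <linux/lantiq.h> wrapper a few hunks up: drivers shared between Lantiq and other platforms can now include it unconditionally, since the !CONFIG_LANTIQ stubs turn clk_get_fpi() into a NULL return and neutralise CPHYSADDR()/LTQ_EARLY_ASC. A hedged sketch; the helper and the "fpi" clock name are made up:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/lantiq.h>

static unsigned long my_bus_clk_rate(struct device *dev)
{
	struct clk *clk = clk_get_fpi();

	if (!clk)				/* non-Lantiq build */
		clk = devm_clk_get(dev, "fpi");	/* assumed clock name */

	return IS_ERR_OR_NULL(clk) ? 0 : clk_get_rate(clk);
}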
*/ unsigned long work_flags; @@ -487,4 +488,24 @@ struct led_pattern { int brightness; }; +enum led_audio { + LED_AUDIO_MUTE, /* master mute LED */ + LED_AUDIO_MICMUTE, /* mic mute LED */ + NUM_AUDIO_LEDS +}; + +#if IS_ENABLED(CONFIG_LEDS_TRIGGER_AUDIO) +enum led_brightness ledtrig_audio_get(enum led_audio type); +void ledtrig_audio_set(enum led_audio type, enum led_brightness state); +#else +static inline enum led_brightness ledtrig_audio_get(enum led_audio type) +{ + return LED_OFF; +} +static inline void ledtrig_audio_set(enum led_audio type, + enum led_brightness state) +{ +} +#endif + #endif /* __LINUX_LEDS_H_INCLUDED */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 38c95d66ab12..68133842e6d7 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -135,7 +135,6 @@ enum { ATA_SHT_EMULATED = 1, ATA_SHT_THIS_ID = -1, - ATA_SHT_USE_CLUSTERING = 1, /* struct ata_taskfile flags */ ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ @@ -1360,7 +1359,6 @@ extern struct device_attribute *ata_common_sdev_attrs[]; .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ .this_id = ATA_SHT_THIS_ID, \ .emulated = ATA_SHT_EMULATED, \ - .use_clustering = ATA_SHT_USE_CLUSTERING, \ .proc_name = drv_name, \ .slave_configure = ata_scsi_slave_config, \ .slave_destroy = ata_scsi_slave_destroy, \ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 097072c5a852..5440f11b0907 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -38,6 +38,10 @@ enum { NDD_UNARMED = 1, /* locked memory devices should not be accessed */ NDD_LOCKED = 2, + /* memory under security wipes should not be accessed */ + NDD_SECURITY_OVERWRITE = 3, + /* tracking whether or not there is a pending device reference */ + NDD_WORK_PENDING = 4, /* need to set a limit somewhere, but yes, this is likely overkill */ ND_IOCTL_MAX_BUFLEN = SZ_4M, @@ -87,7 +91,7 @@ struct nvdimm_bus_descriptor { ndctl_fn ndctl; int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, - struct nvdimm *nvdimm, unsigned int cmd); + struct nvdimm *nvdimm, unsigned int cmd, void *data); }; struct nd_cmd_desc { @@ -155,6 +159,46 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( } +enum nvdimm_security_state { + NVDIMM_SECURITY_DISABLED, + NVDIMM_SECURITY_UNLOCKED, + NVDIMM_SECURITY_LOCKED, + NVDIMM_SECURITY_FROZEN, + NVDIMM_SECURITY_OVERWRITE, +}; + +#define NVDIMM_PASSPHRASE_LEN 32 +#define NVDIMM_KEY_DESC_LEN 22 + +struct nvdimm_key_data { + u8 data[NVDIMM_PASSPHRASE_LEN]; +}; + +enum nvdimm_passphrase_type { + NVDIMM_USER, + NVDIMM_MASTER, +}; + +struct nvdimm_security_ops { + enum nvdimm_security_state (*state)(struct nvdimm *nvdimm, + enum nvdimm_passphrase_type pass_type); + int (*freeze)(struct nvdimm *nvdimm); + int (*change_key)(struct nvdimm *nvdimm, + const struct nvdimm_key_data *old_data, + const struct nvdimm_key_data *new_data, + enum nvdimm_passphrase_type pass_type); + int (*unlock)(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key_data); + int (*disable)(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key_data); + int (*erase)(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key_data, + enum nvdimm_passphrase_type pass_type); + int (*overwrite)(struct nvdimm *nvdimm, + const struct nvdimm_key_data *key_data); + int (*query_overwrite)(struct nvdimm *nvdimm); +}; + void badrange_init(struct badrange *badrange); int badrange_add(struct badrange *badrange, u64 addr, u64 length); void 
badrange_forget(struct badrange *badrange, phys_addr_t start, @@ -165,6 +209,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent, struct nvdimm_bus_descriptor *nfit_desc); void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); struct nvdimm_bus *to_nvdimm_bus(struct device *dev); +struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm); struct nvdimm *to_nvdimm(struct device *dev); struct nd_region *to_nd_region(struct device *dev); struct device *nd_region_dev(struct nd_region *nd_region); @@ -175,10 +220,21 @@ const char *nvdimm_name(struct nvdimm *nvdimm); struct kobject *nvdimm_kobj(struct nvdimm *nvdimm); unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm); void *nvdimm_provider_data(struct nvdimm *nvdimm); -struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, - const struct attribute_group **groups, unsigned long flags, - unsigned long cmd_mask, int num_flush, - struct resource *flush_wpq); +struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, + void *provider_data, const struct attribute_group **groups, + unsigned long flags, unsigned long cmd_mask, int num_flush, + struct resource *flush_wpq, const char *dimm_id, + const struct nvdimm_security_ops *sec_ops); +static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, + void *provider_data, const struct attribute_group **groups, + unsigned long flags, unsigned long cmd_mask, int num_flush, + struct resource *flush_wpq) +{ + return __nvdimm_create(nvdimm_bus, provider_data, groups, flags, + cmd_mask, num_flush, flush_wpq, NULL, NULL); +} + +int nvdimm_security_setup_events(struct nvdimm *nvdimm); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, @@ -204,6 +260,16 @@ u64 nd_fletcher64(void *addr, size_t len, bool le); void nvdimm_flush(struct nd_region *nd_region); int nvdimm_has_flush(struct nd_region *nd_region); int nvdimm_has_cache(struct nd_region *nd_region); +int nvdimm_in_overwrite(struct nvdimm *nvdimm); + +static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf, + unsigned int buf_len, int *cmd_rc) +{ + struct nvdimm_bus *nvdimm_bus = nvdimm_to_bus(nvdimm); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + + return nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, cmd_rc); +} #ifdef CONFIG_ARCH_HAS_PMEM_API #define ARCH_MEMREMAP_PMEM MEMREMAP_WB diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 2fdeac1a420d..5d865a5d5cdc 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -90,7 +90,7 @@ typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int, struct nvm_chk_meta *); typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *); -typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *); +typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int); typedef void (nvm_destroy_dma_pool_fn)(void *); typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, dma_addr_t *); @@ -357,6 +357,7 @@ struct nvm_geo { u32 clba; /* sectors per chunk */ u16 csecs; /* sector size */ u16 sos; /* out-of-band area size */ + bool ext; /* metadata in extended data buffer */ /* device write constrains */ u32 ws_min; /* minimum write size */ diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 7c47b1a471d4..7e020782ade2 100644 --- a/include/linux/linkage.h +++ 
b/include/linux/linkage.h @@ -79,6 +79,12 @@ #define ALIGN __ALIGN #define ALIGN_STR __ALIGN_STR +#ifndef GLOBAL +#define GLOBAL(name) \ + .globl name ASM_NL \ + name: +#endif + #ifndef ENTRY #define ENTRY(name) \ .globl name ASM_NL \ diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index 22443d7fb5cd..a99c58866860 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -57,6 +57,15 @@ static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr) __clear_bit(nr, addr); } +static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr, + int set) +{ + if (set) + linkmode_set_bit(nr, addr); + else + linkmode_clear_bit(nr, addr); +} + static inline void linkmode_change_bit(int nr, volatile unsigned long *addr) { __change_bit(nr, addr); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 1fd82ff99c65..c5335df2372f 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -97,8 +97,6 @@ struct lock_class { * Generation counter, when doing certain classes of graph walking, * to ensure that we check one node only once: */ - unsigned int version; - int name_version; const char *name; diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index aaeb7fa24dc4..9a0bdf91e646 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1461,9 +1461,10 @@ union security_list_options { int (*sb_alloc_security)(struct super_block *sb); void (*sb_free_security)(struct super_block *sb); - int (*sb_copy_data)(char *orig, char *copy); - int (*sb_remount)(struct super_block *sb, void *data); - int (*sb_kern_mount)(struct super_block *sb, int flags, void *data); + void (*sb_free_mnt_opts)(void *mnt_opts); + int (*sb_eat_lsm_opts)(char *orig, void **mnt_opts); + int (*sb_remount)(struct super_block *sb, void *mnt_opts); + int (*sb_kern_mount)(struct super_block *sb); int (*sb_show_options)(struct seq_file *m, struct super_block *sb); int (*sb_statfs)(struct dentry *dentry); int (*sb_mount)(const char *dev_name, const struct path *path, @@ -1471,14 +1472,15 @@ union security_list_options { int (*sb_umount)(struct vfsmount *mnt, int flags); int (*sb_pivotroot)(const struct path *old_path, const struct path *new_path); int (*sb_set_mnt_opts)(struct super_block *sb, - struct security_mnt_opts *opts, + void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags); int (*sb_clone_mnt_opts)(const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags); - int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts); + int (*sb_add_mnt_opt)(const char *option, const char *val, int len, + void **mnt_opts); int (*dentry_init_security)(struct dentry *dentry, int mode, const struct qstr *name, void **ctx, u32 *ctxlen); @@ -1800,7 +1802,8 @@ struct security_hook_heads { struct hlist_head bprm_committed_creds; struct hlist_head sb_alloc_security; struct hlist_head sb_free_security; - struct hlist_head sb_copy_data; + struct hlist_head sb_free_mnt_opts; + struct hlist_head sb_eat_lsm_opts; struct hlist_head sb_remount; struct hlist_head sb_kern_mount; struct hlist_head sb_show_options; @@ -1810,7 +1813,7 @@ struct security_hook_heads { struct hlist_head sb_pivotroot; struct hlist_head sb_set_mnt_opts; struct hlist_head sb_clone_mnt_opts; - struct hlist_head sb_parse_opts_str; + struct hlist_head sb_add_mnt_opt; struct hlist_head dentry_init_security; struct hlist_head dentry_create_files_as; #ifdef CONFIG_SECURITY_PATH diff --git 
a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h index 44348710953f..faa7da3c9c8b 100644 --- a/include/linux/mailbox_client.h +++ b/include/linux/mailbox_client.h @@ -44,6 +44,7 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl, const char *name); struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index); int mbox_send_message(struct mbox_chan *chan, void *mssg); +int mbox_flush(struct mbox_chan *chan, unsigned long timeout); void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */ void mbox_free_channel(struct mbox_chan *chan); /* may sleep */ diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h index 74deadb42d76..4994a438444c 100644 --- a/include/linux/mailbox_controller.h +++ b/include/linux/mailbox_controller.h @@ -24,6 +24,9 @@ struct mbox_chan; * transmission of data is reported by the controller via * mbox_chan_txdone (if it has some TX ACK irq). It must not * sleep. + * @flush: Called when a client requests transmissions to be blocking but + * the context doesn't allow sleeping. Typically the controller + * will implement a busy loop waiting for the data to flush out. * @startup: Called when a client requests the chan. The controller * could ask clients for additional parameters of communication * to be provided via client's chan_data. This call may @@ -46,6 +49,7 @@ struct mbox_chan; */ struct mbox_chan_ops { int (*send_data)(struct mbox_chan *chan, void *data); + int (*flush)(struct mbox_chan *chan, unsigned long timeout); int (*startup)(struct mbox_chan *chan); void (*shutdown)(struct mbox_chan *chan); bool (*last_tx_done)(struct mbox_chan *chan); @@ -131,4 +135,9 @@ void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */ void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */ void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */ +int devm_mbox_controller_register(struct device *dev, + struct mbox_controller *mbox); +void devm_mbox_controller_unregister(struct device *dev, + struct mbox_controller *mbox); + #endif /* __MAILBOX_CONTROLLER_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index aee299a6aa76..64c41cf45590 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -154,7 +154,6 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags, void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, phys_addr_t *out_end); -void __memblock_free_early(phys_addr_t base, phys_addr_t size); void __memblock_free_late(phys_addr_t base, phys_addr_t size); /** @@ -320,6 +319,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r) /* Flags for memblock allocation APIs */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) #define MEMBLOCK_ALLOC_ACCESSIBLE 0 +#define MEMBLOCK_ALLOC_KASAN 1 /* We are using top down, so it is safe to use 0 here */ #define MEMBLOCK_LOW_LIMIT 0 @@ -414,13 +414,13 @@ static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size, static inline void __init memblock_free_early(phys_addr_t base, phys_addr_t size) { - __memblock_free_early(base, size); + memblock_free(base, size); } static inline void __init memblock_free_early_nid(phys_addr_t base, phys_addr_t size, int nid) { - __memblock_free_early(base, size); + memblock_free(base, size); } static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size) diff --git 
a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 7ab2120155a4..83ae11cbd12c 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -526,9 +526,11 @@ void mem_cgroup_handle_over_high(void); unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); -void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, +void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p); +void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg); + static inline void mem_cgroup_enter_user_fault(void) { WARN_ON(current->in_user_fault); @@ -970,7 +972,12 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) } static inline void -mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) +mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) +{ +} + +static inline void +mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) { } diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ffd9cd10fcf3..07da5c6c5ba0 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -107,8 +107,8 @@ static inline bool movable_node_is_enabled(void) } #ifdef CONFIG_MEMORY_HOTREMOVE -extern int arch_remove_memory(u64 start, u64 size, - struct vmem_altmap *altmap); +extern int arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap); extern int __remove_pages(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); #endif /* CONFIG_MEMORY_HOTREMOVE */ @@ -326,15 +326,14 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, void *arg, int (*func)(struct memory_block *, void *)); extern int __add_memory(int nid, u64 start, u64 size); extern int add_memory(int nid, u64 start, u64 size); -extern int add_memory_resource(int nid, struct resource *resource, bool online); +extern int add_memory_resource(int nid, struct resource *resource); extern int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, bool want_memblock); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); -extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern bool is_memblock_offlined(struct memory_block *mem); -extern int sparse_add_one_section(struct pglist_data *pgdat, - unsigned long start_pfn, struct vmem_altmap *altmap); +extern int sparse_add_one_section(int nid, unsigned long start_pfn, + struct vmem_altmap *altmap); extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, unsigned long map_offset, struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index bac395f1d00a..5228c62af416 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, struct mempolicy *get_task_policy(struct task_struct *p); struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr); -struct mempolicy *get_vma_policy(struct vm_area_struct *vma, - unsigned long addr); bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 0ac69ddf5fc4..f0628660d541 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -4,8 +4,6 @@ #include 
<linux/ioport.h> #include <linux/percpu-refcount.h> -#include <asm/pgtable.h> - struct resource; struct device; @@ -66,62 +64,34 @@ enum memory_type { }; /* - * For MEMORY_DEVICE_PRIVATE we use ZONE_DEVICE and extend it with two - * callbacks: - * page_fault() - * page_free() - * * Additional notes about MEMORY_DEVICE_PRIVATE may be found in * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief * explanation in include/linux/memory_hotplug.h. * - * The page_fault() callback must migrate page back, from device memory to - * system memory, so that the CPU can access it. This might fail for various - * reasons (device issues, device have been unplugged, ...). When such error - * conditions happen, the page_fault() callback must return VM_FAULT_SIGBUS and - * set the CPU page table entry to "poisoned". - * - * Note that because memory cgroup charges are transferred to the device memory, - * this should never fail due to memory restrictions. However, allocation - * of a regular system page might still fail because we are out of memory. If - * that happens, the page_fault() callback must return VM_FAULT_OOM. - * - * The page_fault() callback can also try to migrate back multiple pages in one - * chunk, as an optimization. It must, however, prioritize the faulting address - * over all the others. - * - * * The page_free() callback is called once the page refcount reaches 1 * (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug. * This allows the device driver to implement its own memory management.) - * - * For MEMORY_DEVICE_PUBLIC only the page_free() callback matter. */ -typedef int (*dev_page_fault_t)(struct vm_area_struct *vma, - unsigned long addr, - const struct page *page, - unsigned int flags, - pmd_t *pmdp); typedef void (*dev_page_free_t)(struct page *page, void *data); /** * struct dev_pagemap - metadata for ZONE_DEVICE mappings - * @page_fault: callback when CPU fault on an unaddressable device page * @page_free: free page callback when page refcount reaches 1 * @altmap: pre-allocated/reserved memory for vmemmap allocations * @res: physical address range covered by @ref * @ref: reference count that pins the devm_memremap_pages() mapping + * @kill: callback to transition @ref to the dead state * @dev: host device of the mapping for debug * @data: private data pointer for page_free() * @type: memory type: see MEMORY_* in memory_hotplug.h */ struct dev_pagemap { - dev_page_fault_t page_fault; dev_page_free_t page_free; struct vmem_altmap altmap; bool altmap_valid; struct resource res; struct percpu_ref *ref; + void (*kill)(struct percpu_ref *ref); struct device *dev; void *data; enum memory_type type; diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index 517e60eecbcb..a353cd22b388 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -35,7 +35,7 @@ enum axp20x_variants { #define AXP152_ALDO_OP_MODE 0x13 #define AXP152_LDO0_CTRL 0x15 #define AXP152_DCDC2_V_OUT 0x23 -#define AXP152_DCDC2_V_SCAL 0x25 +#define AXP152_DCDC2_V_RAMP 0x25 #define AXP152_DCDC1_V_OUT 0x26 #define AXP152_DCDC3_V_OUT 0x27 #define AXP152_ALDO12_V_OUT 0x28 @@ -53,7 +53,7 @@ enum axp20x_variants { #define AXP20X_USB_OTG_STATUS 0x02 #define AXP20X_PWR_OUT_CTRL 0x12 #define AXP20X_DCDC2_V_OUT 0x23 -#define AXP20X_DCDC2_LDO3_V_SCAL 0x25 +#define AXP20X_DCDC2_LDO3_V_RAMP 0x25 #define AXP20X_DCDC3_V_OUT 0x27 #define AXP20X_LDO24_V_OUT 0x28 #define AXP20X_LDO3_V_OUT 0x29 @@ -266,6 +266,7 @@ enum axp20x_variants { #define AXP288_RT_BATT_V_H 
0xa0 #define AXP288_RT_BATT_V_L 0xa1 +#define AXP813_ACIN_PATH_CTRL 0x3a #define AXP813_ADC_RATE 0x85 /* Fuel Gauge */ diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index 6c1ad160ed87..c1b25f5e386d 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -440,6 +440,7 @@ #define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) #define IMX6SX_GPR12_PCIE_TEST_POWERDOWN BIT(30) +#define IMX6SX_GPR12_PCIE_PM_TURN_OFF BIT(16) #define IMX6SX_GPR12_PCIE_RX_EQ_MASK (0x7 << 0) #define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0) diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 1e70060c92ce..e2687a30e5a1 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -54,12 +54,8 @@ * idle before writing to some registers. */ #define TMIO_MMC_HAS_IDLE_WAIT BIT(4) -/* - * A GPIO is used for card hotplug detection. We need an extra flag for this, - * because 0 is a valid GPIO number too, and requiring users to specify - * cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility. - */ -#define TMIO_MMC_USE_GPIO_CD BIT(5) + +/* BIT(5) is unused */ /* * Some controllers have CMD12 automatically @@ -104,7 +100,6 @@ struct tmio_mmc_data { unsigned long capabilities2; unsigned long flags; u32 ocr_mask; /* available voltages */ - unsigned int cd_gpio; int alignment_shift; dma_addr_t dma_rx_offset; unsigned int max_blk_count; diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h index b19c370fe81a..f346167c0e00 100644 --- a/include/linux/mfd/wm8994/pdata.h +++ b/include/linux/mfd/wm8994/pdata.h @@ -20,9 +20,6 @@ #define WM8994_NUM_AIF 3 struct wm8994_ldo_pdata { - /** GPIOs to enable regulator, 0 or less if not available */ - int enable; - const struct regulator_init_data *init_data; }; diff --git a/include/linux/migrate.h b/include/linux/migrate.h index f2b4abbca55e..e13d9bf2f9a5 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -29,7 +29,7 @@ enum migrate_reason { }; /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */ -extern char *migrate_reason_names[MR_TYPES]; +extern const char *migrate_reason_names[MR_TYPES]; static inline struct page *new_page_nodemask(struct page *page, int preferred_nid, nodemask_t *nodemask) @@ -77,8 +77,7 @@ extern void migrate_page_copy(struct page *newpage, struct page *page); extern int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page); extern int migrate_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page, - struct buffer_head *head, enum migrate_mode mode, + struct page *newpage, struct page *page, enum migrate_mode mode, int extra_count); #else diff --git a/include/linux/mii.h b/include/linux/mii.h index 2da85b02e1c0..6fee8b1a4400 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -209,7 +209,7 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) /** * linkmode_adv_to_mii_ctrl1000_t - * advertising: the linkmode advertisement settings + * @advertising: the linkmode advertisement settings * * A small helper function that translates linkmode advertisement * settings to phy autonegotiation advertisements for the @@ -288,6 +288,25 @@ static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) } /** + * mii_stat1000_mod_linkmode_lpa_t + * @advertising: target the linkmode advertisement settings + * @adv: value of the MII_STAT1000 register + * + * A small helper 
function that translates MII_STAT1000 bits, when in + * 1000Base-T mode, to linkmode advertisement settings. Other bits in + * advertising are not changed. + */ +static inline void mii_stat1000_mod_linkmode_lpa_t(unsigned long *advertising, + u32 lpa) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + advertising, lpa & LPA_1000HALF); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + advertising, lpa & LPA_1000FULL); +} + +/** * ethtool_adv_to_mii_adv_x * @ethadv: the ethtool advertisement settings * @@ -354,50 +373,104 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) } /** + * mii_adv_mod_linkmode_adv_t + * @advertising:pointer to destination link mode. + * @adv: value of the MII_ADVERTISE register + * + * A small helper function that translates MII_ADVERTISE bits to + * linkmode advertisement settings. Leaves other bits unchanged. + */ +static inline void mii_adv_mod_linkmode_adv_t(unsigned long *advertising, + u32 adv) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + advertising, adv & ADVERTISE_10HALF); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + advertising, adv & ADVERTISE_10FULL); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + advertising, adv & ADVERTISE_100HALF); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + advertising, adv & ADVERTISE_100FULL); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising, + adv & ADVERTISE_PAUSE_CAP); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + advertising, adv & ADVERTISE_PAUSE_ASYM); +} + +/** * mii_adv_to_linkmode_adv_t * @advertising:pointer to destination link mode. * @adv: value of the MII_ADVERTISE register * * A small helper function that translates MII_ADVERTISE bits - * to linkmode advertisement settings. + * to linkmode advertisement settings. Clears the old value + * of advertising. */ static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising, u32 adv) { linkmode_zero(advertising); - if (adv & ADVERTISE_10HALF) - linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, - advertising); - if (adv & ADVERTISE_10FULL) - linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, - advertising); - if (adv & ADVERTISE_100HALF) - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - advertising); - if (adv & ADVERTISE_100FULL) - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - advertising); - if (adv & ADVERTISE_PAUSE_CAP) - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising); - if (adv & ADVERTISE_PAUSE_ASYM) - linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising); + mii_adv_mod_linkmode_adv_t(advertising, adv); +} + +/** + * mii_lpa_to_linkmode_lpa_t + * @adv: value of the MII_LPA register + * + * A small helper function that translates MII_LPA bits, when in + * 1000Base-T mode, to linkmode LP advertisement settings. Clears the + * old value of advertising + */ +static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising, + u32 lpa) +{ + mii_adv_to_linkmode_adv_t(lp_advertising, lpa); + + if (lpa & LPA_LPACK) + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + lp_advertising); + +} + +/** + * mii_lpa_mod_linkmode_lpa_t + * @adv: value of the MII_LPA register + * + * A small helper function that translates MII_LPA bits, when in + * 1000Base-T mode, to linkmode LP advertisement settings. Leaves + * other bits unchanged.
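The _mod_ helpers above (and the one whose definition follows) deliberately touch only the bits they own, so a PHY driver can layer several register decodes into one link-mode bitmap, while the older _to_ variants keep the zero-then-fill semantics. A minimal sketch, where my_decode_lpa() and the caller-owned bitmap are hypothetical:

static void my_decode_lpa(unsigned long *lp_advertising, u16 lpa, u16 stat1000)
{
	/* 10/100, pause and autoneg-ack bits from MII_LPA */
	mii_lpa_mod_linkmode_lpa_t(lp_advertising, lpa);

	/* overlay the 1000Base-T half/full bits from MII_STAT1000 */
	mii_stat1000_mod_linkmode_lpa_t(lp_advertising, stat1000);
}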
+ */ +static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising, + u32 lpa) +{ + mii_adv_mod_linkmode_adv_t(lp_advertising, lpa); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + lp_advertising, lpa & LPA_LPACK); } /** - * ethtool_adv_to_lcl_adv_t - * @advertising:pointer to ethtool advertising + * linkmode_adv_to_lcl_adv_t + * @advertising:pointer to linkmode advertising * - * A small helper function that translates ethtool advertising to LVL + * A small helper function that translates linkmode advertising to LVL * pause capabilities. */ -static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising) +static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising) { u32 lcl_adv = 0; - if (advertising & ADVERTISED_Pause) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + advertising)) lcl_adv |= ADVERTISE_PAUSE_CAP; - if (advertising & ADVERTISED_Asym_Pause) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + advertising)) lcl_adv |= ADVERTISE_PAUSE_ASYM; return lcl_adv; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index dca6ab4eaa99..36e412c3d657 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -226,6 +226,7 @@ enum { MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37, MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38, MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39, + MLX4_DEV_CAP_FLAG2_SW_CQ_INIT = 1ULL << 40, }; enum { @@ -1136,7 +1137,8 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, - unsigned vector, int collapsed, int timestamp_en); + unsigned int vector, int collapsed, int timestamp_en, + void *buf_addr, bool user_cq); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base, u8 flags, u8 usage); diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 31a750570c38..612c8c2f2466 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -60,7 +60,7 @@ struct mlx5_core_cq { } tasklet_ctx; int reset_notify_added; struct list_head reset_notify; - struct mlx5_eq *eq; + struct mlx5_eq_comp *eq; u16 uid; }; @@ -125,9 +125,9 @@ struct mlx5_cq_modify_params { }; enum { - CQE_SIZE_64 = 0, - CQE_SIZE_128 = 1, - CQE_SIZE_128_PAD = 2, + CQE_STRIDE_64 = 0, + CQE_STRIDE_128 = 1, + CQE_STRIDE_128_PAD = 2, }; #define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1) @@ -135,8 +135,8 @@ enum { static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en) { - return padding_128_en ? CQE_SIZE_128_PAD : - size == 64 ? CQE_SIZE_64 : CQE_SIZE_128; + return padding_128_en ? CQE_STRIDE_128_PAD : + size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128; } static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index b4c0457fbebd..8c4a820bd4c1 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -212,6 +212,13 @@ enum { MLX5_PFAULT_SUBTYPE_RDMA = 1, }; +enum wqe_page_fault_type { + MLX5_WQE_PF_TYPE_RMP = 0, + MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1, + MLX5_WQE_PF_TYPE_RESP = 2, + MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3, +}; + enum { MLX5_PERM_LOCAL_READ = 1 << 2, MLX5_PERM_LOCAL_WRITE = 1 << 3, @@ -294,9 +301,15 @@ enum { MLX5_EVENT_QUEUE_TYPE_DCT = 6, }; +/* mlx5 components can subscribe to any one of these events via + * mlx5_eq_notifier_register API.
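A hedged sketch of how an mlx5 sub-component might subscribe to one of these firmware events, using the mlx5_nb/MLX5_NB_INIT helpers and mlx5_notifier_register() introduced later in this diff; my_ctx, my_port_handler and the choice of PORT_CHANGE are illustrative only:

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>

struct my_ctx {
	struct mlx5_nb port_nb;
};

static int my_port_handler(struct notifier_block *nb,
			   unsigned long event, void *eqe)
{
	struct my_ctx *ctx = mlx5_nb_cof(nb, struct my_ctx, port_nb);

	/* ... use ctx and the raw EQE to react to the port state change ... */
	return NOTIFY_OK;
}

static void my_subscribe(struct mlx5_core_dev *dev, struct my_ctx *ctx)
{
	MLX5_NB_INIT(&ctx->port_nb, my_port_handler, PORT_CHANGE);
	mlx5_notifier_register(dev, &ctx->port_nb.nb);
}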
+ */ enum mlx5_event { + /* Special value to subscribe to any event */ + MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0, + /* HW events enum start: comp events are not subscribable */ MLX5_EVENT_TYPE_COMP = 0x0, - + /* HW Async events enum start: subscribable events */ MLX5_EVENT_TYPE_PATH_MIG = 0x01, MLX5_EVENT_TYPE_COMM_EST = 0x02, MLX5_EVENT_TYPE_SQ_DRAINED = 0x03, @@ -317,6 +330,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, + MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24, MLX5_EVENT_TYPE_PPS_EVENT = 0x25, MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, @@ -334,6 +348,8 @@ enum mlx5_event { MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, + + MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1, }; enum { @@ -405,6 +421,7 @@ enum { MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15, MLX5_OPCODE_BIND_MW = 0x18, MLX5_OPCODE_CONFIG_CMD = 0x1f, + MLX5_OPCODE_ENHANCED_MPSW = 0x29, MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, MLX5_RECV_OPCODE_SEND = 0x01, @@ -766,6 +783,11 @@ static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) return (cqe->op_own >> 2) & 0x3; } +static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe) +{ + return cqe->op_own >> 4; +} + static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index aa5963b5d38e..54299251d40d 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -46,10 +46,11 @@ #include <linux/mempool.h> #include <linux/interrupt.h> #include <linux/idr.h> +#include <linux/notifier.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> -#include <linux/mlx5/srq.h> +#include <linux/mlx5/eq.h> #include <linux/timecounter.h> #include <linux/ptp_clock_kernel.h> @@ -85,18 +86,6 @@ enum { }; enum { - MLX5_EQ_VEC_PAGES = 0, - MLX5_EQ_VEC_CMD = 1, - MLX5_EQ_VEC_ASYNC = 2, - MLX5_EQ_VEC_PFAULT = 3, - MLX5_EQ_VEC_COMP_BASE, -}; - -enum { - MLX5_MAX_IRQ_NAME = 32 -}; - -enum { MLX5_ATOMIC_MODE_OFFSET = 16, MLX5_ATOMIC_MODE_IB_COMP = 1, MLX5_ATOMIC_MODE_CX = 2, @@ -205,16 +194,7 @@ struct mlx5_rsc_debug { }; enum mlx5_dev_event { - MLX5_DEV_EVENT_SYS_ERROR, - MLX5_DEV_EVENT_PORT_UP, - MLX5_DEV_EVENT_PORT_DOWN, - MLX5_DEV_EVENT_PORT_INITIALIZED, - MLX5_DEV_EVENT_LID_CHANGE, - MLX5_DEV_EVENT_PKEY_CHANGE, - MLX5_DEV_EVENT_GUID_CHANGE, - MLX5_DEV_EVENT_CLIENT_REREG, - MLX5_DEV_EVENT_PPS, - MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, + MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */ }; enum mlx5_port_status { @@ -222,14 +202,6 @@ enum mlx5_port_status { MLX5_PORT_DOWN = 2, }; -enum mlx5_eq_type { - MLX5_EQ_TYPE_COMP, - MLX5_EQ_TYPE_ASYNC, -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - MLX5_EQ_TYPE_PF, -#endif -}; - struct mlx5_bfreg_info { u32 *sys_pages; int num_low_latency_bfregs; @@ -297,6 +269,8 @@ struct mlx5_cmd_stats { }; struct mlx5_cmd { + struct mlx5_nb nb; + void *cmd_alloc_buf; dma_addr_t alloc_dma; int alloc_size; @@ -366,51 +340,6 @@ struct mlx5_frag_buf_ctrl { u8 log_frag_strides; }; -struct mlx5_eq_tasklet { - struct list_head list; - struct list_head process_list; - struct tasklet_struct task; - /* lock on completion tasklet list */ - spinlock_t lock; -}; - -struct mlx5_eq_pagefault { - struct work_struct work; - /* Pagefaults lock */ - spinlock_t lock; - struct workqueue_struct *wq; - mempool_t *pool; -}; - -struct mlx5_cq_table { - /* protect radix tree */ - spinlock_t lock; - struct radix_tree_root tree; -}; - -struct 
mlx5_eq { - struct mlx5_core_dev *dev; - struct mlx5_cq_table cq_table; - __be32 __iomem *doorbell; - u32 cons_index; - struct mlx5_frag_buf buf; - int size; - unsigned int irqn; - u8 eqn; - int nent; - u64 mask; - struct list_head list; - int index; - struct mlx5_rsc_debug *dbg; - enum mlx5_eq_type type; - union { - struct mlx5_eq_tasklet tasklet_ctx; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - struct mlx5_eq_pagefault pf_ctx; -#endif - }; -}; - struct mlx5_core_psv { u32 psv_idx; struct psv_layout { @@ -463,36 +392,6 @@ struct mlx5_core_rsc_common { struct completion free; }; -struct mlx5_core_srq { - struct mlx5_core_rsc_common common; /* must be first */ - u32 srqn; - int max; - size_t max_gs; - size_t max_avail_gather; - int wqe_shift; - void (*event) (struct mlx5_core_srq *, enum mlx5_event); - - atomic_t refcount; - struct completion free; - u16 uid; -}; - -struct mlx5_eq_table { - void __iomem *update_ci; - void __iomem *update_arm_ci; - struct list_head comp_eqs_list; - struct mlx5_eq pages_eq; - struct mlx5_eq async_eq; - struct mlx5_eq cmd_eq; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - struct mlx5_eq pfault_eq; -#endif - int num_comp_vectors; - /* protect EQs list - */ - spinlock_t lock; -}; - struct mlx5_uars_page { void __iomem *map; bool wc; @@ -542,13 +441,8 @@ struct mlx5_core_health { }; struct mlx5_qp_table { - /* protect radix tree - */ - spinlock_t lock; - struct radix_tree_root tree; -}; + struct notifier_block nb; -struct mlx5_srq_table { /* protect radix tree */ spinlock_t lock; @@ -575,11 +469,6 @@ struct mlx5_core_sriov { int enabled_vfs; }; -struct mlx5_irq_info { - cpumask_var_t mask; - char name[MLX5_MAX_IRQ_NAME]; -}; - struct mlx5_fc_stats { spinlock_t counters_idr_lock; /* protects counters_idr */ struct idr counters_idr; @@ -593,10 +482,12 @@ struct mlx5_fc_stats { unsigned long sampling_interval; /* jiffies */ }; +struct mlx5_events; struct mlx5_mpfs; struct mlx5_eswitch; struct mlx5_lag; -struct mlx5_pagefault; +struct mlx5_devcom; +struct mlx5_eq_table; struct mlx5_rate_limit { u32 rate; @@ -619,37 +510,12 @@ struct mlx5_rl_table { struct mlx5_rl_entry *rl_entry; }; -enum port_module_event_status_type { - MLX5_MODULE_STATUS_PLUGGED = 0x1, - MLX5_MODULE_STATUS_UNPLUGGED = 0x2, - MLX5_MODULE_STATUS_ERROR = 0x3, - MLX5_MODULE_STATUS_NUM = 0x3, -}; - -enum port_module_event_error_type { - MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED, - MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE, - MLX5_MODULE_EVENT_ERROR_BUS_STUCK, - MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT, - MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST, - MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER, - MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE, - MLX5_MODULE_EVENT_ERROR_BAD_CABLE, - MLX5_MODULE_EVENT_ERROR_UNKNOWN, - MLX5_MODULE_EVENT_ERROR_NUM, -}; - -struct mlx5_port_module_event_stats { - u64 status_counters[MLX5_MODULE_STATUS_NUM]; - u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM]; -}; - struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; - struct mlx5_eq_table eq_table; - struct mlx5_irq_info *irq_info; + struct mlx5_eq_table *eq_table; /* pages stuff */ + struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; struct rb_root page_root; int fw_pages; @@ -659,8 +525,6 @@ struct mlx5_priv { struct mlx5_core_health health; - struct mlx5_srq_table srq_table; - /* start: qp staff */ struct mlx5_qp_table qp_table; struct dentry *qp_debugfs; @@ -690,28 +554,18 @@ struct mlx5_priv { struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; - - struct list_head 
waiting_events_list; - bool is_accum_events; + struct mlx5_events *events; struct mlx5_flow_steering *steering; struct mlx5_mpfs *mpfs; struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; struct mlx5_lag *lag; + struct mlx5_devcom *devcom; unsigned long pci_dev_data; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; - struct mlx5_port_module_event_stats pme_stats; - -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - void (*pfault)(struct mlx5_core_dev *dev, - void *context, - struct mlx5_pagefault *pfault); - void *pfault_ctx; - struct srcu_struct pfault_srcu; -#endif struct mlx5_bfreg_data bfregs; struct mlx5_uars_page *uar; }; @@ -736,44 +590,6 @@ enum mlx5_pagefault_type_flags { MLX5_PFAULT_RDMA = 1 << 2, }; -/* Contains the details of a pagefault. */ -struct mlx5_pagefault { - u32 bytes_committed; - u32 token; - u8 event_subtype; - u8 type; - union { - /* Initiator or send message responder pagefault details. */ - struct { - /* Received packet size, only valid for responders. */ - u32 packet_size; - /* - * Number of resource holding WQE, depends on type. - */ - u32 wq_num; - /* - * WQE index. Refers to either the send queue or - * receive queue, according to event_subtype. - */ - u16 wqe_index; - } wqe; - /* RDMA responder pagefault details */ - struct { - u32 r_key; - /* - * Received packet size, minimal size page fault - * resolution required for forward progress. - */ - u32 packet_size; - u32 rdma_op_len; - u64 rdma_va; - } rdma; - }; - - struct mlx5_eq *eq; - struct work_struct work; -}; - struct mlx5_td { struct list_head tirs_list; u32 tdn; @@ -803,6 +619,8 @@ struct mlx5_pps { }; struct mlx5_clock { + struct mlx5_core_dev *mdev; + struct mlx5_nb pps_nb; seqlock_t lock; struct cyclecounter cycles; struct timecounter tc; @@ -810,7 +628,6 @@ struct mlx5_clock { u32 nominal_c_mult; unsigned long overflow_period; struct delayed_work overflow_work; - struct mlx5_core_dev *mdev; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; struct mlx5_pps pps_info; @@ -843,9 +660,6 @@ struct mlx5_core_dev { /* sync interface state */ struct mutex intf_state_mutex; unsigned long intf_state; - void (*event) (struct mlx5_core_dev *dev, - enum mlx5_dev_event event, - unsigned long param); struct mlx5_priv priv; struct mlx5_profile *profile; atomic_t num_qps; @@ -859,9 +673,6 @@ struct mlx5_core_dev { #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; #endif -#ifdef CONFIG_RFS_ACCEL - struct cpu_rmap *rmap; -#endif struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; struct page *clock_info_page; @@ -940,8 +751,8 @@ struct mlx5_hca_vport_context { u64 node_guid; u32 cap_mask1; u32 cap_mask1_perm; - u32 cap_mask2; - u32 cap_mask2_perm; + u16 cap_mask2; + u16 cap_mask2_perm; u16 lid; u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */ u8 lmc; @@ -1070,13 +881,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); -int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *in); -int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); -int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *out); -int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - u16 lwm, int is_srq); void mlx5_init_mkey_table(struct mlx5_core_dev *dev); void mlx5_cleanup_mkey_table(struct 
mlx5_core_dev *dev); int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, @@ -1095,9 +899,9 @@ int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, u16 opmod, u8 port); -void mlx5_pagealloc_init(struct mlx5_core_dev *dev); +int mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); -int mlx5_pagealloc_start(struct mlx5_core_dev *dev); +void mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages); @@ -1108,9 +912,6 @@ void mlx5_unregister_debugfs(void); void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); -void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); -struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -1155,6 +956,9 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, bool map_wc, bool fast_path); void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); +unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev); +struct cpumask * +mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector); unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, u8 roce_version, u8 roce_l3_type, const u8 *gid, @@ -1202,23 +1006,21 @@ struct mlx5_interface { void (*remove)(struct mlx5_core_dev *dev, void *context); int (*attach)(struct mlx5_core_dev *dev, void *context); void (*detach)(struct mlx5_core_dev *dev, void *context); - void (*event)(struct mlx5_core_dev *dev, void *context, - enum mlx5_dev_event event, unsigned long param); - void (*pfault)(struct mlx5_core_dev *dev, - void *context, - struct mlx5_pagefault *pfault); - void * (*get_dev)(void *context); int protocol; struct list_head list; }; -void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); +int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); + int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); +bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); +bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, @@ -1306,10 +1108,4 @@ enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; -static inline const struct cpumask * -mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector) -{ - return dev->priv.irq_info[vector].mask; -} - #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h 
new file mode 100644 index 000000000000..00045cc4ea11 --- /dev/null +++ b/include/linux/mlx5/eq.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2018 Mellanox Technologies. */ + +#ifndef MLX5_CORE_EQ_H +#define MLX5_CORE_EQ_H + +enum { + MLX5_EQ_PAGEREQ_IDX = 0, + MLX5_EQ_CMD_IDX = 1, + MLX5_EQ_ASYNC_IDX = 2, + /* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */ + MLX5_EQ_PFAULT_IDX = 3, + MLX5_EQ_MAX_ASYNC_EQS, + /* completion eqs vector indices start here */ + MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS, +}; + +#define MLX5_NUM_CMD_EQE (32) +#define MLX5_NUM_ASYNC_EQE (0x1000) +#define MLX5_NUM_SPARE_EQE (0x80) + +struct mlx5_eq; +struct mlx5_core_dev; + +struct mlx5_eq_param { + u8 index; + int nent; + u64 mask; + void *context; + irq_handler_t handler; +}; + +struct mlx5_eq * +mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name, + struct mlx5_eq_param *param); +int +mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq); + +struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc); +void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm); + +/* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create EQs with MLX5_NUM_SPARE_EQE extra entries, + * so we must update our consumer index at + * least that often. + * + * mlx5_eq_update_cc must be called on every EQE @EQ irq handler + */ +static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc) +{ + if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) { + mlx5_eq_update_ci(eq, cc, 0); + cc = 0; + } + return cc; +} + +struct mlx5_nb { + struct notifier_block nb; + u8 event_type; +}; + +#define mlx5_nb_cof(ptr, type, member) \ + (container_of(container_of(ptr, struct mlx5_nb, nb), type, member)) + +#define MLX5_NB_INIT(name, handler, event) do { \ + (name)->nb.notifier_call = handler; \ + (name)->event_type = MLX5_EVENT_TYPE_##event; \ +} while (0) + +#endif /* MLX5_CORE_EQ_H */ diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 5660f07d3be0..9df51da04621 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -86,6 +86,11 @@ struct mlx5_flow_spec { u32 match_value[MLX5_ST_SZ_DW(fte_match_param)]; }; +enum { + MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0), + MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1), +}; + struct mlx5_flow_destination { enum mlx5_flow_destination_type type; union { @@ -96,7 +101,8 @@ struct mlx5_flow_destination { struct { u16 num; u16 vhca_id; - bool vhca_id_valid; + u32 reformat_id; + u8 flags; } vport; }; }; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index dbff9ff28f2c..35fe5217b244 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -76,13 +76,7 @@ enum { }; enum { - MLX5_GENERAL_OBJ_TYPES_CAP_UCTX = (1ULL << 4), - MLX5_GENERAL_OBJ_TYPES_CAP_UMEM = (1ULL << 5), -}; - -enum { - MLX5_OBJ_TYPE_UCTX = 0x0004, - MLX5_OBJ_TYPE_UMEM = 0x0005, + MLX5_SHARED_RESOURCE_UID = 0xffff, }; enum { @@ -144,6 +138,9 @@ enum { MLX5_CMD_OP_DESTROY_XRQ = 0x718, MLX5_CMD_OP_QUERY_XRQ = 0x719, MLX5_CMD_OP_ARM_XRQ = 0x71a, + MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725, + MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726, + MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, @@ -161,6 +158,8 @@ enum { MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, + 
MLX5_CMD_OP_SET_MONITOR_COUNTER = 0x774, + MLX5_CMD_OP_ARM_MONITOR_COUNTER = 0x775, MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780, MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, @@ -245,6 +244,7 @@ enum { MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d, MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e, + MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT = 0x93f, MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940, MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941, MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942, @@ -257,9 +257,19 @@ enum { MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01, MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03, + MLX5_CMD_OP_CREATE_UCTX = 0xa04, + MLX5_CMD_OP_DESTROY_UCTX = 0xa06, + MLX5_CMD_OP_CREATE_UMEM = 0xa08, + MLX5_CMD_OP_DESTROY_UMEM = 0xa0a, MLX5_CMD_OP_MAX }; +/* Valid range for general commands that don't work over an object */ +enum { + MLX5_CMD_OP_GENERAL_START = 0xb00, + MLX5_CMD_OP_GENERAL_END = 0xd00, +}; + struct mlx5_ifc_flow_table_fields_supported_bits { u8 outer_dmac[0x1]; u8 outer_smac[0x1]; @@ -349,7 +359,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reformat_l3_tunnel_to_l2[0x1]; u8 reformat_l2_to_l3_tunnel[0x1]; u8 reformat_and_modify_action[0x1]; - u8 reserved_at_14[0xb]; + u8 reserved_at_15[0xb]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; u8 log_max_modify_header_context[0x8]; @@ -421,6 +431,16 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; }; +struct mlx5_ifc_nvgre_key_bits { + u8 hi[0x18]; + u8 lo[0x8]; +}; + +union mlx5_ifc_gre_key_bits { + struct mlx5_ifc_nvgre_key_bits nvgre; + u8 key[0x20]; +}; + struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_at_0[0x8]; u8 source_sqn[0x18]; @@ -442,8 +462,7 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_at_64[0xc]; u8 gre_protocol[0x10]; - u8 gre_key_h[0x18]; - u8 gre_key_l[0x8]; + union mlx5_ifc_gre_key_bits gre_key; u8 vxlan_vni[0x18]; u8 reserved_at_b8[0x8]; @@ -582,11 +601,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits { }; struct mlx5_ifc_flow_table_eswitch_cap_bits { - u8 reserved_at_0[0x1c]; - u8 fdb_multi_path_to_table[0x1]; - u8 reserved_at_1d[0x1]; + u8 reserved_at_0[0x1a]; u8 multi_fdb_encap[0x1]; - u8 reserved_at_1e[0x1e1]; + u8 reserved_at_1b[0x1]; + u8 fdb_multi_path_to_table[0x1]; + u8 reserved_at_1d[0x3]; + + u8 reserved_at_20[0x1e0]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; @@ -597,20 +618,28 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { u8 reserved_at_800[0x7800]; }; +enum { + MLX5_COUNTER_SOURCE_ESWITCH = 0x0, + MLX5_COUNTER_FLOW_ESWITCH = 0x1, +}; + struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; - u8 reserved_at_5[0x18]; + u8 reserved_at_5[0x17]; + u8 counter_eswitch_affinity[0x1]; u8 merged_eswitch[0x1]; u8 nic_vport_node_guid_modify[0x1]; u8 nic_vport_port_guid_modify[0x1]; u8 vxlan_encap_decap[0x1]; u8 nvgre_encap_decap[0x1]; - u8 reserved_at_22[0x9]; + u8 reserved_at_22[0x1]; + u8 log_max_fdb_encap_uplink[0x5]; + u8 reserved_at_21[0x3]; u8 log_max_packet_reformat_context[0x5]; u8 reserved_2b[0x6]; u8 max_encap_header_size[0xa]; @@ -829,7 +858,7 @@ struct mlx5_ifc_vector_calc_cap_bits { struct mlx5_ifc_calc_op calc2; struct mlx5_ifc_calc_op calc3; - u8 reserved_at_e0[0x720]; + u8 reserved_at_c0[0x720]; 
}; enum { @@ -883,6 +912,10 @@ enum { MLX5_CAP_UMR_FENCE_NONE = 0x2, }; +enum { + MLX5_UCTX_CAP_RAW_TX = 1UL << 0, +}; + struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x30]; u8 vhca_id[0x10]; @@ -1043,7 +1076,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vector_calc[0x1]; u8 umr_ptr_rlky[0x1]; u8 imaicl[0x1]; - u8 reserved_at_232[0x4]; + u8 qp_packet_based[0x1]; + u8 reserved_at_233[0x3]; u8 qkv[0x1]; u8 pkv[0x1]; u8 set_deth_sqpn[0x1]; @@ -1153,7 +1187,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_440[0x20]; - u8 reserved_at_460[0x10]; + u8 reserved_at_460[0x3]; + u8 log_max_uctx[0x5]; + u8 reserved_at_468[0x3]; + u8 log_max_umem[0x5]; u8 max_num_eqs[0x10]; u8 reserved_at_480[0x3]; @@ -1193,7 +1230,19 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 num_vhca_ports[0x8]; u8 reserved_at_618[0x6]; u8 sw_owner_id[0x1]; - u8 reserved_at_61f[0x1e1]; + u8 reserved_at_61f[0x1]; + + u8 max_num_of_monitor_counters[0x10]; + u8 num_ppcnt_monitor_counters[0x10]; + + u8 reserved_at_640[0x10]; + u8 num_q_monitor_counters[0x10]; + + u8 reserved_at_660[0x40]; + + u8 uctx_cap[0x20]; + + u8 reserved_at_6c0[0x140]; }; enum mlx5_flow_destination_type { @@ -1209,8 +1258,10 @@ enum mlx5_flow_destination_type { struct mlx5_ifc_dest_format_struct_bits { u8 destination_type[0x8]; u8 destination_id[0x18]; + u8 destination_eswitch_owner_vhca_id_valid[0x1]; - u8 reserved_at_21[0xf]; + u8 packet_reformat[0x1]; + u8 reserved_at_22[0xe]; u8 destination_eswitch_owner_vhca_id[0x10]; }; @@ -1220,6 +1271,14 @@ struct mlx5_ifc_flow_counter_list_bits { u8 reserved_at_20[0x20]; }; +struct mlx5_ifc_extended_dest_format_bits { + struct mlx5_ifc_dest_format_struct_bits destination_entry; + + u8 packet_reformat_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { struct mlx5_ifc_dest_format_struct_bits dest_format_struct; struct mlx5_ifc_flow_counter_list_bits flow_counter_list; @@ -2249,7 +2308,8 @@ struct mlx5_ifc_qpc_bits { u8 st[0x8]; u8 reserved_at_10[0x3]; u8 pm_state[0x2]; - u8 reserved_at_15[0x3]; + u8 reserved_at_15[0x1]; + u8 req_e2e_credit_mode[0x2]; u8 offload_type[0x4]; u8 end_padding_mode[0x2]; u8 reserved_at_1e[0x2]; @@ -2440,7 +2500,8 @@ struct mlx5_ifc_flow_context_bits { u8 reserved_at_60[0x10]; u8 action[0x10]; - u8 reserved_at_80[0x8]; + u8 extended_destination[0x1]; + u8 reserved_at_80[0x7]; u8 destination_list_size[0x18]; u8 reserved_at_a0[0x8]; @@ -2473,14 +2534,15 @@ struct mlx5_ifc_xrc_srqc_bits { u8 wq_signature[0x1]; u8 cont_srq[0x1]; - u8 dbr_umem_valid[0x1]; + u8 reserved_at_22[0x1]; u8 rlky[0x1]; u8 basic_cyclic_rcv_wqe[0x1]; u8 log_rq_stride[0x3]; u8 xrcd[0x18]; u8 page_offset[0x6]; - u8 reserved_at_46[0x2]; + u8 reserved_at_46[0x1]; + u8 dbr_umem_valid[0x1]; u8 cqn[0x18]; u8 reserved_at_60[0x20]; @@ -3795,6 +3857,83 @@ enum { MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, }; +struct mlx5_ifc_arm_monitor_counter_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_arm_monitor_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT = 0x0, + MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1, +}; + +enum mlx5_monitor_counter_ppcnt { + MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0x0, + MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0x1, + MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0x2, + 
MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3, + MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0x4, + MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0x5, +}; + +enum { + MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4, +}; + +struct mlx5_ifc_monitor_counter_output_bits { + u8 reserved_at_0[0x4]; + u8 type[0x4]; + u8 reserved_at_8[0x8]; + u8 counter[0x10]; + + u8 counter_group_id[0x20]; +}; + +#define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6) +#define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1 (1) +#define MLX5_CMD_SET_MONITOR_NUM_COUNTER (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\ + MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1) + +struct mlx5_ifc_set_monitor_counter_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 num_of_counters[0x10]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER]; +}; + +struct mlx5_ifc_set_monitor_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_query_vport_state_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -4660,7 +4799,7 @@ enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, - MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3, + MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, }; struct mlx5_ifc_query_flow_group_out_bits { @@ -5566,7 +5705,7 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { struct mlx5_ifc_modify_nic_vport_field_select_bits { u8 reserved_at_0[0x12]; u8 affiliation[0x1]; - u8 reserved_at_e[0x1]; + u8 reserved_at_13[0x1]; u8 disable_uc_local_lb[0x1]; u8 disable_mc_local_lb[0x1]; u8 node_guid[0x1]; @@ -6566,7 +6705,7 @@ struct mlx5_ifc_dealloc_transport_domain_out_bits { struct mlx5_ifc_dealloc_transport_domain_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -6689,9 +6828,12 @@ struct mlx5_ifc_create_xrc_srq_in_bits { struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; - u8 reserved_at_280[0x40]; + u8 reserved_at_280[0x60]; + u8 xrc_srq_umem_valid[0x1]; - u8 reserved_at_2c1[0x5bf]; + u8 reserved_at_2e1[0x1f]; + + u8 reserved_at_300[0x580]; u8 pas[0][0x40]; }; @@ -7416,7 +7558,7 @@ struct mlx5_ifc_alloc_transport_domain_out_bits { struct mlx5_ifc_alloc_transport_domain_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7438,7 +7580,7 @@ struct mlx5_ifc_alloc_q_counter_out_bits { struct mlx5_ifc_alloc_q_counter_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -8160,7 +8302,9 @@ struct mlx5_ifc_pcam_regs_5000_to_507f_bits { u8 port_access_reg_cap_mask_31_to_13[0x13]; u8 pbmc[0x1]; u8 pptb[0x1]; - u8 port_access_reg_cap_mask_10_to_0[0xb]; + u8 port_access_reg_cap_mask_10_to_09[0x2]; + u8 ppcnt[0x1]; + u8 port_access_reg_cap_mask_07_to_00[0x8]; }; struct mlx5_ifc_pcam_reg_bits { @@ -9024,7 +9168,7 @@ struct mlx5_ifc_dcbx_param_bits { u8 dcbx_cee_cap[0x1]; u8 dcbx_ieee_cap[0x1]; u8 dcbx_standby_cap[0x1]; - u8 reserved_at_0[0x5]; + u8 reserved_at_3[0x5]; u8 port_number[0x8]; u8 reserved_at_10[0xa]; u8 max_application_table_size[6]; @@ -9257,9 +9401,9 @@ struct 
mlx5_ifc_general_obj_out_cmd_hdr_bits { }; struct mlx5_ifc_umem_bits { - u8 modify_field_select[0x40]; + u8 reserved_at_0[0x80]; - u8 reserved_at_40[0x5b]; + u8 reserved_at_80[0x1b]; u8 log_page_size[0x5]; u8 page_offset[0x20]; @@ -9270,19 +9414,46 @@ struct mlx5_ifc_umem_bits { }; struct mlx5_ifc_uctx_bits { - u8 modify_field_select[0x40]; + u8 cap[0x20]; - u8 reserved_at_40[0x1c0]; + u8 reserved_at_20[0x160]; }; struct mlx5_ifc_create_umem_in_bits { - struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; - struct mlx5_ifc_umem_bits umem; + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_umem_bits umem; }; struct mlx5_ifc_create_uctx_in_bits { - struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; - struct mlx5_ifc_uctx_bits uctx; + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + struct mlx5_ifc_uctx_bits uctx; +}; + +struct mlx5_ifc_destroy_uctx_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 uid[0x10]; + + u8 reserved_at_60[0x20]; }; struct mlx5_ifc_mtrc_string_db_param_bits { diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 34aed6032f86..bf4bc01ffb0c 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -107,9 +107,6 @@ enum mlx5e_connector_type { #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) -#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF -#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF - int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index fbe322c966bc..b26ea9077384 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -596,6 +596,11 @@ int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, int reset, void *out, int out_size); +struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev, + int res_num, + enum mlx5_res_type res_type); +void mlx5_core_res_put(struct mlx5_core_rsc_common *res); + static inline const char *mlx5_qp_type_str(int type) { switch (type) { diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h deleted file mode 100644 index 1b1f3c20c6a3..000000000000 --- a/include/linux/mlx5/srq.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef MLX5_SRQ_H -#define MLX5_SRQ_H - -#include <linux/mlx5/driver.h> - -enum { - MLX5_SRQ_FLAG_ERR = (1 << 0), - MLX5_SRQ_FLAG_WQ_SIG = (1 << 1), - MLX5_SRQ_FLAG_RNDV = (1 << 2), -}; - -struct mlx5_srq_attr { - u32 type; - u32 flags; - u32 log_size; - u32 wqe_shift; - u32 log_page_size; - u32 wqe_cnt; - u32 srqn; - u32 xrcd; - u32 page_offset; - u32 cqn; - u32 pd; - u32 lwm; - u32 user_index; - u64 db_record; - __be64 *pas; - u32 tm_log_list_size; - u32 tm_next_tag; - u32 tm_hw_phase_cnt; - u32 tm_sw_phase_cnt; - u16 uid; -}; - -struct mlx5_core_dev; - -void mlx5_init_srq_table(struct mlx5_core_dev *dev); -void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev); - -#endif /* MLX5_SRQ_H */ diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 7f5ca2cd3a32..a261d5528ff7 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -58,17 +58,6 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, int inlen); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); -int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *rmpn); -int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen); -int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn); -int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); -int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); -int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *rmpn); -int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); -int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); - int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn); int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, diff --git a/include/linux/mm.h b/include/linux/mm.h index fcf9cc9d535f..80bb6408fe73 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -48,7 +48,32 @@ static inline void set_max_mapnr(unsigned long limit) static inline void set_max_mapnr(unsigned long limit) { } #endif -extern unsigned long totalram_pages; +extern atomic_long_t _totalram_pages; +static inline unsigned long totalram_pages(void) +{ + return (unsigned long)atomic_long_read(&_totalram_pages); +} + +static inline void totalram_pages_inc(void) +{ + atomic_long_inc(&_totalram_pages); +} + +static inline void totalram_pages_dec(void) +{ + atomic_long_dec(&_totalram_pages); +} + +static inline void totalram_pages_add(long count) +{ + atomic_long_add(count, &_totalram_pages); +} + +static inline void totalram_pages_set(long val) +{ + atomic_long_set(&_totalram_pages, val); +} + extern void * high_memory; extern int page_cluster; @@ -146,6 +171,8 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) +#define 
lru_to_page(head) (list_entry((head)->prev, struct page, lru)) + /* * Linux kernel virtual memory manager primitives. * The idea being to have a "virtual" mm in the same way @@ -804,6 +831,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) +#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) /* * Define the bit shifts to access each section. For non-existent @@ -814,6 +842,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) #define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) +#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ #ifdef NODE_NOT_IN_PAGE_FLAGS @@ -836,6 +865,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #define NODES_MASK ((1UL << NODES_WIDTH) - 1) #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) +#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) static inline enum zone_type page_zonenum(const struct page *page) @@ -1101,6 +1131,32 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) } #endif /* CONFIG_NUMA_BALANCING */ +#ifdef CONFIG_KASAN_SW_TAGS +static inline u8 page_kasan_tag(const struct page *page) +{ + return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; +} + +static inline void page_kasan_tag_set(struct page *page, u8 tag) +{ + page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT); + page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT; +} + +static inline void page_kasan_tag_reset(struct page *page) +{ + page_kasan_tag_set(page, 0xff); +} +#else +static inline u8 page_kasan_tag(const struct page *page) +{ + return 0xff; +} + +static inline void page_kasan_tag_set(struct page *page, u8 tag) { } +static inline void page_kasan_tag_reset(struct page *page) { } +#endif + static inline struct zone *page_zone(const struct page *page) { return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; @@ -1397,6 +1453,8 @@ struct mm_walk { void *private; }; +struct mmu_notifier_range; + int walk_page_range(unsigned long addr, unsigned long end, struct mm_walk *walk); int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk); @@ -1405,8 +1463,8 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); int follow_pte_pmd(struct mm_struct *mm, unsigned long address, - unsigned long *start, unsigned long *end, - pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); + struct mmu_notifier_range *range, + pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, @@ -1744,11 +1802,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); static inline void mm_inc_nr_puds(struct mm_struct *mm) { + if (mm_pud_folded(mm)) + return; atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); } static inline void mm_dec_nr_puds(struct mm_struct *mm) { + if (mm_pud_folded(mm)) + return; atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), 
&mm->pgtables_bytes); } #endif @@ -1768,11 +1830,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); static inline void mm_inc_nr_pmds(struct mm_struct *mm) { + if (mm_pmd_folded(mm)) + return; atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); } static inline void mm_dec_nr_pmds(struct mm_struct *mm) { + if (mm_pmd_folded(mm)) + return; atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); } #endif @@ -1809,8 +1875,8 @@ static inline void mm_inc_nr_ptes(struct mm_struct *mm) {} static inline void mm_dec_nr_ptes(struct mm_struct *mm) {} #endif -int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); -int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); +int __pte_alloc(struct mm_struct *mm, pmd_t *pmd); +int __pte_alloc_kernel(pmd_t *pmd); /* * The following ifdef needed to get the 4level-fixup.h header to work. @@ -1892,13 +1958,6 @@ static inline bool ptlock_init(struct page *page) return true; } -/* Reset page->mapping so free_pages_check won't complain. */ -static inline void pte_lock_deinit(struct page *page) -{ - page->mapping = NULL; - ptlock_free(page); -} - #else /* !USE_SPLIT_PTE_PTLOCKS */ /* * We use mm->page_table_lock to guard all pagetable pages of the mm. @@ -1909,7 +1968,7 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) } static inline void ptlock_cache_init(void) {} static inline bool ptlock_init(struct page *page) { return true; } -static inline void pte_lock_deinit(struct page *page) {} +static inline void ptlock_free(struct page *page) {} #endif /* USE_SPLIT_PTE_PTLOCKS */ static inline void pgtable_init(void) @@ -1929,7 +1988,7 @@ static inline bool pgtable_page_ctor(struct page *page) static inline void pgtable_page_dtor(struct page *page) { - pte_lock_deinit(page); + ptlock_free(page); __ClearPageTable(page); dec_zone_page_state(page, NR_PAGETABLE); } @@ -1948,18 +2007,17 @@ static inline void pgtable_page_dtor(struct page *page) pte_unmap(pte); \ } while (0) -#define pte_alloc(mm, pmd, address) \ - (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address)) +#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd)) #define pte_alloc_map(mm, pmd, address) \ - (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address)) + (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address)) #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ - (pte_alloc(mm, pmd, address) ? \ + (pte_alloc(mm, pmd) ? \ NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) #define pte_alloc_kernel(pmd, address) \ - ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ + ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \ NULL: pte_offset_kernel(pmd, address)) #if USE_SPLIT_PMD_PTLOCKS @@ -2046,7 +2104,7 @@ extern void free_initmem(void); * Return pages freed into the buddy system. 
*/ extern unsigned long free_reserved_area(void *start, void *end, - int poison, char *s); + int poison, const char *s); #ifdef CONFIG_HIGHMEM /* @@ -2194,6 +2252,7 @@ extern void zone_pcp_reset(struct zone *zone); /* page_alloc.c */ extern int min_free_kbytes; +extern int watermark_boost_factor; extern int watermark_scale_factor; /* nommu.c */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 10191c28fc04..04ec454d44ce 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -124,7 +124,4 @@ static __always_inline enum lru_list page_lru(struct page *page) } return lru; } - -#define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) - #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5ed8f6292a53..2c471a2c43fa 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -206,6 +206,11 @@ struct page { #endif } _struct_page_alignment; +/* + * Used for sizing the vmemmap region on some architectures + */ +#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page))) + #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 2a5fe75dd082..4d35ff36ceff 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -147,6 +147,9 @@ struct mmc_host_ops { /* Prepare HS400 target operating frequency depending host driver */ int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios); + /* Prepare switch to DDR during the HS400 init sequence */ + int (*hs400_prepare_ddr)(struct mmc_host *host); + /* Prepare for switching from HS400 to HS200 */ void (*hs400_downgrade)(struct mmc_host *host); @@ -331,7 +334,7 @@ struct mmc_host { #define MMC_CAP_UHS (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | \ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \ MMC_CAP_UHS_DDR50) -/* (1 << 21) is free for reuse */ +#define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. 
*/ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 4224902a8e22..4332199c71c2 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -42,6 +42,7 @@ #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 #define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 #define SDIO_DEVICE_ID_CYPRESS_4373 0x4373 +#define SDIO_DEVICE_ID_CYPRESS_43012 43012 #define SDIO_VENDOR_ID_INTEL 0x0089 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 06607c59c4d0..feebd7aa6f5c 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -17,12 +17,7 @@ struct mmc_host; int mmc_gpio_get_ro(struct mmc_host *host); -int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio); - int mmc_gpio_get_cd(struct mmc_host *host); -int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio, - unsigned int debounce); - int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, unsigned int debounce, bool *gpio_invert); diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 9893a6432adf..4050ec1c3b45 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -25,6 +25,13 @@ struct mmu_notifier_mm { spinlock_t lock; }; +struct mmu_notifier_range { + struct mm_struct *mm; + unsigned long start; + unsigned long end; + bool blockable; +}; + struct mmu_notifier_ops { /* * Called either by mmu_notifier_unregister or when the mm is @@ -146,12 +153,9 @@ struct mmu_notifier_ops { * */ int (*invalidate_range_start)(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, unsigned long end, - bool blockable); + const struct mmu_notifier_range *range); void (*invalidate_range_end)(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, unsigned long end); + const struct mmu_notifier_range *range); /* * invalidate_range() is either called between @@ -216,11 +220,8 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address); extern void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte); -extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm, - unsigned long start, unsigned long end, - bool blockable); -extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, - unsigned long start, unsigned long end, +extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r); +extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r, bool only_end); extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, unsigned long start, unsigned long end); @@ -264,33 +265,37 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, __mmu_notifier_change_pte(mm, address, pte); } -static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, - unsigned long start, unsigned long end) +static inline void +mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) { - if (mm_has_notifiers(mm)) - __mmu_notifier_invalidate_range_start(mm, start, end, true); + if (mm_has_notifiers(range->mm)) { + range->blockable = true; + __mmu_notifier_invalidate_range_start(range); + } } -static inline int 
mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm, - unsigned long start, unsigned long end) +static inline int +mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range) { - if (mm_has_notifiers(mm)) - return __mmu_notifier_invalidate_range_start(mm, start, end, false); + if (mm_has_notifiers(range->mm)) { + range->blockable = false; + return __mmu_notifier_invalidate_range_start(range); + } return 0; } -static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, - unsigned long start, unsigned long end) +static inline void +mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) { - if (mm_has_notifiers(mm)) - __mmu_notifier_invalidate_range_end(mm, start, end, false); + if (mm_has_notifiers(range->mm)) + __mmu_notifier_invalidate_range_end(range, false); } -static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm, - unsigned long start, unsigned long end) +static inline void +mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range) { - if (mm_has_notifiers(mm)) - __mmu_notifier_invalidate_range_end(mm, start, end, true); + if (mm_has_notifiers(range->mm)) + __mmu_notifier_invalidate_range_end(range, true); } static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, @@ -311,6 +316,17 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) __mmu_notifier_mm_destroy(mm); } + +static inline void mmu_notifier_range_init(struct mmu_notifier_range *range, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + range->mm = mm; + range->start = start; + range->end = end; +} + #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ ({ \ int __young; \ @@ -420,10 +436,26 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) extern void mmu_notifier_call_srcu(struct rcu_head *rcu, void (*func)(struct rcu_head *rcu)); -extern void mmu_notifier_synchronize(void); #else /* CONFIG_MMU_NOTIFIER */ +struct mmu_notifier_range { + unsigned long start; + unsigned long end; +}; + +static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range, + unsigned long start, + unsigned long end) +{ + range->start = start; + range->end = end; +} + +#define mmu_notifier_range_init(range, mm, start, end) \ + _mmu_notifier_range_init(range, start, end) + + static inline int mm_has_notifiers(struct mm_struct *mm) { return 0; @@ -451,24 +483,24 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm, { } -static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm, - unsigned long start, unsigned long end) +static inline void +mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) { } -static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm, - unsigned long start, unsigned long end) +static inline int +mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range) { return 0; } -static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm, - unsigned long start, unsigned long end) +static inline +void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) { } -static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm, - unsigned long start, unsigned long end) +static inline void +mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range) { } diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 847705a6d0ec..cc4a507d7ca4 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ 
-65,7 +65,7 @@ enum migratetype { }; /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ -extern char * const migratetype_names[MIGRATE_TYPES]; +extern const char * const migratetype_names[MIGRATE_TYPES]; #ifdef CONFIG_CMA # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) @@ -269,9 +269,10 @@ enum zone_watermarks { NR_WMARK }; -#define min_wmark_pages(z) (z->watermark[WMARK_MIN]) -#define low_wmark_pages(z) (z->watermark[WMARK_LOW]) -#define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) +#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost) +#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost) +#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) +#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) struct per_cpu_pages { int count; /* number of pages in the list */ @@ -314,7 +315,7 @@ enum zone_type { * Architecture Limit * --------------------------- * parisc, ia64, sparc <4G - * s390 <2G + * s390, powerpc <2G * arm Various * alpha Unlimited or 0-16MB. * @@ -362,7 +363,8 @@ struct zone { /* Read-mostly fields */ /* zone watermarks, access with *_wmark_pages(zone) macros */ - unsigned long watermark[NR_WMARK]; + unsigned long _watermark[NR_WMARK]; + unsigned long watermark_boost; unsigned long nr_reserved_highatomic; @@ -428,14 +430,8 @@ struct zone { * Write access to present_pages at runtime should be protected by * mem_hotplug_begin/end(). Any reader who can't tolerant drift of * present_pages should get_online_mems() to get a stable value. - * - * Read access to managed_pages should be safe because it's unsigned - * long. Write access to zone->managed_pages and totalram_pages are - * protected by managed_page_count_lock at runtime. Idealy only - * adjust_managed_page_count() should be used instead of directly - * touching zone->managed_pages and totalram_pages. */ - unsigned long managed_pages; + atomic_long_t managed_pages; unsigned long spanned_pages; unsigned long present_pages; @@ -524,6 +520,11 @@ enum pgdat_flags { PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ }; +static inline unsigned long zone_managed_pages(struct zone *zone) +{ + return (unsigned long)atomic_long_read(&zone->managed_pages); +} + static inline unsigned long zone_end_pfn(const struct zone *zone) { return zone->zone_start_pfn + zone->spanned_pages; @@ -635,9 +636,8 @@ typedef struct pglist_data { #endif #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) /* - * Must be held any time you expect node_start_pfn, node_present_pages - * or node_spanned_pages stay constant. Holding this will also - * guarantee that any pfn_valid() stays that way. + * Must be held any time you expect node_start_pfn, + * node_present_pages, node_spanned_pages or nr_zones to stay constant. * * pgdat_resize_lock() and pgdat_resize_unlock() are provided to * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG @@ -691,8 +691,6 @@ typedef struct pglist_data { * is the first PFN that needs to be initialised. 
*/ unsigned long first_deferred_pfn; - /* Number of non-deferred pages */ - unsigned long static_init_pgcnt; #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -783,6 +781,12 @@ void memory_present(int nid, unsigned long start, unsigned long end); static inline void memory_present(int nid, unsigned long start, unsigned long end) {} #endif +#if defined(CONFIG_SPARSEMEM) +void memblocks_present(void); +#else +static inline void memblocks_present(void) {} +#endif + #ifdef CONFIG_HAVE_MEMORYLESS_NODES int local_memory_node(int node_id); #else @@ -814,7 +818,7 @@ static inline bool is_dev_zone(const struct zone *zone) */ static inline bool managed_zone(struct zone *zone) { - return zone->managed_pages; + return zone_managed_pages(zone); } /* Returns true if a zone has memory */ @@ -884,6 +888,8 @@ static inline int is_highmem(struct zone *zone) struct ctl_table; int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +int watermark_boost_factor_sysctl_handler(struct ctl_table *, int, + void __user *, size_t *, loff_t *); int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 01797cb4587e..f9bd2f34b99f 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -448,6 +448,23 @@ struct pci_epf_device_id { kernel_ulong_t driver_data; }; +/* i3c */ + +#define I3C_MATCH_DCR 0x1 +#define I3C_MATCH_MANUF 0x2 +#define I3C_MATCH_PART 0x4 +#define I3C_MATCH_EXTRA_INFO 0x8 + +struct i3c_device_id { + __u8 match_flags; + __u8 dcr; + __u16 manuf_id; + __u16 part_id; + __u16 extra_info; + + const void *data; +}; + /* spi */ #define SPI_NAME_SIZE 32 @@ -565,7 +582,7 @@ struct platform_device_id { /** * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus * @phy_id: The result of - * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask + * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask * for this PHY type * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0 * is used to terminate an array of struct mdio_device_id. 
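The new i3c_device_id structure above is what I3C drivers will expose for bus matching. A minimal sketch of such a match table follows; the numeric IDs and the example_* names are illustrative assumptions, not values taken from this patch, and the zeroed sentinel entry is assumed to terminate the array as with the other *_device_id tables.

#include <linux/mod_devicetable.h>

/* Hypothetical match table built only from the fields and I3C_MATCH_*
 * flags introduced above; all ID values are made-up examples.
 */
static const struct i3c_device_id example_i3c_ids[] = {
	{
		/* match on manufacturer + part number */
		.match_flags = I3C_MATCH_MANUF | I3C_MATCH_PART,
		.manuf_id    = 0x011b,	/* example manufacturer ID */
		.part_id     = 0x1000,	/* example part ID */
	},
	{
		/* or match on the Device Characteristics Register alone */
		.match_flags = I3C_MATCH_DCR,
		.dcr         = 0x44,	/* example DCR value */
	},
	{ /* sentinel */ },
};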
diff --git a/include/linux/module.h b/include/linux/module.h index fce6b4335e36..d5453eb5a68b 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -432,6 +432,10 @@ struct module { unsigned int num_tracepoints; tracepoint_ptr_t *tracepoints_ptrs; #endif +#ifdef CONFIG_BPF_EVENTS + unsigned int num_bpf_raw_events; + struct bpf_raw_event_map *bpf_raw_events; +#endif #ifdef HAVE_JUMP_LABEL struct jump_entry *jump_entries; unsigned int num_jump_entries; @@ -486,6 +490,13 @@ struct module { #define MODULE_ARCH_INIT {} #endif +#ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE +static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym) +{ + return sym->st_value; +} +#endif + extern struct mutex module_mutex; /* FIXME: It'd be nice to isolate modules during init, too, so they diff --git a/include/linux/mount.h b/include/linux/mount.h index 45b1f56c6c2f..037eed52164b 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -81,7 +81,7 @@ extern void mnt_drop_write_file(struct file *file); extern void mntput(struct vfsmount *mnt); extern struct vfsmount *mntget(struct vfsmount *mnt); extern struct vfsmount *mnt_clone_internal(const struct path *path); -extern int __mnt_is_readonly(struct vfsmount *mnt); +extern bool __mnt_is_readonly(struct vfsmount *mnt); extern bool mnt_may_suid(struct vfsmount *mnt); struct path; diff --git a/include/linux/msi.h b/include/linux/msi.h index 0e9c50052ff3..784fb52b9900 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -76,7 +76,7 @@ struct msi_desc { unsigned int nvec_used; struct device *dev; struct msi_msg msg; - struct cpumask *affinity; + struct irq_affinity_desc *affinity; union { /* PCI MSI/X specific data */ @@ -116,6 +116,8 @@ struct msi_desc { list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) #define for_each_msi_entry(desc, dev) \ list_for_each_entry((desc), dev_to_msi_list((dev)), list) +#define for_each_msi_entry_safe(desc, tmp, dev) \ + list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) #ifdef CONFIG_PCI_MSI #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) @@ -136,7 +138,7 @@ static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) #endif /* CONFIG_PCI_MSI */ struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, - const struct cpumask *affinity); + const struct irq_affinity_desc *affinity); void free_msi_entry(struct msi_desc *entry); void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 9b57a9b1b081..cbf77168658c 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -377,6 +377,7 @@ struct cfi_fixup { #define CFI_MFR_SHARP 0x00B0 #define CFI_MFR_SST 0x00BF #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ +#define CFI_MFR_MICRON 0x002C /* Micron */ #define CFI_MFR_TOSHIBA 0x0098 #define CFI_MFR_WINBOND 0x00DA diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index cd0be91bdefa..677768b21a1d 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -25,6 +25,7 @@ #include <linux/notifier.h> #include <linux/device.h> #include <linux/of.h> +#include <linux/nvmem-provider.h> #include <mtd/mtd-abi.h> @@ -207,6 +208,7 @@ struct mtd_debug_info { struct mtd_info { u_char type; uint32_t flags; + uint32_t orig_flags; /* Flags as before running mtd checks */ uint64_t size; // Total size of the MTD /* "Major" erase size for the device. 
Naïve users may take this @@ -341,6 +343,7 @@ struct mtd_info { struct device dev; int usecount; struct mtd_debug_info dbg; + struct nvmem_device *nvmem; }; int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, @@ -386,7 +389,7 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) return dev_of_node(&mtd->dev); } -static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) +static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) { return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize; } diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index abe975c87b90..7f53ece2c039 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand) */ static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) { - return (u64)nand->memorg.luns_per_target * - nand->memorg.eraseblocks_per_lun * - nand->memorg.pages_per_eraseblock; + return nand->memorg.ntargets * nand->memorg.luns_per_target * + nand->memorg.eraseblocks_per_lun; } /** @@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand, } /** - * nanddev_pos_next_eraseblock() - Move a position to the next page + * nanddev_pos_next_page() - Move a position to the next page * @nand: NAND device * @pos: the position to update * diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index e10b126e148f..33e240acdc6d 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -203,9 +203,12 @@ enum nand_ecc_algo { */ #define NAND_IS_BOOT_MEDIUM 0x00400000 -/* Options set by nand scan */ -/* Nand scan has allocated controller struct */ -#define NAND_CONTROLLER_ALLOC 0x80000000 +/* + * Do not try to tweak the timings at runtime. This is needed when the + * controller initializes the timings on itself or when it relies on + * configuration done by the bootloader. + */ +#define NAND_KEEP_TIMINGS 0x00800000 /* Cell info constants */ #define NAND_CI_CHIPNR_MSK 0x03 @@ -245,49 +248,6 @@ struct nand_id { }; /** - * struct nand_controller_ops - Controller operations - * - * @attach_chip: this method is called after the NAND detection phase after - * flash ID and MTD fields such as erase size, page size and OOB - * size have been set up. ECC requirements are available if - * provided by the NAND chip or device tree. Typically used to - * choose the appropriate ECC configuration and allocate - * associated resources. - * This hook is optional. - * @detach_chip: free all resources allocated/claimed in - * nand_controller_ops->attach_chip(). - * This hook is optional. - */ -struct nand_controller_ops { - int (*attach_chip)(struct nand_chip *chip); - void (*detach_chip)(struct nand_chip *chip); -}; - -/** - * struct nand_controller - Structure used to describe a NAND controller - * - * @lock: protection lock - * @active: the mtd device which holds the controller currently - * @wq: wait queue to sleep on if a NAND operation is in - * progress used instead of the per chip wait queue - * when a hw controller is available. - * @ops: NAND controller operations. 
- */ -struct nand_controller { - spinlock_t lock; - struct nand_chip *active; - wait_queue_head_t wq; - const struct nand_controller_ops *ops; -}; - -static inline void nand_controller_init(struct nand_controller *nfc) -{ - nfc->active = NULL; - spin_lock_init(&nfc->lock); - init_waitqueue_head(&nfc->wq); -} - -/** * struct nand_ecc_step_info - ECC step information of ECC engine * @stepsize: data bytes per ECC step * @strengths: array of supported strengths @@ -879,18 +839,21 @@ struct nand_op_parser { /** * struct nand_operation - NAND operation descriptor + * @cs: the CS line to select for this NAND operation * @instrs: array of instructions to execute * @ninstrs: length of the @instrs array * * The actual operation structure that will be passed to chip->exec_op(). */ struct nand_operation { + unsigned int cs; const struct nand_op_instr *instrs; unsigned int ninstrs; }; -#define NAND_OPERATION(_instrs) \ +#define NAND_OPERATION(_cs, _instrs) \ { \ + .cs = _cs, \ .instrs = _instrs, \ .ninstrs = ARRAY_SIZE(_instrs), \ } @@ -898,11 +861,68 @@ struct nand_operation { int nand_op_parser_exec_op(struct nand_chip *chip, const struct nand_op_parser *parser, const struct nand_operation *op, bool check_only); +/** + * struct nand_controller_ops - Controller operations + * + * @attach_chip: this method is called after the NAND detection phase after + * flash ID and MTD fields such as erase size, page size and OOB + * size have been set up. ECC requirements are available if + * provided by the NAND chip or device tree. Typically used to + * choose the appropriate ECC configuration and allocate + * associated resources. + * This hook is optional. + * @detach_chip: free all resources allocated/claimed in + * nand_controller_ops->attach_chip(). + * This hook is optional. + * @exec_op: controller specific method to execute NAND operations. + * This method replaces chip->legacy.cmdfunc(), + * chip->legacy.{read,write}_{buf,byte,word}(), + * chip->legacy.dev_ready() and chip->legacy.waifunc(). + * @setup_data_interface: setup the data interface and timing. If + * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this + * means the configuration should not be applied but + * only checked. + * This hook is optional. + */ +struct nand_controller_ops { + int (*attach_chip)(struct nand_chip *chip); + void (*detach_chip)(struct nand_chip *chip); + int (*exec_op)(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only); + int (*setup_data_interface)(struct nand_chip *chip, int chipnr, + const struct nand_data_interface *conf); +}; + +/** + * struct nand_controller - Structure used to describe a NAND controller + * + * @lock: protection lock + * @active: the mtd device which holds the controller currently + * @wq: wait queue to sleep on if a NAND operation is in + * progress used instead of the per chip wait queue + * when a hw controller is available. + * @ops: NAND controller operations. 
+ */ +struct nand_controller { + spinlock_t lock; + struct nand_chip *active; + wait_queue_head_t wq; + const struct nand_controller_ops *ops; +}; + +static inline void nand_controller_init(struct nand_controller *nfc) +{ + nfc->active = NULL; + spin_lock_init(&nfc->lock); + init_waitqueue_head(&nfc->wq); +} /** * struct nand_legacy - NAND chip legacy fields/hooks * @IO_ADDR_R: address to read the 8 I/O lines of the flash device * @IO_ADDR_W: address to write the 8 I/O lines of the flash device + * @select_chip: select/deselect a specific target/die * @read_byte: read one byte from the chip * @write_byte: write a single byte to the chip on the low 8 I/O lines * @write_buf: write data from the buffer to the chip @@ -921,6 +941,8 @@ int nand_op_parser_exec_op(struct nand_chip *chip, * @get_features: get the NAND chip features * @chip_delay: chip dependent delay for transferring data from array to read * regs (tR). + * @dummy_controller: dummy controller implementation for drivers that can + * only control a single chip * * If you look at this structure you're already wrong. These fields/hooks are * all deprecated. @@ -928,6 +950,7 @@ int nand_op_parser_exec_op(struct nand_chip *chip, struct nand_legacy { void __iomem *IO_ADDR_R; void __iomem *IO_ADDR_W; + void (*select_chip)(struct nand_chip *chip, int cs); u8 (*read_byte)(struct nand_chip *chip); void (*write_byte)(struct nand_chip *chip, u8 byte); void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len); @@ -945,6 +968,7 @@ struct nand_legacy { int (*get_features)(struct nand_chip *chip, int feature_addr, u8 *subfeature_para); int chip_delay; + struct nand_controller dummy_controller; }; /** @@ -955,17 +979,10 @@ struct nand_legacy { * you're modifying an existing driver that is using those * fields/hooks, you should consider reworking the driver * avoid using them. - * @select_chip: [REPLACEABLE] select chip nr - * @exec_op: controller specific method to execute NAND operations. - * This method replaces ->cmdfunc(), - * ->legacy.{read,write}_{buf,byte,word}(), - * ->legacy.dev_ready() and ->waifunc(). * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for * setting the read-retry mode. Mostly needed for MLC NAND. * @ecc: [BOARDSPECIFIC] ECC control structure * @buf_align: minimum buffer alignment required by a platform - * @dummy_controller: dummy controller implementation for drivers that can - * only control a single chip * @state: [INTERN] the current state of the NAND device * @oob_poi: "poison value buffer," used for laying out OOB data * before writing @@ -1012,11 +1029,11 @@ struct nand_legacy { * this nand device will encounter their life times. * @blocks_per_die: [INTERN] The number of PEBs in a die * @data_interface: [INTERN] NAND interface timing information + * @cur_cs: currently selected target. -1 means no target selected, + * otherwise we should always have cur_cs >= 0 && + * cur_cs < numchips. NAND Controller drivers should not + * modify this value, but they're allowed to read it. * @read_retries: [INTERN] the number of read retry modes supported - * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If - * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this - * means the configuration should not be applied but - * only checked. * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash * lookup. 
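As a rough illustration of the relocated controller API above (struct nand_controller_ops, struct nand_controller and nand_controller_init()), a single-target driver might hook its operations as sketched below. Every my_nfc_* symbol is hypothetical and the exec_op body is a stub, not an implementation from this patch.

#include <linux/errno.h>
#include <linux/mtd/rawnand.h>

/* Stubbed exec_op: op->cs selects the target, op->instrs/op->ninstrs
 * carry the command/address/data/waitrdy sequence, and check_only asks
 * the controller to validate the sequence without executing it.
 */
static int my_nfc_exec_op(struct nand_chip *chip,
			  const struct nand_operation *op, bool check_only)
{
	if (check_only)
		return 0;	/* pretend every operation is supported */

	return -ENOTSUPP;	/* a real driver would issue the cycles here */
}

static const struct nand_controller_ops my_nfc_ops = {
	.exec_op = my_nfc_exec_op,
	/* .attach_chip, .detach_chip and .setup_data_interface are optional */
};

static struct nand_controller my_nfc_controller;

static void my_nfc_setup(void)
{
	nand_controller_init(&my_nfc_controller);
	my_nfc_controller.ops = &my_nfc_ops;
}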
@@ -1037,13 +1054,7 @@ struct nand_chip { struct nand_legacy legacy; - void (*select_chip)(struct nand_chip *chip, int cs); - int (*exec_op)(struct nand_chip *chip, - const struct nand_operation *op, - bool check_only); int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); - int (*setup_data_interface)(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf); unsigned int options; unsigned int bbt_options; @@ -1073,6 +1084,8 @@ struct nand_chip { struct nand_data_interface data_interface; + int cur_cs; + int read_retries; flstate_t state; @@ -1082,7 +1095,6 @@ struct nand_chip { struct nand_ecc_ctrl ecc; unsigned long buf_align; - struct nand_controller dummy_controller; uint8_t *bbt; struct nand_bbt_descr *bbt_td; @@ -1098,15 +1110,6 @@ struct nand_chip { } manufacturer; }; -static inline int nand_exec_op(struct nand_chip *chip, - const struct nand_operation *op) -{ - if (!chip->exec_op) - return -ENOTSUPP; - - return chip->exec_op(chip, op, false); -} - extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; @@ -1345,5 +1348,12 @@ void nand_release(struct nand_chip *chip); * instruction and have no physical pin to check it. */ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms); +struct gpio_desc; +int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, + unsigned long timeout_ms); + +/* Select/deselect a NAND target. */ +void nand_select_target(struct nand_chip *chip, unsigned int cs); +void nand_deselect_target(struct nand_chip *chip); #endif /* __LINUX_MTD_RAWNAND_H */ diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h index c759d403cbc0..78fc2d4218c8 100644 --- a/include/linux/mtd/sh_flctl.h +++ b/include/linux/mtd/sh_flctl.h @@ -1,20 +1,8 @@ -/* +/* SPDX-License-Identifier: GPL-2.0 + * * SuperH FLCTL nand controller * * Copyright © 2008 Renesas Solutions Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __SH_FLCTL_H__ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 7f0c7303575e..fa2d89e38e40 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2014 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #ifndef __LINUX_MTD_SPI_NOR_H @@ -23,7 +19,8 @@ #define SNOR_MFR_ATMEL CFI_MFR_ATMEL #define SNOR_MFR_GIGADEVICE 0xc8 #define SNOR_MFR_INTEL CFI_MFR_INTEL -#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ +#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */ +#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */ #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX #define SNOR_MFR_SPANSION CFI_MFR_AMD #define SNOR_MFR_SST CFI_MFR_SST @@ -236,6 +233,8 @@ enum spi_nor_option_flags { SNOR_F_READY_XSR_RDY = BIT(4), SNOR_F_USE_CLSR = BIT(5), SNOR_F_BROKEN_RESET = BIT(6), + SNOR_F_4B_OPCODES = BIT(7), + SNOR_F_HAS_4BAIT = BIT(8), }; /** diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 088ff96c3eb6..b92e2aa955b6 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -194,8 +194,10 @@ struct spinand_manufacturer { }; /* SPI NAND manufacturers */ +extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; +extern const struct spinand_manufacturer toshiba_spinand_manufacturer; extern const struct spinand_manufacturer winbond_spinand_manufacturer; /** diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h index c79e859408e6..fd458389f7d1 100644 --- a/include/linux/net_dim.h +++ b/include/linux/net_dim.h @@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim, } /* fall through */ case NET_DIM_START_MEASURE: + net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr, + &dim->start_sample); dim->state = NET_DIM_MEASURE_IN_PROGRESS; break; case NET_DIM_APPLY_NEW_PROFILE: diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index dc1d9ed33b31..1377d085ef99 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -845,6 +845,8 @@ enum tc_setup_type { TC_SETUP_QDISC_PRIO, TC_SETUP_QDISC_MQ, TC_SETUP_QDISC_ETF, + TC_SETUP_ROOT_QDISC, + TC_SETUP_QDISC_GRED, }; /* These structures hold the attributes of bpf state that are being passed @@ -863,9 +865,6 @@ enum bpf_netdev_command { XDP_QUERY_PROG, XDP_QUERY_PROG_HW, /* BPF program for offload callbacks, invoked at program load time. */ - BPF_OFFLOAD_VERIFIER_PREP, - BPF_OFFLOAD_TRANSLATE, - BPF_OFFLOAD_DESTROY, BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE, XDP_QUERY_XSK_UMEM, @@ -891,15 +890,6 @@ struct netdev_bpf { /* flags with which program was installed */ u32 prog_flags; }; - /* BPF_OFFLOAD_VERIFIER_PREP */ - struct { - struct bpf_prog *prog; - const struct bpf_prog_offload_ops *ops; /* callee set */ - } verifier; - /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ - struct { - struct bpf_prog *prog; - } offload; /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ struct { struct bpf_offloaded_map *offmap; @@ -1175,7 +1165,7 @@ struct dev_ifalias { * entries to skb and update idx with the number of entries. 
* * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, - * u16 flags) + * u16 flags, struct netlink_ext_ack *extack) * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, * struct net_device *dev, u32 filter_mask, * int nlflags) @@ -1397,10 +1387,16 @@ struct net_device_ops { struct net_device *dev, struct net_device *filter_dev, int *idx); - + int (*ndo_fdb_get)(struct sk_buff *skb, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, u32 portid, u32 seq, + struct netlink_ext_ack *extack); int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, - u16 flags); + u16 flags, + struct netlink_ext_ack *extack); int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, @@ -2388,13 +2384,13 @@ struct pcpu_sw_netstats { u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; -}; +} __aligned(4 * sizeof(u64)); struct pcpu_lstats { u64 packets; u64 bytes; struct u64_stats_sync syncp; -}; +} __aligned(2 * sizeof(u64)); #define __netdev_alloc_pcpu_stats(type, gfp) \ ({ \ @@ -2459,7 +2455,8 @@ enum netdev_cmd { NETDEV_REGISTER, NETDEV_UNREGISTER, NETDEV_CHANGEMTU, /* notify after mtu change happened */ - NETDEV_CHANGEADDR, + NETDEV_CHANGEADDR, /* notify after the address change */ + NETDEV_PRE_CHANGEADDR, /* notify before the address change */ NETDEV_GOING_DOWN, NETDEV_CHANGENAME, NETDEV_FEAT_CHANGE, @@ -2521,6 +2518,11 @@ struct netdev_notifier_changelowerstate_info { void *lower_state_info; /* is lower dev state */ }; +struct netdev_notifier_pre_changeaddr_info { + struct netdev_notifier_info info; /* must be first */ + const unsigned char *dev_addr; +}; + static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, struct net_device *dev) { @@ -2615,7 +2617,7 @@ struct net_device *dev_get_by_name(struct net *net, const char *name); struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct net_device *__dev_get_by_name(struct net *net, const char *name); int dev_alloc_name(struct net_device *dev, const char *name); -int dev_open(struct net_device *dev); +int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); void dev_close(struct net_device *dev); void dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); @@ -3190,6 +3192,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, #endif } +/* Variant of netdev_tx_sent_queue() for drivers that are aware + * that they should not test BQL status themselves. + * We do want to change __QUEUE_STATE_STACK_XOFF only for the last + * skb of a batch. + * Returns true if the doorbell must be used to kick the NIC. 
+ */ +static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes, + bool xmit_more) +{ + if (xmit_more) { +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); +#endif + return netif_tx_queue_stopped(dev_queue); + } + netdev_tx_sent_queue(dev_queue, bytes); + return true; +} + /** * netdev_sent_queue - report the number of bytes queued to hardware * @dev: network device @@ -3204,6 +3226,14 @@ static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); } +static inline bool __netdev_sent_queue(struct net_device *dev, + unsigned int bytes, + bool xmit_more) +{ + return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, + xmit_more); +} + static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, unsigned int pkts, unsigned int bytes) { @@ -3593,8 +3623,10 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, int dev_ifconf(struct net *net, struct ifconf *, int); int dev_ethtool(struct net *net, struct ifreq *); unsigned int dev_get_flags(const struct net_device *); -int __dev_change_flags(struct net_device *, unsigned int flags); -int dev_change_flags(struct net_device *, unsigned int); +int __dev_change_flags(struct net_device *dev, unsigned int flags, + struct netlink_ext_ack *extack); +int dev_change_flags(struct net_device *dev, unsigned int flags, + struct netlink_ext_ack *extack); void __dev_notify_flags(struct net_device *, unsigned int old_flags, unsigned int gchanges); int dev_change_name(struct net_device *, const char *); @@ -3607,7 +3639,10 @@ int dev_set_mtu_ext(struct net_device *dev, int mtu, int dev_set_mtu(struct net_device *, int); int dev_change_tx_queue_len(struct net_device *, unsigned long); void dev_set_group(struct net_device *, int); -int dev_set_mac_address(struct net_device *, struct sockaddr *); +int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, + struct netlink_ext_ack *extack); +int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, + struct netlink_ext_ack *extack); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); @@ -4048,6 +4083,16 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, int (*sync)(struct net_device *, const unsigned char *), int (*unsync)(struct net_device *, const unsigned char *)); +int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *, int), + int (*unsync)(struct net_device *, + const unsigned char *, int)); +void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *, int)); void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, struct net_device *dev, int (*unsync)(struct net_device *, @@ -4312,9 +4357,10 @@ static inline bool can_checksum_protocol(netdev_features_t features, } #ifdef CONFIG_BUG -void netdev_rx_csum_fault(struct net_device *dev); +void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); #else -static inline void netdev_rx_csum_fault(struct net_device *dev) +static inline void netdev_rx_csum_fault(struct net_device *dev, + struct sk_buff *skb) { } #endif @@ -4340,7 +4386,7 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi struct netdev_queue *txq, bool more) { const struct net_device_ops *ops = 
dev->netdev_ops; - int rc; + netdev_tx_t rc; rc = __netdev_start_xmit(ops, skb, dev, more); if (rc == NETDEV_TX_OK) diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 34fc80f3eb90..f2e1e6b13ca4 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -303,18 +303,18 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) /* Netlink CB args */ enum { IPSET_CB_NET = 0, /* net namespace */ + IPSET_CB_PROTO, /* ipset protocol */ IPSET_CB_DUMP, /* dump single set/all sets */ IPSET_CB_INDEX, /* set index */ IPSET_CB_PRIVATE, /* set private data */ IPSET_CB_ARG0, /* type specific */ - IPSET_CB_ARG1, }; /* register and unregister set references */ extern ip_set_id_t ip_set_get_byname(struct net *net, const char *name, struct ip_set **set); extern void ip_set_put_byindex(struct net *net, ip_set_id_t index); -extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index); +extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name); extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index); extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index); diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h index 8e2bab1e8e90..70877f8de7e9 100644 --- a/include/linux/netfilter/ipset/ip_set_comment.h +++ b/include/linux/netfilter/ipset/ip_set_comment.h @@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, rcu_assign_pointer(comment->c, c); } -/* Used only when dumping a set, protected by rcu_read_lock_bh() */ +/* Used only when dumping a set, protected by rcu_read_lock() */ static inline int ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) { - struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c); + struct ip_set_comment_rcu *c = rcu_dereference(comment->c); if (!c) return 0; diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index b8d95564bd53..6989e2e4eabf 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -21,6 +21,19 @@ struct nf_ct_gre_keymap { struct nf_conntrack_tuple tuple; }; +enum grep_conntrack { + GRE_CT_UNREPLIED, + GRE_CT_REPLIED, + GRE_CT_MAX +}; + +struct netns_proto_gre { + struct nf_proto_net nf; + rwlock_t keymap_lock; + struct list_head keymap_list; + unsigned int gre_timeouts[GRE_CT_MAX]; +}; + /* add new tuple->key_reply pair to keymap */ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, struct nf_conntrack_tuple *t); @@ -28,7 +41,5 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, /* delete keymap entries */ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); -void nf_nat_need_gre(void); - #endif /* __KERNEL__ */ #endif /* _CONNTRACK_PROTO_GRE_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 4a520d3304a2..cf09ab37b45b 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -62,18 +62,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id) } #endif /* CONFIG_PROVE_LOCKING */ -/* - * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex - * - * @p: The pointer to read, prior to dereferencing - * @ss: The nfnetlink subsystem ID - * - * Return the value of the specified RCU-protected pointer, but omit - * the 
READ_ONCE(), because caller holds the NFNL subsystem mutex. - */ -#define nfnl_dereference(p, ss) \ - rcu_dereference_protected(p, lockdep_nfnl_is_held(ss)) - #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index fa0686500970..5f2614d02e03 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -17,43 +17,58 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb) skb_dst_drop(skb); } +static inline struct nf_bridge_info * +nf_bridge_info_get(const struct sk_buff *skb) +{ + return skb_ext_find(skb, SKB_EXT_BRIDGE_NF); +} + +static inline bool nf_bridge_info_exists(const struct sk_buff *skb) +{ + return skb_ext_exist(skb, SKB_EXT_BRIDGE_NF); +} + static inline int nf_bridge_get_physinif(const struct sk_buff *skb) { - struct nf_bridge_info *nf_bridge; + const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); - if (skb->nf_bridge == NULL) + if (!nf_bridge) return 0; - nf_bridge = skb->nf_bridge; return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0; } static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) { - struct nf_bridge_info *nf_bridge; + const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); - if (skb->nf_bridge == NULL) + if (!nf_bridge) return 0; - nf_bridge = skb->nf_bridge; return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0; } static inline struct net_device * nf_bridge_get_physindev(const struct sk_buff *skb) { - return skb->nf_bridge ? skb->nf_bridge->physindev : NULL; + const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + + return nf_bridge ? nf_bridge->physindev : NULL; } static inline struct net_device * nf_bridge_get_physoutdev(const struct sk_buff *skb) { - return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL; + const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + + return nf_bridge ? 
nf_bridge->physoutdev : NULL; } static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb) { - return skb->nf_bridge && skb->nf_bridge->in_prerouting; + const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + + return nf_bridge && nf_bridge->in_prerouting; } #else #define br_drop_fake_rtable(skb) do { } while (0) diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 4da90a6ab536..4e8add270200 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -34,8 +34,8 @@ struct netlink_skb_parms { #define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) -extern void netlink_table_grab(void); -extern void netlink_table_ungrab(void); +void netlink_table_grab(void); +void netlink_table_ungrab(void); #define NL_CFG_F_NONROOT_RECV (1 << 0) #define NL_CFG_F_NONROOT_SEND (1 << 1) @@ -51,7 +51,7 @@ struct netlink_kernel_cfg { bool (*compare)(struct net *net, struct sock *sk); }; -extern struct sock *__netlink_kernel_create(struct net *net, int unit, +struct sock *__netlink_kernel_create(struct net *net, int unit, struct module *module, struct netlink_kernel_cfg *cfg); static inline struct sock * @@ -110,24 +110,33 @@ struct netlink_ext_ack { } \ } while (0) -extern void netlink_kernel_release(struct sock *sk); -extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); -extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); -extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); -extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, - const struct netlink_ext_ack *extack); -extern int netlink_has_listeners(struct sock *sk, unsigned int group); - -extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); -extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, - __u32 group, gfp_t allocation); -extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, - __u32 portid, __u32 group, gfp_t allocation, - int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), - void *filter_data); -extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); -extern int netlink_register_notifier(struct notifier_block *nb); -extern int netlink_unregister_notifier(struct notifier_block *nb); +static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, + u64 cookie) +{ + u64 __cookie = cookie; + + memcpy(extack->cookie, &__cookie, sizeof(__cookie)); + extack->cookie_len = sizeof(__cookie); +} + +void netlink_kernel_release(struct sock *sk); +int __netlink_change_ngroups(struct sock *sk, unsigned int groups); +int netlink_change_ngroups(struct sock *sk, unsigned int groups); +void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); +void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, + const struct netlink_ext_ack *extack); +int netlink_has_listeners(struct sock *sk, unsigned int group); + +int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); +int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, + __u32 group, gfp_t allocation); +int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, + __u32 portid, __u32 group, gfp_t allocation, + int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), + void *filter_data); +int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); +int netlink_register_notifier(struct notifier_block *nb); +int 
netlink_unregister_notifier(struct notifier_block *nb); /* finegrained unicast helpers: */ struct sock *netlink_getsockbyfilp(struct file *filp); @@ -203,7 +212,7 @@ struct netlink_dump_control { u16 min_dump_alloc; }; -extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, +int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, struct netlink_dump_control *control); static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, @@ -222,8 +231,8 @@ struct netlink_tap { struct list_head list; }; -extern int netlink_add_tap(struct netlink_tap *nt); -extern int netlink_remove_tap(struct netlink_tap *nt); +int netlink_add_tap(struct netlink_tap *nt); +int netlink_remove_tap(struct netlink_tap *nt); bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, struct user_namespace *ns, int cap); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 6e0417c02279..40e30376130b 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -51,7 +51,7 @@ struct nfs_access_entry { struct rb_node rb_node; struct list_head lru; - struct rpc_cred * cred; + const struct cred * cred; __u32 mask; struct rcu_head rcu_head; }; @@ -70,7 +70,8 @@ struct nfs_open_context { struct nfs_lock_context lock_context; fl_owner_t flock_owner; struct dentry *dentry; - struct rpc_cred *cred; + const struct cred *cred; + struct rpc_cred *ll_cred; /* low-level cred - use to check for expiry */ struct nfs4_state *state; fmode_t mode; @@ -88,7 +89,7 @@ struct nfs_open_context { struct nfs_open_dir_context { struct list_head list; - struct rpc_cred *cred; + const struct cred *cred; unsigned long attr_gencount; __u64 dir_cookie; __u64 dup_cookie; @@ -390,7 +391,7 @@ extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, struct nfs4_label *label); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); -extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); +extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode); extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp); extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); @@ -461,7 +462,7 @@ static inline struct nfs_open_context *nfs_file_open_context(struct file *filp) return filp->private_data; } -static inline struct rpc_cred *nfs_file_cred(struct file *file) +static inline const struct cred *nfs_file_cred(struct file *file) { if (file != NULL) { struct nfs_open_context *ctx = @@ -490,7 +491,7 @@ extern const struct dentry_operations nfs_dentry_operations; extern void nfs_force_lookup_revalidate(struct inode *dir); extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label); -extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags); +extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); /* diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 0fc0b9135d46..6aa8cc83c3b6 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -58,7 +58,7 @@ struct nfs_client { struct nfs_subversion * cl_nfs_mod; /* 
pointer to nfs version module */ u32 cl_minorversion;/* NFSv4 minorversion */ - struct rpc_cred *cl_machine_cred; + const char * cl_principal; /* used for machine cred */ #if IS_ENABLED(CONFIG_NFS_V4) struct list_head cl_ds_clients; /* auth flavor data servers */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 0e016252cfc6..441a93ebcac0 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -270,7 +270,7 @@ struct nfs4_layoutget_res { struct nfs4_layoutget { struct nfs4_layoutget_args args; struct nfs4_layoutget_res res; - struct rpc_cred *cred; + const struct cred *cred; gfp_t gfp_flags; }; @@ -309,7 +309,7 @@ struct nfs4_layoutcommit_data { struct rpc_task task; struct nfs_fattr fattr; struct list_head lseg_list; - struct rpc_cred *cred; + const struct cred *cred; struct inode *inode; struct nfs4_layoutcommit_args args; struct nfs4_layoutcommit_res res; @@ -334,7 +334,7 @@ struct nfs4_layoutreturn_res { struct nfs4_layoutreturn { struct nfs4_layoutreturn_args args; struct nfs4_layoutreturn_res res; - struct rpc_cred *cred; + const struct cred *cred; struct nfs_client *clp; struct inode *inode; int rpc_status; @@ -1469,7 +1469,7 @@ enum { struct nfs_io_completion; struct nfs_pgio_header { struct inode *inode; - struct rpc_cred *cred; + const struct cred *cred; struct list_head pages; struct nfs_page *req; struct nfs_writeverf verf; /* Used for writes */ @@ -1529,7 +1529,7 @@ struct nfs_commit_info { struct nfs_commit_data { struct rpc_task task; struct inode *inode; - struct rpc_cred *cred; + const struct cred *cred; struct nfs_fattr fattr; struct nfs_writeverf verf; struct list_head pages; /* Coalesced requests we wish to flush */ @@ -1560,7 +1560,7 @@ struct nfs_unlinkdata { struct nfs_removeres res; struct dentry *dentry; wait_queue_head_t wq; - struct rpc_cred *cred; + const struct cred *cred; struct nfs_fattr dir_attr; long timeout; }; @@ -1568,7 +1568,7 @@ struct nfs_unlinkdata { struct nfs_renamedata { struct nfs_renameargs args; struct nfs_renameres res; - struct rpc_cred *cred; + const struct cred *cred; struct inode *old_dir; struct dentry *old_dentry; struct nfs_fattr old_fattr; @@ -1634,7 +1634,7 @@ struct nfs_rpc_ops { unsigned int, struct iattr *); int (*mkdir) (struct inode *, struct dentry *, struct iattr *); int (*rmdir) (struct inode *, const struct qstr *); - int (*readdir) (struct dentry *, struct rpc_cred *, + int (*readdir) (struct dentry *, const struct cred *, u64, struct page **, unsigned int, bool); int (*mknod) (struct inode *, struct dentry *, struct iattr *, dev_t); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 08f9247e9827..9003e29cde46 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; } void watchdog_nmi_stop(void); void watchdog_nmi_start(void); int watchdog_nmi_probe(void); +int watchdog_nmi_enable(unsigned int cpu); +void watchdog_nmi_disable(unsigned int cpu); /** * touch_nmi_watchdog - restart NMI watchdog timeout. 
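The NFS hunks above replace struct rpc_cred with the generic const struct cred * throughout the open-context, access-cache and RPC-data paths. A hedged usage sketch, based only on the prototypes visible in this diff (nfs_find_open_context() and nfs_file_cred()), showing that callers can now pass current_cred() directly instead of first deriving an rpc_cred:

#include <linux/cred.h>
#include <linux/nfs_fs.h>

/* Look up an open context for the current task's credentials. */
static struct nfs_open_context *demo_find_ctx(struct inode *inode, fmode_t mode)
{
        return nfs_find_open_context(inode, current_cred(), mode);
}

/* The per-file credential is likewise a plain 'const struct cred *'. */
static const struct cred *demo_file_cred(struct file *filp)
{
        return nfs_file_cred(filp);
}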
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 496ff759f84c..91745cc3704c 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -403,7 +403,6 @@ struct nvme_fc_port_template { void **handle); void (*delete_queue)(struct nvme_fc_local_port *, unsigned int qidx, void *handle); - void (*poll_queue)(struct nvme_fc_local_port *, void *handle); int (*ls_req)(struct nvme_fc_local_port *, struct nvme_fc_remote_port *, struct nvmefc_ls_req *); @@ -649,22 +648,6 @@ enum { * sequence in one LLDD operation. Errors during Data * sequence transmit must not allow RSP sequence to be sent. */ - NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1), - /* Bit 2: When 0, the LLDD is calling the cmd rcv handler - * in a non-isr context, allowing the transport to finish - * op completion in the calling context. When 1, the LLDD - * is calling the cmd rcv handler in an ISR context, - * requiring the transport to transition to a workqueue - * for op completion. - */ - NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2), - /* Bit 3: When 0, the LLDD is calling the op done handler - * in a non-isr context, allowing the transport to finish - * op completion in the calling context. When 1, the LLDD - * is calling the op done handler in an ISR context, - * requiring the transport to transition to a workqueue - * for op completion. - */ }; diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h new file mode 100644 index 000000000000..03d87c0550a9 --- /dev/null +++ b/include/linux/nvme-tcp.h @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe over Fabrics TCP protocol header. + * Copyright (c) 2018 Lightbits Labs. All rights reserved. + */ + +#ifndef _LINUX_NVME_TCP_H +#define _LINUX_NVME_TCP_H + +#include <linux/nvme.h> + +#define NVME_TCP_DISC_PORT 8009 +#define NVME_TCP_ADMIN_CCSZ SZ_8K +#define NVME_TCP_DIGEST_LENGTH 4 + +enum nvme_tcp_pfv { + NVME_TCP_PFV_1_0 = 0x0, +}; + +enum nvme_tcp_fatal_error_status { + NVME_TCP_FES_INVALID_PDU_HDR = 0x01, + NVME_TCP_FES_PDU_SEQ_ERR = 0x02, + NVME_TCP_FES_HDR_DIGEST_ERR = 0x03, + NVME_TCP_FES_DATA_OUT_OF_RANGE = 0x04, + NVME_TCP_FES_R2T_LIMIT_EXCEEDED = 0x05, + NVME_TCP_FES_DATA_LIMIT_EXCEEDED = 0x05, + NVME_TCP_FES_UNSUPPORTED_PARAM = 0x06, +}; + +enum nvme_tcp_digest_option { + NVME_TCP_HDR_DIGEST_ENABLE = (1 << 0), + NVME_TCP_DATA_DIGEST_ENABLE = (1 << 1), +}; + +enum nvme_tcp_pdu_type { + nvme_tcp_icreq = 0x0, + nvme_tcp_icresp = 0x1, + nvme_tcp_h2c_term = 0x2, + nvme_tcp_c2h_term = 0x3, + nvme_tcp_cmd = 0x4, + nvme_tcp_rsp = 0x5, + nvme_tcp_h2c_data = 0x6, + nvme_tcp_c2h_data = 0x7, + nvme_tcp_r2t = 0x9, +}; + +enum nvme_tcp_pdu_flags { + NVME_TCP_F_HDGST = (1 << 0), + NVME_TCP_F_DDGST = (1 << 1), + NVME_TCP_F_DATA_LAST = (1 << 2), + NVME_TCP_F_DATA_SUCCESS = (1 << 3), +}; + +/** + * struct nvme_tcp_hdr - nvme tcp pdu common header + * + * @type: pdu type + * @flags: pdu specific flags + * @hlen: pdu header length + * @pdo: pdu data offset + * @plen: pdu wire byte length + */ +struct nvme_tcp_hdr { + __u8 type; + __u8 flags; + __u8 hlen; + __u8 pdo; + __le32 plen; +}; + +/** + * struct nvme_tcp_icreq_pdu - nvme tcp initialize connection request pdu + * + * @hdr: pdu generic header + * @pfv: pdu version format + * @hpda: host pdu data alignment (dwords, 0's based) + * @digest: digest types enabled + * @maxr2t: maximum r2ts per request supported + */ +struct nvme_tcp_icreq_pdu { + struct nvme_tcp_hdr hdr; + __le16 pfv; + __u8 hpda; + __u8 digest; + __le32 maxr2t; + __u8 rsvd2[112]; +}; + +/** + * struct 
nvme_tcp_icresp_pdu - nvme tcp initialize connection response pdu + * + * @hdr: pdu common header + * @pfv: pdu version format + * @cpda: controller pdu data alignment (dowrds, 0's based) + * @digest: digest types enabled + * @maxdata: maximum data capsules per r2t supported + */ +struct nvme_tcp_icresp_pdu { + struct nvme_tcp_hdr hdr; + __le16 pfv; + __u8 cpda; + __u8 digest; + __le32 maxdata; + __u8 rsvd[112]; +}; + +/** + * struct nvme_tcp_term_pdu - nvme tcp terminate connection pdu + * + * @hdr: pdu common header + * @fes: fatal error status + * @fei: fatal error information + */ +struct nvme_tcp_term_pdu { + struct nvme_tcp_hdr hdr; + __le16 fes; + __le32 fei; + __u8 rsvd[8]; +}; + +/** + * struct nvme_tcp_cmd_pdu - nvme tcp command capsule pdu + * + * @hdr: pdu common header + * @cmd: nvme command + */ +struct nvme_tcp_cmd_pdu { + struct nvme_tcp_hdr hdr; + struct nvme_command cmd; +}; + +/** + * struct nvme_tcp_rsp_pdu - nvme tcp response capsule pdu + * + * @hdr: pdu common header + * @hdr: nvme-tcp generic header + * @cqe: nvme completion queue entry + */ +struct nvme_tcp_rsp_pdu { + struct nvme_tcp_hdr hdr; + struct nvme_completion cqe; +}; + +/** + * struct nvme_tcp_r2t_pdu - nvme tcp ready-to-transfer pdu + * + * @hdr: pdu common header + * @command_id: nvme command identifier which this relates to + * @ttag: transfer tag (controller generated) + * @r2t_offset: offset from the start of the command data + * @r2t_length: length the host is allowed to send + */ +struct nvme_tcp_r2t_pdu { + struct nvme_tcp_hdr hdr; + __u16 command_id; + __u16 ttag; + __le32 r2t_offset; + __le32 r2t_length; + __u8 rsvd[4]; +}; + +/** + * struct nvme_tcp_data_pdu - nvme tcp data pdu + * + * @hdr: pdu common header + * @command_id: nvme command identifier which this relates to + * @ttag: transfer tag (controller generated) + * @data_offset: offset from the start of the command data + * @data_length: length of the data stream + */ +struct nvme_tcp_data_pdu { + struct nvme_tcp_hdr hdr; + __u16 command_id; + __u16 ttag; + __le32 data_offset; + __le32 data_length; + __u8 rsvd[4]; +}; + +union nvme_tcp_pdu { + struct nvme_tcp_icreq_pdu icreq; + struct nvme_tcp_icresp_pdu icresp; + struct nvme_tcp_cmd_pdu cmd; + struct nvme_tcp_rsp_pdu rsp; + struct nvme_tcp_r2t_pdu r2t; + struct nvme_tcp_data_pdu data; +}; + +#endif /* _LINUX_NVME_TCP_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 818dbe9331be..bbcc83886899 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -52,15 +52,20 @@ enum { enum { NVMF_TRTYPE_RDMA = 1, /* RDMA */ NVMF_TRTYPE_FC = 2, /* Fibre Channel */ + NVMF_TRTYPE_TCP = 3, /* TCP/IP */ NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */ NVMF_TRTYPE_MAX, }; /* Transport Requirements codes for Discovery Log Page entry TREQ field */ enum { - NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */ - NVMF_TREQ_REQUIRED = 1, /* Required */ - NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */ + NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */ + NVMF_TREQ_REQUIRED = 1, /* Required */ + NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */ +#define NVME_TREQ_SECURE_CHANNEL_MASK \ + (NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED) + + NVMF_TREQ_DISABLE_SQFLOW = (1 << 2), /* Supports SQ flow control disable */ }; /* RDMA QP Service Type codes for Discovery Log Page entry TSAS @@ -198,6 +203,11 @@ enum { NVME_PS_FLAGS_NON_OP_STATE = 1 << 1, }; +enum nvme_ctrl_attr { + NVME_CTRL_ATTR_HID_128_BIT = (1 << 0), + NVME_CTRL_ATTR_TBKAS = (1 << 6), +}; + struct nvme_id_ctrl { __le16 vid; __le16 ssvid; 
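The new nvme-tcp.h header above defines the PDU layouts exchanged during connection setup. As an illustration of how the common header and the ICReq fields fit together, here is a hedged sketch of filling an Initialize Connection Request PDU; the particular values (header digest enabled, dword-aligned data, a single outstanding R2T) are example choices, not requirements stated by the patch:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nvme-tcp.h>

/* Fill an ICReq PDU for transmission on a freshly connected socket. */
static void demo_init_icreq(struct nvme_tcp_icreq_pdu *icreq)
{
        memset(icreq, 0, sizeof(*icreq));
        icreq->hdr.type = nvme_tcp_icreq;
        icreq->hdr.hlen = sizeof(*icreq);
        icreq->hdr.pdo  = 0;                            /* no data in this PDU */
        icreq->hdr.plen = cpu_to_le32(sizeof(*icreq));  /* wire byte length */
        icreq->pfv      = cpu_to_le16(NVME_TCP_PFV_1_0);
        icreq->hpda     = 0;                            /* dword aligned, 0's based */
        icreq->digest   = NVME_TCP_HDR_DIGEST_ENABLE;   /* example choice */
        icreq->maxr2t   = cpu_to_le32(0);               /* 0's based: one R2T */
}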
@@ -214,7 +224,11 @@ struct nvme_id_ctrl { __le32 rtd3e; __le32 oaes; __le32 ctratt; - __u8 rsvd100[156]; + __u8 rsvd100[28]; + __le16 crdt1; + __le16 crdt2; + __le16 crdt3; + __u8 rsvd134[122]; __le16 oacs; __u8 acl; __u8 aerl; @@ -481,12 +495,21 @@ enum { NVME_AER_NOTICE_NS_CHANGED = 0x00, NVME_AER_NOTICE_FW_ACT_STARTING = 0x01, NVME_AER_NOTICE_ANA = 0x03, + NVME_AER_NOTICE_DISC_CHANGED = 0xf0, }; enum { - NVME_AEN_CFG_NS_ATTR = 1 << 8, - NVME_AEN_CFG_FW_ACT = 1 << 9, - NVME_AEN_CFG_ANA_CHANGE = 1 << 11, + NVME_AEN_BIT_NS_ATTR = 8, + NVME_AEN_BIT_FW_ACT = 9, + NVME_AEN_BIT_ANA_CHANGE = 11, + NVME_AEN_BIT_DISC_CHANGE = 31, +}; + +enum { + NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR, + NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT, + NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE, + NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE, }; struct nvme_lba_range_type { @@ -639,7 +662,12 @@ struct nvme_common_command { __le32 cdw2[2]; __le64 metadata; union nvme_data_ptr dptr; - __le32 cdw10[6]; + __le32 cdw10; + __le32 cdw11; + __le32 cdw12; + __le32 cdw13; + __le32 cdw14; + __le32 cdw15; }; struct nvme_rw_command { @@ -738,6 +766,15 @@ enum { NVME_HOST_MEM_RETURN = (1 << 1), }; +struct nvme_feat_host_behavior { + __u8 acre; + __u8 resv1[511]; +}; + +enum { + NVME_ENABLE_ACRE = 1, +}; + /* Admin commands */ enum nvme_admin_opcode { @@ -792,6 +829,7 @@ enum { NVME_FEAT_RRL = 0x12, NVME_FEAT_PLM_CONFIG = 0x13, NVME_FEAT_PLM_WINDOW = 0x14, + NVME_FEAT_HOST_BEHAVIOR = 0x16, NVME_FEAT_SW_PROGRESS = 0x80, NVME_FEAT_HOST_ID = 0x81, NVME_FEAT_RESV_MASK = 0x82, @@ -1030,6 +1068,10 @@ struct nvmf_disc_rsp_page_hdr { struct nvmf_disc_rsp_page_entry entries[0]; }; +enum { + NVME_CONNECT_DISABLE_SQFLOW = (1 << 2), +}; + struct nvmf_connect_command { __u8 opcode; __u8 resv1; @@ -1126,6 +1168,20 @@ struct nvme_command { }; }; +struct nvme_error_slot { + __le64 error_count; + __le16 sqid; + __le16 cmdid; + __le16 status_field; + __le16 param_error_location; + __le64 lba; + __le32 nsid; + __u8 vs; + __u8 resv[3]; + __le64 cs; + __u8 resv2[24]; +}; + static inline bool nvme_is_write(struct nvme_command *cmd) { /* @@ -1243,6 +1299,7 @@ enum { NVME_SC_ANA_TRANSITION = 0x303, NVME_SC_HOST_PATH_ERROR = 0x370, + NVME_SC_CRD = 0x1800, NVME_SC_DNR = 0x4000, }; diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 1e3283c2af77..fe051323be0a 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -19,6 +19,13 @@ typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset, typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, void *val, size_t bytes); +enum nvmem_type { + NVMEM_TYPE_UNKNOWN = 0, + NVMEM_TYPE_EEPROM, + NVMEM_TYPE_OTP, + NVMEM_TYPE_BATTERY_BACKED, +}; + /** * struct nvmem_config - NVMEM device configuration * @@ -28,8 +35,10 @@ typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, * @owner: Pointer to exporter module. Used for refcounting. * @cells: Optional array of pre-defined NVMEM cells. * @ncells: Number of elements in cells. + * @type: Type of the nvmem storage * @read_only: Device is read-only. * @root_only: Device is accessibly to root only. + * @no_of_node: Device should not use the parent's of_node even if it's !NULL. * @reg_read: Callback to read data. * @reg_write: Callback to write data. * @size: Device size. 
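With the new enum nvmem_type and the type/no_of_node members documented above, a provider can declare what kind of storage it exposes. A minimal, hypothetical registration sketch follows; demo_reg_read and the 256-byte size are made up, and only the nvmem_config fields come from this header:

#include <linux/string.h>
#include <linux/nvmem-provider.h>

/* Hypothetical read callback matching nvmem_reg_read_t. */
static int demo_reg_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
        memset(val, 0xff, bytes);       /* pretend the cells are erased */
        return 0;
}

static const struct nvmem_config demo_nvmem_cfg = {
        .name      = "demo-eeprom",
        .type      = NVMEM_TYPE_EEPROM, /* new in this series */
        .read_only = true,
        .reg_read  = demo_reg_read,
        .size      = 256,
};

/* A provider's probe() would then hand demo_nvmem_cfg (with .dev set to its
 * own struct device) to nvmem_register(). */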
@@ -51,8 +60,10 @@ struct nvmem_config { struct module *owner; const struct nvmem_cell_info *cells; int ncells; + enum nvmem_type type; bool read_only; bool root_only; + bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; int size; diff --git a/include/linux/objagg.h b/include/linux/objagg.h new file mode 100644 index 000000000000..34f38c186ea0 --- /dev/null +++ b/include/linux/objagg.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ + +#ifndef _OBJAGG_H +#define _OBJAGG_H + +struct objagg_ops { + size_t obj_size; + void * (*delta_create)(void *priv, void *parent_obj, void *obj); + void (*delta_destroy)(void *priv, void *delta_priv); + void * (*root_create)(void *priv, void *obj); + void (*root_destroy)(void *priv, void *root_priv); +}; + +struct objagg; +struct objagg_obj; + +const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj); +const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj); +const void *objagg_obj_raw(const struct objagg_obj *objagg_obj); + +struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj); +void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj); +struct objagg *objagg_create(const struct objagg_ops *ops, void *priv); +void objagg_destroy(struct objagg *objagg); + +struct objagg_obj_stats { + unsigned int user_count; + unsigned int delta_user_count; /* includes delta object users */ +}; + +struct objagg_obj_stats_info { + struct objagg_obj_stats stats; + struct objagg_obj *objagg_obj; /* associated object */ + bool is_root; +}; + +struct objagg_stats { + unsigned int stats_info_count; + struct objagg_obj_stats_info stats_info[]; +}; + +const struct objagg_stats *objagg_stats_get(struct objagg *objagg); +void objagg_stats_put(const struct objagg_stats *objagg_stats); + +#endif diff --git a/include/linux/of.h b/include/linux/of.h index a5aee3c438ad..fe472e5195a9 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -66,7 +66,6 @@ struct device_node { unsigned long _flags; void *data; #if defined(CONFIG_SPARC) - const char *path_component_name; unsigned int unique_id; struct of_irq_controller *irq_trans; #endif @@ -138,11 +137,16 @@ extern struct device_node *of_aliases; extern struct device_node *of_stdout; extern raw_spinlock_t devtree_lock; -/* flag descriptions (need to be visible even when !CONFIG_OF) */ -#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ -#define OF_DETACHED 2 /* node has been detached from the device tree */ -#define OF_POPULATED 3 /* device already created for the node */ -#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */ +/* + * struct device_node flag descriptions + * (need to be visible even when !CONFIG_OF) + */ +#define OF_DYNAMIC 1 /* (and properties) allocated via kmalloc */ +#define OF_DETACHED 2 /* detached from the device tree */ +#define OF_POPULATED 3 /* device already created */ +#define OF_POPULATED_BUS 4 /* platform bus created for children */ +#define OF_OVERLAY 5 /* allocated for an overlay */ +#define OF_OVERLAY_FREE_CSET 6 /* in overlay cset being freed */ #define OF_BAD_ADDR ((u64)-1) @@ -985,6 +989,12 @@ static inline int of_map_rid(struct device_node *np, u32 rid, #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) #endif +static inline int of_prop_val_eq(struct property *p1, struct property *p2) +{ + return p1->length == p2->length && + !memcmp(p1->value, p2->value, (size_t)p1->length); 
+} + #if defined(CONFIG_OF) && defined(CONFIG_NUMA) extern int of_node_to_nid(struct device_node *np); #else diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index b9cd9ebdf9b9..a713e5d156d8 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -76,6 +76,7 @@ extern int early_init_dt_scan_memory(unsigned long node, const char *uname, extern int early_init_dt_scan_chosen_stdout(void); extern void early_init_fdt_scan_reserved_mem(void); extern void early_init_fdt_reserve_self(void); +extern void __init early_init_dt_scan_chosen_arch(unsigned long node); extern void early_init_dt_add_memory_arch(u64 base, u64 size); extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, diff --git a/include/linux/of_net.h b/include/linux/of_net.h index 90d81ee9e6a0..9cd72aab76fe 100644 --- a/include/linux/of_net.h +++ b/include/linux/of_net.h @@ -13,7 +13,6 @@ struct net_device; extern int of_get_phy_mode(struct device_node *np); extern const void *of_get_mac_address(struct device_node *np); -extern int of_get_nvmem_mac_address(struct device_node *np, void *addr); extern struct net_device *of_find_net_device_by_node(struct device_node *np); #else static inline int of_get_phy_mode(struct device_node *np) @@ -26,11 +25,6 @@ static inline const void *of_get_mac_address(struct device_node *np) return NULL; } -static inline int of_get_nvmem_mac_address(struct device_node *np, void *addr) -{ - return -ENODEV; -} - static inline struct net_device *of_find_net_device_by_node(struct device_node *np) { return NULL; diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h index d0b183ab65c6..89e4eb076a01 100644 --- a/include/linux/of_pdt.h +++ b/include/linux/of_pdt.h @@ -35,6 +35,4 @@ extern void *prom_early_alloc(unsigned long size); /* for building the device tree */ extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); -extern void (*of_pdt_build_more)(struct device_node *dp); - #endif /* _LINUX_OF_PDT_H */ diff --git a/include/linux/oom.h b/include/linux/oom.h index 69864a547663..d07992009265 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -15,6 +15,13 @@ struct notifier_block; struct mem_cgroup; struct task_struct; +enum oom_constraint { + CONSTRAINT_NONE, + CONSTRAINT_CPUSET, + CONSTRAINT_MEMORY_POLICY, + CONSTRAINT_MEMCG, +}; + /* * Details of the page allocation that triggered the oom killer that are used to * determine what should be killed. @@ -42,6 +49,9 @@ struct oom_control { unsigned long totalpages; struct task_struct *chosen; unsigned long chosen_points; + + /* Used to print the constraint info. */ + enum oom_constraint constraint; }; extern struct mutex oom_lock; diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 7ec86bf31ce4..1dda31825ec4 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -82,6 +82,16 @@ #define LAST_CPUPID_WIDTH 0 #endif +#ifdef CONFIG_KASAN_SW_TAGS +#define KASAN_TAG_WIDTH 8 +#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \ + > BITS_PER_LONG - NR_PAGEFLAGS +#error "KASAN: not enough bits in page flags for tag" +#endif +#else +#define KASAN_TAG_WIDTH 0 +#endif + /* * We are going to use the flags for the page to node mapping if its in * there. This includes the case where there is no node, so it is implicit. 
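The of.h hunk above adds of_prop_val_eq(), a helper that compares two properties by length and value (presumably for the overlay updates elsewhere in this series). A hedged usage sketch comparing a property across two nodes; demo_same_compatible() is illustrative and of_find_property() is assumed from the existing OF API:

#include <linux/of.h>

/* Return true when both nodes carry an identical "compatible" property. */
static bool demo_same_compatible(struct device_node *a, struct device_node *b)
{
        struct property *pa = of_find_property(a, "compatible", NULL);
        struct property *pb = of_find_property(b, "compatible", NULL);

        return pa && pb && of_prop_val_eq(pa, pb);
}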
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 50ce1bddaf56..39b4494e29f1 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -669,6 +669,7 @@ PAGEFLAG_FALSE(DoubleMap) #define PAGE_TYPE_BASE 0xf0000000 /* Reserve 0x0000007f to catch underflows of page_mapcount */ +#define PAGE_MAPCOUNT_RESERVE -128 #define PG_buddy 0x00000080 #define PG_balloon 0x00000100 #define PG_kmemcg 0x00000200 @@ -677,6 +678,11 @@ PAGEFLAG_FALSE(DoubleMap) #define PageType(page, flag) \ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) +static inline int page_has_type(struct page *page) +{ + return (int)page->page_type < PAGE_MAPCOUNT_RESERVE; +} + #define PAGE_TYPE_OPS(uname, lname) \ static __always_inline int Page##uname(struct page *page) \ { \ diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 4ae347cbc36d..4eb26d278046 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -30,8 +30,11 @@ static inline bool is_migrate_isolate(int migratetype) } #endif +#define SKIP_HWPOISON 0x1 +#define REPORT_FAILURE 0x2 + bool has_unmovable_pages(struct zone *zone, struct page *page, int count, - int migratetype, bool skip_hwpoisoned_pages); + int migratetype, int flags); void set_pageblock_migratetype(struct page *page, int migratetype); int move_freepages_block(struct zone *zone, struct page *page, int migratetype, int *num_movable); @@ -44,10 +47,14 @@ int move_freepages_block(struct zone *zone, struct page *page, * For isolating all pages in the range finally, the caller have to * free all pages in the range. test_page_isolated() can be used for * test it. + * + * The following flags are allowed (they can be combined in a bit mask) + * SKIP_HWPOISON - ignore hwpoison pages + * REPORT_FAILURE - report details about the failure to isolate the range */ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - unsigned migratetype, bool skip_hwpoisoned_pages); + unsigned migratetype, int flags); /* * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 9132c5cb41f1..06a66327333d 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -25,10 +25,11 @@ #include <linux/types.h> +#define PB_migratetype_bits 3 /* Bit indices that affect a whole block of pages */ enum pageblock_bits { PB_migrate, - PB_migrate_end = PB_migrate + 3 - 1, + PB_migrate_end = PB_migrate + PB_migratetype_bits - 1, /* 3 bits required for migrate types */ PB_migrate_skip,/* If set the block is skipped by compaction */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 226f96f0dee0..e2d7039af6a3 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -537,6 +537,8 @@ static inline int wait_on_page_locked_killable(struct page *page) return wait_on_page_bit_killable(compound_head(page), PG_locked); } +extern void put_and_wait_on_page_locked(struct page *page); + /* * Wait for a page to complete writeback */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 11c71c4ecf75..65f1d8c2f082 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -396,6 +396,14 @@ struct pci_dev { unsigned int is_hotplug_bridge:1; unsigned int shpc_managed:1; /* SHPC owned by shpchp */ unsigned int is_thunderbolt:1; /* Thunderbolt controller */ + /* + * Devices marked being untrusted are the ones that can potentially + * execute DMA attacks and similar. 
They are typically connected + * through external ports such as Thunderbolt but not limited to + * that. When an IOMMU is enabled they should be getting full + * mappings to make sure they cannot access arbitrary memory. + */ + unsigned int untrusted:1; unsigned int __aer_firmware_first_valid:1; unsigned int __aer_firmware_first:1; unsigned int broken_intx_masking:1; /* INTx masking can't be used */ @@ -405,6 +413,7 @@ struct pci_dev { unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ unsigned int is_probed:1; /* Device probing in progress */ unsigned int link_active_reporting:1;/* Device capable of reporting link active */ + unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -764,9 +773,9 @@ struct pci_driver { int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */ int (*suspend_late)(struct pci_dev *dev, pm_message_t state); int (*resume_early)(struct pci_dev *dev); - int (*resume) (struct pci_dev *dev); /* Device woken up */ - void (*shutdown) (struct pci_dev *dev); - int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */ + int (*resume)(struct pci_dev *dev); /* Device woken up */ + void (*shutdown)(struct pci_dev *dev); + int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */ const struct pci_error_handlers *err_handler; const struct attribute_group **groups; struct device_driver driver; @@ -1960,7 +1969,11 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); int pcibios_add_device(struct pci_dev *dev); void pcibios_release_device(struct pci_dev *dev); +#ifdef CONFIG_PCI void pcibios_penalize_isa_irq(int irq, int active); +#else +static inline void pcibios_penalize_isa_irq(int irq, int active) {} +#endif int pcibios_alloc_irq(struct pci_dev *dev); void pcibios_free_irq(struct pci_dev *dev); resource_size_t pcibios_default_alignment(void); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 69f0abe1ba1a..5eaf39dbc388 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -545,6 +545,9 @@ #define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584 +#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb +#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -2358,6 +2361,11 @@ #define PCI_DEVICE_ID_CENATEK_IDE 0x0001 #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 +#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd +#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce +#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf + +#define PCI_VENDOR_ID_USR 0x16ec #define PCI_VENDOR_ID_VITESSE 0x1725 #define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 diff --git a/include/linux/pe.h b/include/linux/pe.h index 143ce75be5f0..3482b18a48b5 100644 --- a/include/linux/pe.h +++ b/include/linux/pe.h @@ -166,7 +166,7 @@ struct mz_hdr { uint16_t oem_info; /* oem specific */ uint16_t reserved1[10]; /* reserved */ uint32_t peaddr; /* address of pe header */ - char message[64]; /* message to print */ + char message[]; /* message to print */ }; struct mz_reloc { diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 79b99d653e03..71b75643c432 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -41,7 +41,7 
@@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore * * cannot both change sem->state from readers_fast and start checking * counters while we are here. So if we see !sem->state, we know that * the writer won't be checking until we're past the preempt_enable() - * and that one the synchronize_sched() is done, the writer will see + * and that once the synchronize_rcu() is done, the writer will see * anything we did within this RCU-sched read-size critical section. */ __this_cpu_inc(*sem->read_count); diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index bf309ff6f244..4641e850b204 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -102,8 +102,10 @@ struct arm_pmu { int (*filter_match)(struct perf_event *event); int num_events; bool secure_access; /* 32-bit ARM only */ -#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 +#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); +#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000 + DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); struct platform_device *plat_device; struct pmu_hw_events __percpu *hw_events; struct hlist_node node; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 53c500f0ca79..1d5c551a5add 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -262,8 +262,8 @@ struct pmu { */ int capabilities; - int * __percpu pmu_disable_count; - struct perf_cpu_context * __percpu pmu_cpu_context; + int __percpu *pmu_disable_count; + struct perf_cpu_context __percpu *pmu_cpu_context; atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */ int task_ctx_nr; int hrtimer_interval_ms; diff --git a/include/linux/phy.h b/include/linux/phy.h index 3ea87f774a76..3b051f761450 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1,6 +1,6 @@ /* * Framework and drivers for configuring and reading different PHYs - * Based on code in sungem_phy.c and gianfar_phy.c + * Based on code in sungem_phy.c and (long-removed) gianfar_phy.c * * Author: Andy Fleming * @@ -58,6 +58,11 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) +extern const int phy_10_100_features_array[4]; +extern const int phy_basic_t1_features_array[2]; +extern const int phy_gbit_features_array[2]; +extern const int phy_10gbit_features_array[1]; + /* * Set phydev->irq to PHY_POLL if interrupts are not supported, * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if @@ -66,9 +71,8 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini #define PHY_POLL -1 #define PHY_IGNORE_INTERRUPT -2 -#define PHY_HAS_INTERRUPT 0x00000001 -#define PHY_IS_INTERNAL 0x00000002 -#define PHY_RST_AFTER_CLK_EN 0x00000004 +#define PHY_IS_INTERNAL 0x00000001 +#define PHY_RST_AFTER_CLK_EN 0x00000002 #define MDIO_DEVICE_IS_PHY 0x80000000 /* Interface Mode definitions */ @@ -106,9 +110,9 @@ typedef enum { * @speeds: buffer to store supported speeds in. * @size: size of speeds buffer. * - * Description: Returns the number of supported speeds, and - * fills the speeds * buffer with the supported speeds. If speeds buffer is - * too small to contain * all currently supported speeds, will return as + * Description: Returns the number of supported speeds, and fills + * the speeds buffer with the supported speeds. 
If speeds buffer is + * too small to contain all currently supported speeds, will return as * many speeds as can fit. */ unsigned int phy_supported_speeds(struct phy_device *phy, @@ -116,7 +120,10 @@ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int size); /** - * It maps 'enum phy_interface_t' found in include/linux/phy.h + * phy_modes - map phy_interface_t enum to device tree binding of phy-mode + * @interface: enum phy_interface_t value + * + * Description: maps 'enum phy_interface_t' defined in this file * into the device tree binding of 'phy-mode', so that Ethernet * device driver can get phy interface from device tree. */ @@ -178,7 +185,6 @@ static inline const char *phy_modes(phy_interface_t interface) #define PHY_INIT_TIMEOUT 100000 #define PHY_STATE_TIME 1 #define PHY_FORCE_TIMEOUT 10 -#define PHY_AN_TIMEOUT 10 #define PHY_MAX_ADDR 32 @@ -264,57 +270,27 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); -#define PHY_INTERRUPT_DISABLED 0x0 -#define PHY_INTERRUPT_ENABLED 0x80000000 +#define PHY_INTERRUPT_DISABLED false +#define PHY_INTERRUPT_ENABLED true /* PHY state machine states: * * DOWN: PHY device and driver are not ready for anything. probe * should be called if and only if the PHY is in this state, * given that the PHY device exists. - * - PHY driver probe function will, depending on the PHY, set - * the state to STARTING or READY - * - * STARTING: PHY device is coming up, and the ethernet driver is - * not ready. PHY drivers may set this in the probe function. - * If they do, they are responsible for making sure the state is - * eventually set to indicate whether the PHY is UP or READY, - * depending on the state when the PHY is done starting up. - * - PHY driver will set the state to READY - * - start will set the state to PENDING + * - PHY driver probe function will set the state to READY * * READY: PHY is ready to send and receive packets, but the * controller is not. By default, PHYs which do not implement - * probe will be set to this state by phy_probe(). If the PHY - * driver knows the PHY is ready, and the PHY state is STARTING, - * then it sets this STATE. + * probe will be set to this state by phy_probe(). * - start will set the state to UP * - * PENDING: PHY device is coming up, but the ethernet driver is - * ready. phy_start will set this state if the PHY state is - * STARTING. - * - PHY driver will set the state to UP when the PHY is ready - * * UP: The PHY and attached device are ready to do work. * Interrupts should be started here. - * - timer moves to AN - * - * AN: The PHY is currently negotiating the link state. Link is - * therefore down for now. phy_timer will set this state when it - * detects the state is UP. config_aneg will set this state - * whenever called with phydev->autoneg set to AUTONEG_ENABLE. - * - If autonegotiation finishes, but there's no link, it sets - * the state to NOLINK. - * - If aneg finishes with link, it sets the state to RUNNING, - * and calls adjust_link - * - If autonegotiation did not finish after an arbitrary amount - * of time, autonegotiation should be tried again if the PHY - * supports "magic" autonegotiation (back to AN) - * - If it didn't finish, and no magic_aneg, move to FORCING. + * - timer moves to NOLINK or RUNNING * * NOLINK: PHY is up, but not currently plugged in. 
- * - If the timer notes that the link comes back, we move to RUNNING - * - config_aneg moves to AN + * - irq or timer will set RUNNING if link comes back * - phy_stop moves to HALTED * * FORCING: PHY is being configured with forced settings @@ -325,11 +301,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); * * RUNNING: PHY is currently up, running, and possibly sending * and/or receiving packets - * - timer will set CHANGELINK if we're polling (this ensures the - * link state is polled every other cycle of this state machine, - * which makes it every other second) - * - irq will set CHANGELINK - * - config_aneg will set AN + * - irq or timer will set NOLINK if link goes down * - phy_stop moves to HALTED * * CHANGELINK: PHY experienced a change in link state @@ -349,16 +321,13 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); */ enum phy_state { PHY_DOWN = 0, - PHY_STARTING, PHY_READY, - PHY_PENDING, + PHY_HALTED, PHY_UP, - PHY_AN, PHY_RUNNING, PHY_NOLINK, PHY_FORCING, PHY_CHANGELINK, - PHY_HALTED, PHY_RESUMING }; @@ -390,7 +359,6 @@ struct phy_c45_device_ids { * giving up on the current attempt at acquiring a link * irq: IRQ number of the PHY's interrupt (-1 if none) * phy_timer: The timer for handling the state machine - * phy_queue: A work_queue for the phy_mac_interrupt * attached_dev: The attached enet driver's device instance ptr * adjust_link: Callback for the enet controller to respond to * changes in the link state. @@ -427,6 +395,9 @@ struct phy_device { /* The most recently read link state */ unsigned link:1; + /* Interrupts are enabled */ + unsigned interrupts:1; + enum phy_state state; u32 dev_flags; @@ -442,14 +413,11 @@ struct phy_device { int pause; int asym_pause; - /* Enabled Interrupts */ - u32 interrupts; - - /* Union of PHY and Attached devices' supported modes */ - /* See mii.h for more info */ - u32 supported; - u32 advertising; - u32 lp_advertising; + /* Union of PHY and Attached devices' supported link modes */ + /* See ethtool.h for more info */ + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); /* Energy efficient ethernet modes which should be prohibited */ u32 eee_broken_modes; @@ -475,7 +443,6 @@ struct phy_device { void *priv; /* Interrupt and Polling infrastructure */ - struct work_struct phy_queue; struct delayed_work state_queue; struct mutex lock; @@ -674,6 +641,10 @@ struct phy_driver { #define PHY_ANY_ID "MATCH ANY PHY" #define PHY_ANY_UID 0xffffffff +#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0) +#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4) +#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10) + /* A Structure for boards to register fixups with the PHY Lib */ struct phy_fixup { struct list_head list; @@ -697,9 +668,31 @@ struct phy_setting { const struct phy_setting * phy_lookup_setting(int speed, int duplex, const unsigned long *mask, - size_t maxbit, bool exact); + bool exact); size_t phy_speeds(unsigned int *speeds, size_t size, - unsigned long *mask, size_t maxbit); + unsigned long *mask); + +static inline bool __phy_is_started(struct phy_device *phydev) +{ + WARN_ON(!mutex_is_locked(&phydev->lock)); + + return phydev->state >= PHY_UP; +} + +/** + * phy_is_started - Convenience function to check whether PHY is started + * @phydev: The phy_device struct + */ +static inline bool phy_is_started(struct phy_device *phydev) +{ + bool 
started; + + mutex_lock(&phydev->lock); + started = __phy_is_started(phydev); + mutex_unlock(&phydev->lock); + + return started; +} void phy_resolve_aneg_linkmode(struct phy_device *phydev); @@ -1050,11 +1043,9 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); void phy_state_machine(struct work_struct *work); -void phy_change_work(struct work_struct *work); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); -void phy_trigger_machine(struct phy_device *phydev); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); void phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd); diff --git a/include/linux/phy/phy-mipi-dphy.h b/include/linux/phy/phy-mipi-dphy.h new file mode 100644 index 000000000000..c08aacc0ac35 --- /dev/null +++ b/include/linux/phy/phy-mipi-dphy.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Cadence Design Systems Inc. + */ + +#ifndef __PHY_MIPI_DPHY_H_ +#define __PHY_MIPI_DPHY_H_ + +#include <video/videomode.h> + +/** + * struct phy_configure_opts_mipi_dphy - MIPI D-PHY configuration set + * + * This structure is used to represent the configuration state of a + * MIPI D-PHY phy. + */ +struct phy_configure_opts_mipi_dphy { + /** + * @clk_miss: + * + * Timeout, in picoseconds, for receiver to detect absence of + * Clock transitions and disable the Clock Lane HS-RX. + * + * Maximum value: 60000 ps + */ + unsigned int clk_miss; + + /** + * @clk_post: + * + * Time, in picoseconds, that the transmitter continues to + * send HS clock after the last associated Data Lane has + * transitioned to LP Mode. Interval is defined as the period + * from the end of @hs_trail to the beginning of @clk_trail. + * + * Minimum value: 60000 ps + 52 * @hs_clk_rate period in ps + */ + unsigned int clk_post; + + /** + * @clk_pre: + * + * Time, in UI, that the HS clock shall be driven by + * the transmitter prior to any associated Data Lane beginning + * the transition from LP to HS mode. + * + * Minimum value: 8 UI + */ + unsigned int clk_pre; + + /** + * @clk_prepare: + * + * Time, in picoseconds, that the transmitter drives the Clock + * Lane LP-00 Line state immediately before the HS-0 Line + * state starting the HS transmission. + * + * Minimum value: 38000 ps + * Maximum value: 95000 ps + */ + unsigned int clk_prepare; + + /** + * @clk_settle: + * + * Time interval, in picoseconds, during which the HS receiver + * should ignore any Clock Lane HS transitions, starting from + * the beginning of @clk_prepare. + * + * Minimum value: 95000 ps + * Maximum value: 300000 ps + */ + unsigned int clk_settle; + + /** + * @clk_term_en: + * + * Time, in picoseconds, for the Clock Lane receiver to enable + * the HS line termination. + * + * Maximum value: 38000 ps + */ + unsigned int clk_term_en; + + /** + * @clk_trail: + * + * Time, in picoseconds, that the transmitter drives the HS-0 + * state after the last payload clock bit of a HS transmission + * burst. + * + * Minimum value: 60000 ps + */ + unsigned int clk_trail; + + /** + * @clk_zero: + * + * Time, in picoseconds, that the transmitter drives the HS-0 + * state prior to starting the Clock. + */ + unsigned int clk_zero; + + /** + * @d_term_en: + * + * Time, in picoseconds, for the Data Lane receiver to enable + * the HS line termination. 
+ * + * Maximum value: 35000 ps + 4 * @hs_clk_rate period in ps + */ + unsigned int d_term_en; + + /** + * @eot: + * + * Transmitted time interval, in picoseconds, from the start + * of @hs_trail or @clk_trail, to the start of the LP- 11 + * state following a HS burst. + * + * Maximum value: 105000 ps + 12 * @hs_clk_rate period in ps + */ + unsigned int eot; + + /** + * @hs_exit: + * + * Time, in picoseconds, that the transmitter drives LP-11 + * following a HS burst. + * + * Minimum value: 100000 ps + */ + unsigned int hs_exit; + + /** + * @hs_prepare: + * + * Time, in picoseconds, that the transmitter drives the Data + * Lane LP-00 Line state immediately before the HS-0 Line + * state starting the HS transmission. + * + * Minimum value: 40000 ps + 4 * @hs_clk_rate period in ps + * Maximum value: 85000 ps + 6 * @hs_clk_rate period in ps + */ + unsigned int hs_prepare; + + /** + * @hs_settle: + * + * Time interval, in picoseconds, during which the HS receiver + * shall ignore any Data Lane HS transitions, starting from + * the beginning of @hs_prepare. + * + * Minimum value: 85000 ps + 6 * @hs_clk_rate period in ps + * Maximum value: 145000 ps + 10 * @hs_clk_rate period in ps + */ + unsigned int hs_settle; + + /** + * @hs_skip: + * + * Time interval, in picoseconds, during which the HS-RX + * should ignore any transitions on the Data Lane, following a + * HS burst. The end point of the interval is defined as the + * beginning of the LP-11 state following the HS burst. + * + * Minimum value: 40000 ps + * Maximum value: 55000 ps + 4 * @hs_clk_rate period in ps + */ + unsigned int hs_skip; + + /** + * @hs_trail: + * + * Time, in picoseconds, that the transmitter drives the + * flipped differential state after last payload data bit of a + * HS transmission burst + * + * Minimum value: max(8 * @hs_clk_rate period in ps, + * 60000 ps + 4 * @hs_clk_rate period in ps) + */ + unsigned int hs_trail; + + /** + * @hs_zero: + * + * Time, in picoseconds, that the transmitter drives the HS-0 + * state prior to transmitting the Sync sequence. + */ + unsigned int hs_zero; + + /** + * @init: + * + * Time, in picoseconds for the initialization period to + * complete. + * + * Minimum value: 100000000 ps + */ + unsigned int init; + + /** + * @lpx: + * + * Transmitted length, in picoseconds, of any Low-Power state + * period. + * + * Minimum value: 50000 ps + */ + unsigned int lpx; + + /** + * @ta_get: + * + * Time, in picoseconds, that the new transmitter drives the + * Bridge state (LP-00) after accepting control during a Link + * Turnaround. + * + * Value: 5 * @lpx + */ + unsigned int ta_get; + + /** + * @ta_go: + * + * Time, in picoseconds, that the transmitter drives the + * Bridge state (LP-00) before releasing control during a Link + * Turnaround. + * + * Value: 4 * @lpx + */ + unsigned int ta_go; + + /** + * @ta_sure: + * + * Time, in picoseconds, that the new transmitter waits after + * the LP-10 state before transmitting the Bridge state + * (LP-00) during a Link Turnaround. + * + * Minimum value: @lpx + * Maximum value: 2 * @lpx + */ + unsigned int ta_sure; + + /** + * @wakeup: + * + * Time, in picoseconds, that a transmitter drives a Mark-1 + * state prior to a Stop state in order to initiate an exit + * from ULPS. + * + * Minimum value: 1000000000 ps + */ + unsigned int wakeup; + + /** + * @hs_clk_rate: + * + * Clock rate, in Hertz, of the high-speed clock. + */ + unsigned long hs_clk_rate; + + /** + * @lp_clk_rate: + * + * Clock rate, in Hertz, of the low-power clock. 
+ */ + unsigned long lp_clk_rate; + + /** + * @lanes: + * + * Number of active data lanes used for the transmissions. + */ + unsigned char lanes; +}; + +int phy_mipi_dphy_get_default_config(unsigned long pixel_clock, + unsigned int bpp, + unsigned int lanes, + struct phy_configure_opts_mipi_dphy *cfg); +int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg); + +#endif /* __PHY_MIPI_DPHY_H_ */ diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 03b319f89a34..e8e118d70fd7 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -20,6 +20,8 @@ #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> +#include <linux/phy/phy-mipi-dphy.h> + struct phy; enum phy_mode { @@ -35,13 +37,21 @@ enum phy_mode { PHY_MODE_USB_DEVICE_HS, PHY_MODE_USB_DEVICE_SS, PHY_MODE_USB_OTG, - PHY_MODE_SGMII, - PHY_MODE_2500SGMII, - PHY_MODE_QSGMII, - PHY_MODE_10GKR, PHY_MODE_UFS_HS_A, PHY_MODE_UFS_HS_B, PHY_MODE_PCIE, + PHY_MODE_ETHERNET, + PHY_MODE_MIPI_DPHY, +}; + +/** + * union phy_configure_opts - Opaque generic phy configuration + * + * @mipi_dphy: Configuration set applicable for phys supporting + * the MIPI_DPHY phy mode. + */ +union phy_configure_opts { + struct phy_configure_opts_mipi_dphy mipi_dphy; }; /** @@ -60,7 +70,38 @@ struct phy_ops { int (*exit)(struct phy *phy); int (*power_on)(struct phy *phy); int (*power_off)(struct phy *phy); - int (*set_mode)(struct phy *phy, enum phy_mode mode); + int (*set_mode)(struct phy *phy, enum phy_mode mode, int submode); + + /** + * @configure: + * + * Optional. + * + * Used to change the PHY parameters. phy_init() must have + * been called on the phy. + * + * Returns: 0 if successful, an negative error code otherwise + */ + int (*configure)(struct phy *phy, union phy_configure_opts *opts); + + /** + * @validate: + * + * Optional. + * + * Used to check that the current set of parameters can be + * handled by the phy. Implementations are free to tune the + * parameters passed as arguments if needed by some + * implementation detail or constraints. It must not change + * any actual configuration of the PHY, so calling it as many + * times as deemed fit by the consumer must have no side + * effect. 
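Taken together, the MIPI D-PHY option struct, the new ->configure()/->validate() hooks and phy_set_mode_ext() are meant to be combined roughly as in the following consumer-side sketch (example_dsi_enable() is a made-up name, error unwinding is abbreviated, and phy_init() is assumed to have been called already, as ->configure() requires):

#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>

static int example_dsi_enable(struct phy *dphy, unsigned long pixel_clock,
			      unsigned int bpp, unsigned int lanes)
{
	union phy_configure_opts opts = { };
	int ret;

	/* Fill in spec-derived default timings for the requested link. */
	ret = phy_mipi_dphy_get_default_config(pixel_clock, bpp, lanes,
					       &opts.mipi_dphy);
	if (ret)
		return ret;

	ret = phy_set_mode_ext(dphy, PHY_MODE_MIPI_DPHY, 0);
	if (ret)
		return ret;

	/* Let the provider check (and possibly adjust) the timings... */
	ret = phy_validate(dphy, PHY_MODE_MIPI_DPHY, 0, &opts);
	if (ret)
		return ret;

	/* ...then apply them. */
	return phy_configure(dphy, &opts);
}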
+ * + * Returns: 0 if the configuration can be applied, an negative + * error code otherwise + */ + int (*validate)(struct phy *phy, enum phy_mode mode, int submode, + union phy_configure_opts *opts); int (*reset)(struct phy *phy); int (*calibrate)(struct phy *phy); struct module *owner; @@ -69,6 +110,7 @@ struct phy_ops { /** * struct phy_attrs - represents phy attributes * @bus_width: Data path width implemented by PHY + * @mode: PHY mode */ struct phy_attrs { u32 bus_width; @@ -80,7 +122,6 @@ struct phy_attrs { * @dev: phy device * @id: id of the phy device * @ops: function pointers for performing phy operations - * @init_data: list of PHY consumers (non-dt only) * @mutex: mutex to protect phy_ops * @init_count: used to protect when the PHY is used by multiple consumers * @power_count: used to protect when the PHY is used by multiple consumers @@ -164,7 +205,13 @@ int phy_init(struct phy *phy); int phy_exit(struct phy *phy); int phy_power_on(struct phy *phy); int phy_power_off(struct phy *phy); -int phy_set_mode(struct phy *phy, enum phy_mode mode); +int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode); +#define phy_set_mode(phy, mode) \ + phy_set_mode_ext(phy, mode, 0) +int phy_configure(struct phy *phy, union phy_configure_opts *opts); +int phy_validate(struct phy *phy, enum phy_mode mode, int submode, + union phy_configure_opts *opts); + static inline enum phy_mode phy_get_mode(struct phy *phy) { return phy->attrs.mode; @@ -278,13 +325,17 @@ static inline int phy_power_off(struct phy *phy) return -ENOSYS; } -static inline int phy_set_mode(struct phy *phy, enum phy_mode mode) +static inline int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, + int submode) { if (!phy) return 0; return -ENOSYS; } +#define phy_set_mode(phy, mode) \ + phy_set_mode_ext(phy, mode, 0) + static inline enum phy_mode phy_get_mode(struct phy *phy) { return PHY_MODE_INVALID; @@ -304,6 +355,24 @@ static inline int phy_calibrate(struct phy *phy) return -ENOSYS; } +static inline int phy_configure(struct phy *phy, + union phy_configure_opts *opts) +{ + if (!phy) + return 0; + + return -ENOSYS; +} + +static inline int phy_validate(struct phy *phy, enum phy_mode mode, int submode, + union phy_configure_opts *opts) +{ + if (!phy) + return 0; + + return -ENOSYS; +} + static inline int phy_get_bus_width(struct phy *phy) { return -ENOSYS; diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index ee54453a40a0..9525567b1951 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -13,6 +13,7 @@ struct fixed_phy_status { struct device_node; #if IS_ENABLED(CONFIG_FIXED_PHY) +extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); extern int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status, int link_gpio); @@ -47,6 +48,10 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, { return -ENODEV; } +static inline int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier) +{ + return -EINVAL; +} #endif /* CONFIG_FIXED_PHY */ #endif /* __PHY_FIXED_H */ diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h index b37b05bfd1a6..4587ce362535 100644 --- a/include/linux/phy_led_triggers.h +++ b/include/linux/phy_led_triggers.h @@ -20,7 +20,7 @@ struct phy_device; #include <linux/leds.h> #include <linux/phy.h> -#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 +#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 11 #define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \ 
FIELD_SIZEOF(struct mdio_device, addr)+\ diff --git a/include/linux/pl353-smc.h b/include/linux/pl353-smc.h new file mode 100644 index 000000000000..0e0d3df9bf72 --- /dev/null +++ b/include/linux/pl353-smc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ARM PL353 SMC Driver Header + * + * Copyright (C) 2012 - 2018 Xilinx, Inc + */ + +#ifndef __LINUX_PL353_SMC_H +#define __LINUX_PL353_SMC_H + +enum pl353_smc_ecc_mode { + PL353_SMC_ECCMODE_BYPASS = 0, + PL353_SMC_ECCMODE_APB = 1, + PL353_SMC_ECCMODE_MEM = 2 +}; + +enum pl353_smc_mem_width { + PL353_SMC_MEM_WIDTH_8 = 0, + PL353_SMC_MEM_WIDTH_16 = 1 +}; + +u32 pl353_smc_get_ecc_val(int ecc_reg); +bool pl353_smc_ecc_is_busy(void); +int pl353_smc_get_nand_int_status_raw(void); +void pl353_smc_clr_nand_int(void); +int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode); +int pl353_smc_set_ecc_pg_size(unsigned int pg_sz); +int pl353_smc_set_buswidth(unsigned int bw); +void pl353_smc_set_cycles(u32 timings[]); +#endif diff --git a/include/linux/platform_data/ad7879.h b/include/linux/platform_data/ad7879.h deleted file mode 100644 index 6655cc8453ac..000000000000 --- a/include/linux/platform_data/ad7879.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* linux/platform_data/ad7879.h */ - -/* Touchscreen characteristics vary between boards and models. The - * platform_data for the device's "struct device" holds this information. - * - * It's OK if the min/max values are zero. - */ -struct ad7879_platform_data { - u16 model; /* 7879 */ - u16 x_plate_ohms; - u16 x_min, x_max; - u16 y_min, y_max; - u16 pressure_min, pressure_max; - - bool swap_xy; /* swap x and y axes */ - - /* [0..255] 0=OFF Starts at 1=550us and goes - * all the way to 9.440ms in steps of 35us. - */ - u8 pen_down_acc_interval; - /* [0..15] Starts at 0=128us and goes all the - * way to 4.096ms in steps of 128us. - */ - u8 first_conversion_delay; - /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */ - u8 acquisition_time; - /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */ - u8 averaging; - /* [0..3] Perform X measurements 0 = OFF, - * 1 = 4, 2 = 8, 3 = 16 (median > averaging) - */ - u8 median; - /* 1 = AUX/VBAT/GPIO export GPIO to gpiolib - * requires CONFIG_GPIOLIB - */ - bool gpio_export; - /* identifies the first GPIO number handled by this chip; - * or, if negative, requests dynamic ID allocation. - */ - s32 gpio_base; -}; diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 85ad68f9206a..7fe80f1c7e08 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -79,6 +79,7 @@ struct davinci_mcasp_pdata { /* McASP specific fields */ int tdm_slots; u8 op_mode; + u8 dismod; u8 num_serializer; u8 *serial_dir; u8 version; diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 896cb71a382c..1a1d58ebffbf 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -49,6 +49,7 @@ struct dw_dma_slave { * @data_width: Maximum data width supported by hardware per AHB master * (in bytes, power of 2) * @multi_block: Multi block transfers supported by hardware per channel. + * @protctl: Protection control signals setting per channel. 
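As a sketch of how the new @protctl field is meant to be filled in, a board file could request, for example, privileged cacheable bus accesses using the CHAN_PROTCTL_* flags defined just below; the other values here are placeholders.

#include <linux/platform_data/dma-dw.h>

/* Hypothetical board-level platform data; only .protctl matters here. */
static struct dw_dma_platform_data example_dw_pdata = {
	.nr_channels	= 8,
	.protctl	= CHAN_PROTCTL_PRIVILEGED | CHAN_PROTCTL_CACHEABLE,
};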
*/ struct dw_dma_platform_data { unsigned int nr_channels; @@ -65,6 +66,11 @@ struct dw_dma_platform_data { unsigned char nr_masters; unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; +#define CHAN_PROTCTL_PRIVILEGED BIT(0) +#define CHAN_PROTCTL_BUFFERABLE BIT(1) +#define CHAN_PROTCTL_CACHEABLE BIT(2) +#define CHAN_PROTCTL_MASK GENMASK(2, 0) + unsigned char protctl; }; #endif /* _PLATFORM_DATA_DMA_DW_H */ diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index f92a47e18034..a93841bfb9f7 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ b/include/linux/platform_data/gpio-davinci.h @@ -17,6 +17,8 @@ #define __DAVINCI_GPIO_PLATFORM_H struct davinci_gpio_platform_data { + bool no_auto_base; + u32 base; u32 ngpio; u32 gpio_unbanked; }; diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h new file mode 100644 index 000000000000..13874fa6e767 --- /dev/null +++ b/include/linux/platform_data/mdio-gpio.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * MDIO-GPIO bus platform data structure + */ + +#ifndef __LINUX_MDIO_GPIO_PDATA_H +#define __LINUX_MDIO_GPIO_PDATA_H + +struct mdio_gpio_platform_data { + u32 phy_mask; + u32 phy_ignore_ta_mask; +}; + +#endif /* __LINUX_MDIO_GPIO_PDATA_H */ diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index 640dec8b5b0c..b606ca4197df 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -30,15 +30,11 @@ enum cd_types { * * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35. * - * @wp_gpio: gpio for write_protect - * @cd_gpio: gpio for card_detect interrupt * @wp_type: type of write_protect method (see wp_types enum above) * @cd_type: type of card_detect method (see cd_types enum above) */ struct esdhc_platform_data { - unsigned int wp_gpio; - unsigned int cd_gpio; enum wp_types wp_type; enum cd_types cd_type; int max_bus_width; diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h index 752f97c62ef2..7e44e84e7150 100644 --- a/include/linux/platform_data/mmc-pxamci.h +++ b/include/linux/platform_data/mmc-pxamci.h @@ -15,11 +15,7 @@ struct pxamci_platform_data { int (*get_ro)(struct device *); int (*setpower)(struct device *, unsigned int); void (*exit)(struct device *, void *); - int gpio_card_detect; /* gpio detecting card insertion */ - int gpio_card_ro; /* gpio detecting read only toggle */ bool gpio_card_ro_invert; /* gpio ro is inverted */ - int gpio_power; /* gpio powering up MMC bus */ - bool gpio_power_invert; /* gpio power is inverted */ }; extern void pxa_set_mci_info(struct pxamci_platform_data *info); diff --git a/include/linux/platform_data/mmc-s3cmci.h b/include/linux/platform_data/mmc-s3cmci.h index b68d9f0bdd9e..33310b11cbdd 100644 --- a/include/linux/platform_data/mmc-s3cmci.h +++ b/include/linux/platform_data/mmc-s3cmci.h @@ -7,7 +7,6 @@ * @no_wprotect: Set this to indicate there is no write-protect switch. * @no_detect: Set this if there is no detect switch. * @wprotect_invert: Invert the default sense of the write protect switch. - * @detect_invert: Invert the default sense of the write protect switch. * @use_dma: Set to allow the use of DMA. * @gpio_detect: GPIO number for the card detect line. * @gpio_wprotect: GPIO number for the write protect line. 
@@ -31,11 +30,8 @@ struct s3c24xx_mci_pdata { unsigned int no_wprotect:1; unsigned int no_detect:1; unsigned int wprotect_invert:1; - unsigned int detect_invert:1; /* set => detect active high */ unsigned int use_dma:1; - unsigned int gpio_detect; - unsigned int gpio_wprotect; unsigned long ocr_avail; void (*set_power)(unsigned char power_mode, unsigned short vdd); diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h index 698d0d59db76..ee03d429742b 100644 --- a/include/linux/platform_data/ntc_thermistor.h +++ b/include/linux/platform_data/ntc_thermistor.h @@ -24,10 +24,11 @@ struct iio_channel; enum ntc_thermistor_type { - TYPE_NCPXXWB473, - TYPE_NCPXXWL333, TYPE_B57330V2103, + TYPE_B57891S0103, + TYPE_NCPXXWB473, TYPE_NCPXXWF104, + TYPE_NCPXXWL333, TYPE_NCPXXXH103, }; diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h index f8274b0c6888..728193111c2f 100644 --- a/include/linux/platform_data/st_sensors_pdata.h +++ b/include/linux/platform_data/st_sensors_pdata.h @@ -18,11 +18,13 @@ * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). * @open_drain: set the interrupt line to be open drain if possible. * @spi_3wire: enable spi-3wire mode. + * @pullups: enable/disable i2c controller pullup resistors. */ struct st_sensors_platform_data { u8 drdy_int_pin; bool open_drain; bool spi_3wire; + bool pullups; }; #endif /* ST_SENSORS_PDATA_H */ diff --git a/include/linux/pm.h b/include/linux/pm.h index e723b78d8357..0bd9de116826 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -26,6 +26,7 @@ #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/timer.h> +#include <linux/hrtimer.h> #include <linux/completion.h> /* @@ -608,7 +609,7 @@ struct dev_pm_info { unsigned int should_wakeup:1; #endif #ifdef CONFIG_PM - struct timer_list suspend_timer; + struct hrtimer suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; @@ -631,7 +632,7 @@ struct dev_pm_info { enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; - unsigned long last_busy; + u64 last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 3b5d7280e52e..dd364abb649a 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -73,6 +73,7 @@ struct genpd_power_state { struct genpd_lock_ops; struct dev_pm_opp; +struct opp_table; struct generic_pm_domain { struct device dev; @@ -94,6 +95,7 @@ struct generic_pm_domain { unsigned int performance_state; /* Aggregated max performance state */ int (*power_off)(struct generic_pm_domain *domain); int (*power_on)(struct generic_pm_domain *domain); + struct opp_table *opp_table; /* OPP table of the genpd */ unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd, struct dev_pm_opp *opp); int (*set_performance_state)(struct generic_pm_domain *genpd, @@ -134,6 +136,10 @@ struct gpd_link { struct list_head master_node; struct generic_pm_domain *slave; struct list_head slave_node; + + /* Sub-domain's per-master domain performance state */ + unsigned int performance_state; + unsigned int prev_performance_state; }; struct gpd_timing_data { @@ -258,8 +264,8 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent, struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int of_genpd_parse_idle_states(struct device_node *dn, struct 
genpd_power_state **states, int *n); -unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *np); +unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp); int genpd_dev_pm_attach(struct device *dev); struct device *genpd_dev_pm_attach_by_id(struct device *dev, @@ -300,8 +306,8 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn, } static inline unsigned int -of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *np) +pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp) { return 0; } diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 5d399eeef172..0a2a88e5a383 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -126,6 +126,9 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); +struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index); +void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev); +int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); @@ -272,6 +275,18 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} +static inline struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev) {} + +static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate) +{ + return -ENOTSUPP; +} + static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { return -ENOTSUPP; @@ -305,8 +320,8 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); -struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); +int of_get_required_opp_performance_state(struct device_node *np, int index); #else static inline int dev_pm_opp_of_add_table(struct device *dev) { @@ -341,13 +356,13 @@ static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device return NULL; } -static inline struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np) +static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) { return NULL; } -static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) +static inline int of_get_required_opp_performance_state(struct device_node *np, int index) { - 
return NULL; + return -ENOTSUPP; } #endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index f0fc4700b6ff..54af4eef169f 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -51,7 +51,7 @@ extern void pm_runtime_no_callbacks(struct device *dev); extern void pm_runtime_irq_safe(struct device *dev); extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); -extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); +extern u64 pm_runtime_autosuspend_expiration(struct device *dev); extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); @@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) static inline void pm_runtime_mark_last_busy(struct device *dev) { - WRITE_ONCE(dev->power.last_busy, jiffies); + WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get())); } static inline bool pm_runtime_is_irq_safe(struct device *dev) @@ -168,7 +168,7 @@ static inline void __pm_runtime_use_autosuspend(struct device *dev, bool use) {} static inline void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) {} -static inline unsigned long pm_runtime_autosuspend_expiration( +static inline u64 pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index c4fa907c8f14..2ce8d00c20de 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h @@ -119,7 +119,7 @@ struct charger_regulator { struct charger_cable *cables; int num_cables; - struct attribute_group attr_g; + struct attribute_group attr_grp; struct device_attribute attr_name; struct device_attribute attr_state; struct device_attribute attr_externally_control; @@ -186,6 +186,7 @@ struct charger_desc { int num_charger_regulators; struct charger_regulator *charger_regulators; + const struct attribute_group **sysfs_groups; const char *psy_fuel_gauge; diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h index 7b81dad712de..d0b37e937037 100644 --- a/include/linux/power/smartreflex.h +++ b/include/linux/power/smartreflex.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * OMAP Smartreflex Defines and Routines * @@ -11,10 +12,6 @@ * * Copyright (C) 2007 Texas Instruments, Inc. * Lesly A M <x0080970@ti.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __POWER_SMARTREFLEX_H @@ -303,9 +300,6 @@ void omap_sr_enable(struct voltagedomain *voltdm); void omap_sr_disable(struct voltagedomain *voltdm); void omap_sr_disable_reset_volt(struct voltagedomain *voltdm); -/* API to register the pmic specific data with the smartreflex driver. 
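The switch of last_busy to a nanosecond ktime value does not change how drivers use autosuspend; the usual idle path still looks like the sketch below (the function name is illustrative).

#include <linux/pm_runtime.h>

static void example_device_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* now records ktime_get() in ns */
	pm_runtime_put_autosuspend(dev);	/* suspend after the autosuspend delay */
}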
*/ -void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data); - /* Smartreflex driver hooks to be called from Smartreflex class driver */ int sr_enable(struct omap_sr *sr, unsigned long volt); void sr_disable(struct omap_sr *sr); @@ -320,7 +314,5 @@ static inline void omap_sr_enable(struct voltagedomain *voltdm) {} static inline void omap_sr_disable(struct voltagedomain *voltdm) {} static inline void omap_sr_disable_reset_volt( struct voltagedomain *voltdm) {} -static inline void omap_sr_register_pmic( - struct omap_sr_pmic_data *pmic_data) {} #endif #endif diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index f80769175c56..57b2ab82b951 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -204,6 +204,9 @@ struct power_supply_config { /* Driver private data */ void *drv_data; + /* Device specific sysfs attributes */ + const struct attribute_group **attr_grp; + char **supplied_to; size_t num_supplicants; }; @@ -309,6 +312,13 @@ struct power_supply_info { int use_for_apm; }; +struct power_supply_battery_ocv_table { + int ocv; /* microVolts */ + int capacity; /* percent */ +}; + +#define POWER_SUPPLY_OCV_TEMP_MAX 20 + /* * This is the recommended struct to manage static battery parameters, * populated by power_supply_get_battery_info(). Most platform drivers should @@ -326,6 +336,10 @@ struct power_supply_battery_info { int charge_term_current_ua; /* microAmps */ int constant_charge_current_max_ua; /* microAmps */ int constant_charge_voltage_max_uv; /* microVolts */ + int factory_internal_resistance_uohm; /* microOhms */ + int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */ + struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX]; + int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; }; extern struct atomic_notifier_head power_supply_notifier; @@ -349,6 +363,15 @@ devm_power_supply_get_by_phandle(struct device *dev, const char *property) extern int power_supply_get_battery_info(struct power_supply *psy, struct power_supply_battery_info *info); +extern void power_supply_put_battery_info(struct power_supply *psy, + struct power_supply_battery_info *info); +extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, + int table_len, int ocv); +extern struct power_supply_battery_ocv_table * +power_supply_find_ocv2cap_table(struct power_supply_battery_info *info, + int temp, int *table_len); +extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, + int ocv, int temp); extern void power_supply_changed(struct power_supply *psy); extern int power_supply_am_i_supplied(struct power_supply *psy); extern int power_supply_set_input_current_limit_from_supplier( diff --git a/include/linux/preempt.h b/include/linux/preempt.h index c01813c3fbe9..dd92b1a93919 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -53,9 +53,6 @@ #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -/* We use the MSB mostly because its available */ -#define PREEMPT_NEED_RESCHED 0x80000000 - #define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) /* diff --git a/include/linux/printk.h b/include/linux/printk.h index cf3eccfe1543..77740a506ebb 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -166,11 +166,6 @@ int vprintk_emit(int facility, int level, asmlinkage __printf(1, 0) int vprintk(const char *fmt, va_list args); -asmlinkage __printf(5, 6) __cold -int printk_emit(int facility, int level, - const char *dict, size_t dictlen, - const char *fmt, ...); - 
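Returning to the open-circuit-voltage helpers added to power_supply.h above: a fuel-gauge driver could turn a measured OCV into a capacity estimate roughly as sketched here (the function name is made up; units follow the header: microvolts in, degrees Celsius for the temperature, percent out).

#include <linux/power_supply.h>

static int example_ocv_to_capacity(struct power_supply *psy, int ocv_uv,
				   int temp_c)
{
	struct power_supply_battery_info info = { };
	int ret, cap;

	ret = power_supply_get_battery_info(psy, &info);
	if (ret)
		return ret;

	/* Negative return means no suitable OCV table for this temperature. */
	cap = power_supply_batinfo_ocv2cap(&info, ocv_uv, temp_c);

	power_supply_put_battery_info(psy, &info);
	return cap;
}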
asmlinkage __printf(1, 2) __cold int printk(const char *fmt, ...); @@ -269,7 +264,7 @@ static inline void show_regs_print_info(const char *log_lvl) { } -static inline asmlinkage void dump_stack(void) +static inline void dump_stack(void) { } diff --git a/include/linux/property.h b/include/linux/property.h index ac8a1ebc4c1b..3789ec755fb6 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -311,4 +311,16 @@ fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port, int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint); +/* -------------------------------------------------------------------------- */ +/* Software fwnode support - when HW description is incomplete or missing */ + +bool is_software_node(const struct fwnode_handle *fwnode); + +int software_node_notify(struct device *dev, unsigned long action); + +struct fwnode_handle * +fwnode_create_software_node(const struct property_entry *properties, + const struct fwnode_handle *parent); +void fwnode_remove_software_node(struct fwnode_handle *fwnode); + #endif /* _LINUX_PROPERTY_H_ */ diff --git a/include/linux/psi.h b/include/linux/psi.h index 8e0725aac0aa..7006008d5b72 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -1,6 +1,7 @@ #ifndef _LINUX_PSI_H #define _LINUX_PSI_H +#include <linux/jump_label.h> #include <linux/psi_types.h> #include <linux/sched.h> @@ -9,7 +10,7 @@ struct css_set; #ifdef CONFIG_PSI -extern bool psi_disabled; +extern struct static_key_false psi_disabled; void psi_init(void); diff --git a/include/linux/pstore.h b/include/linux/pstore.h index a15bc4d48752..b146181e8709 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -26,27 +26,38 @@ #include <linux/errno.h> #include <linux/kmsg_dump.h> #include <linux/mutex.h> -#include <linux/spinlock.h> +#include <linux/semaphore.h> #include <linux/time.h> #include <linux/types.h> struct module; -/* pstore record types (see fs/pstore/inode.c for filename templates) */ +/* + * pstore record types (see fs/pstore/platform.c for pstore_type_names[]) + * These values may be written to storage (see EFI vars backend), so + * they are kind of an ABI. Be careful changing the mappings. 
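As a side note on the software-node support added to property.h above, a minimal (hypothetical) use is to hand a device a few properties that firmware does not describe; the property names below are invented, and failure is assumed to be reported via ERR_PTR().

#include <linux/err.h>
#include <linux/property.h>

/* Made-up properties, purely for illustration. */
static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_U32("example,reset-delay-us", 100),
	PROPERTY_ENTRY_STRING("example,label", "demo"),
	{ }	/* sentinel */
};

static struct fwnode_handle *example_swnode;

static int example_create_swnode(void)
{
	example_swnode = fwnode_create_software_node(example_props, NULL);
	if (IS_ERR(example_swnode))
		return PTR_ERR(example_swnode);
	return 0;
}

static void example_remove_swnode(void)
{
	fwnode_remove_software_node(example_swnode);
}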
+ */ enum pstore_type_id { + /* Frontend storage types */ PSTORE_TYPE_DMESG = 0, PSTORE_TYPE_MCE = 1, PSTORE_TYPE_CONSOLE = 2, PSTORE_TYPE_FTRACE = 3, - /* PPC64 partition types */ + + /* PPC64-specific partition types */ PSTORE_TYPE_PPC_RTAS = 4, PSTORE_TYPE_PPC_OF = 5, PSTORE_TYPE_PPC_COMMON = 6, PSTORE_TYPE_PMSG = 7, PSTORE_TYPE_PPC_OPAL = 8, - PSTORE_TYPE_UNKNOWN = 255 + + /* End of the list */ + PSTORE_TYPE_MAX }; +const char *pstore_type_to_name(enum pstore_type_id type); +enum pstore_type_id pstore_name_to_type(const char *name); + struct pstore_info; /** * struct pstore_record - details of a pstore record entry @@ -85,12 +96,15 @@ struct pstore_record { /** * struct pstore_info - backend pstore driver structure * - * @owner: module which is repsonsible for this backend driver + * @owner: module which is responsible for this backend driver * @name: name of the backend driver * - * @buf_lock: spinlock to serialize access to @buf + * @buf_lock: semaphore to serialize access to @buf * @buf: preallocated crash dump buffer - * @bufsize: size of @buf available for crash dump writes + * @bufsize: size of @buf available for crash dump bytes (must match + * smallest number of bytes available for writing to a + * backend entry, since compressed bytes don't take kindly + * to being truncated) * * @read_mutex: serializes @open, @read, @close, and @erase callbacks * @flags: bitfield of frontends the backend can accept writes for @@ -170,7 +184,7 @@ struct pstore_info { struct module *owner; char *name; - spinlock_t buf_lock; + struct semaphore buf_lock; char *buf; size_t bufsize; @@ -189,14 +203,13 @@ struct pstore_info { }; /* Supported frontends */ -#define PSTORE_FLAGS_DMESG (1 << 0) -#define PSTORE_FLAGS_CONSOLE (1 << 1) -#define PSTORE_FLAGS_FTRACE (1 << 2) -#define PSTORE_FLAGS_PMSG (1 << 3) +#define PSTORE_FLAGS_DMESG BIT(0) +#define PSTORE_FLAGS_CONSOLE BIT(1) +#define PSTORE_FLAGS_FTRACE BIT(2) +#define PSTORE_FLAGS_PMSG BIT(3) extern int pstore_register(struct pstore_info *); extern void pstore_unregister(struct pstore_info *); -extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); struct pstore_ftrace_record { unsigned long ip; diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 602d64725222..337971c41980 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -22,6 +22,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/pstore.h> #include <linux/types.h> /* @@ -30,6 +31,11 @@ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required. */ #define PRZ_FLAG_NO_LOCK BIT(0) +/* + * If a PRZ should only have a single-boot lifetime, this marks it as + * getting wiped after its contents get copied out after boot. 
+ */ +#define PRZ_FLAG_ZAP_OLD BIT(1) struct persistent_ram_buffer; struct rs_control; @@ -42,17 +48,55 @@ struct persistent_ram_ecc_info { uint16_t *par; }; +/** + * struct persistent_ram_zone - Details of a persistent RAM zone (PRZ) + * used as a pstore backend + * + * @paddr: physical address of the mapped RAM area + * @size: size of mapping + * @label: unique name of this PRZ + * @type: frontend type for this PRZ + * @flags: holds PRZ_FLAGS_* bits + * + * @buffer_lock: + * locks access to @buffer "size" bytes and "start" offset + * @buffer: + * pointer to actual RAM area managed by this PRZ + * @buffer_size: + * bytes in @buffer->data (not including any trailing ECC bytes) + * + * @par_buffer: + * pointer into @buffer->data containing ECC bytes for @buffer->data + * @par_header: + * pointer into @buffer->data containing ECC bytes for @buffer header + * (i.e. all fields up to @data) + * @rs_decoder: + * RSLIB instance for doing ECC calculations + * @corrected_bytes: + * ECC corrected bytes accounting since boot + * @bad_blocks: + * ECC uncorrectable bytes accounting since boot + * @ecc_info: + * ECC configuration details + * + * @old_log: + * saved copy of @buffer->data prior to most recent wipe + * @old_log_size: + * bytes contained in @old_log + * + */ struct persistent_ram_zone { phys_addr_t paddr; size_t size; void *vaddr; char *label; - struct persistent_ram_buffer *buffer; - size_t buffer_size; + enum pstore_type_id type; u32 flags; + raw_spinlock_t buffer_lock; + struct persistent_ram_buffer *buffer; + size_t buffer_size; - /* ECC correction */ char *par_buffer; char *par_header; struct rs_control *rs_decoder; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 51349d124ee5..7121bbe76979 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -39,6 +39,15 @@ struct ptp_clock_request { }; struct system_device_crosststamp; + +/** + * struct ptp_system_timestamp - system time corresponding to a PHC timestamp + */ +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + /** * struct ptp_clock_info - decribes a PTP hardware clock * @@ -73,8 +82,18 @@ struct system_device_crosststamp; * parameter delta: Desired change in nanoseconds. * * @gettime64: Reads the current time from the hardware clock. + * This method is deprecated. New drivers should implement + * the @gettimex64 method instead. * parameter ts: Holds the result. * + * @gettimex64: Reads the current time from the hardware clock and optionally + * also the system clock. + * parameter ts: Holds the PHC timestamp. + * parameter sts: If not NULL, it holds a pair of timestamps from + * the system clock. The first reading is made right before + * reading the lowest bits of the PHC timestamp and the second + * reading immediately follows that. + * * @getcrosststamp: Reads the current time from the hardware clock and * system clock simultaneously. 
* parameter cts: Contains timestamp (device,system) pair, @@ -124,6 +143,8 @@ struct ptp_clock_info { int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); + int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts); int (*getcrosststamp)(struct ptp_clock_info *ptp, struct system_device_crosststamp *cts); int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); @@ -247,4 +268,16 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp, #endif +static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts) +{ + if (sts) + ktime_get_real_ts64(&sts->pre_ts); +} + +static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts) +{ + if (sts) + ktime_get_real_ts64(&sts->post_ts); +} + #endif diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6894976b54e3..186cd8e970c7 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -573,6 +573,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, else if (destroy) destroy(ptr); + if (producer >= size) + producer = 0; __ptr_ring_set_size(r, size); r->producer = producer; r->consumer_head = 0; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 6c2ffed907f5..edb9b040c94c 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -64,15 +64,12 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_NOAUDIT 0x04 #define PTRACE_MODE_FSCREDS 0x08 #define PTRACE_MODE_REALCREDS 0x10 -#define PTRACE_MODE_SCHED 0x20 -#define PTRACE_MODE_IBPB 0x40 /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) -#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB) /** * ptrace_may_access - check whether the caller is permitted to access @@ -90,20 +87,6 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); */ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); -/** - * ptrace_may_access - check whether the caller is permitted to access - * a target task. - * @task: target task - * @mode: selects type of access and caller credentials - * - * Returns true on success, false on denial. - * - * Similar to ptrace_may_access(). Only to be called from context switch - * code. Does not call into audit and the regular LSM hooks due to locking - * constraints. 
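A driver-side gettimex64() implementation built on the snapshot helpers above would look roughly like this sketch; example_read_phc_ns() is a stand-in for the driver's actual PHC register read.

#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>

static u64 example_read_phc_ns(void)
{
	return 0;	/* stub: a real driver reads its PHC counter here */
}

static int example_gettimex64(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	u64 ns;

	ptp_read_system_prets(sts);	/* system time just before the PHC read */
	ns = example_read_phc_ns();
	ptp_read_system_postts(sts);	/* system time just after the PHC read */

	*ts = ns_to_timespec64(ns);
	return 0;
}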
- */ -extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode); - static inline int ptrace_reparented(struct task_struct *child) { return !same_thread_group(child->real_parent, child->parent); @@ -428,4 +411,5 @@ extern int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc); +extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); #endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 56518adc31dd..d5199b507d79 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -349,42 +349,6 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns, } /** - * pwm_set_polarity() - configure the polarity of a PWM signal - * @pwm: PWM device - * @polarity: new polarity of the PWM signal - * - * Note that the polarity cannot be configured while the PWM device is - * enabled. - * - * Returns: 0 on success or a negative error code on failure. - */ -static inline int pwm_set_polarity(struct pwm_device *pwm, - enum pwm_polarity polarity) -{ - struct pwm_state state; - - if (!pwm) - return -EINVAL; - - pwm_get_state(pwm, &state); - if (state.polarity == polarity) - return 0; - - /* - * Changing the polarity of a running PWM without adjusting the - * dutycycle/period value is a bit risky (can introduce glitches). - * Return -EBUSY in this case. - * Note that this is allowed when using pwm_apply_state() because - * the user specifies all the parameters. - */ - if (state.enabled) - return -EBUSY; - - state.polarity = polarity; - return pwm_apply_state(pwm, &state); -} - -/** * pwm_enable() - start a PWM output toggling * @pwm: PWM device * @@ -483,12 +447,6 @@ static inline int pwm_capture(struct pwm_device *pwm, return -EINVAL; } -static inline int pwm_set_polarity(struct pwm_device *pwm, - enum pwm_polarity polarity) -{ - return -ENOTSUPP; -} - static inline int pwm_enable(struct pwm_device *pwm) { return -EINVAL; diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 06996ad4f2bc..1637385bcc17 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -67,6 +67,9 @@ extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val); extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); #else + +#include <linux/errno.h> + static inline int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index a47321a0d572..91c536a01b56 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -47,6 +47,7 @@ #include <linux/slab.h> #include <linux/qed/common_hsi.h> #include <linux/qed/qed_chain.h> +#include <linux/io-64-nonatomic-lo-hi.h> enum dcbx_protocol_type { DCBX_PROTOCOL_ISCSI, @@ -448,11 +449,24 @@ struct qed_mfw_tlv_iscsi { bool tx_bytes_set; }; +enum qed_db_rec_width { + DB_REC_WIDTH_32B, + DB_REC_WIDTH_64B, +}; + +enum qed_db_rec_space { + DB_REC_KERNEL, + DB_REC_USER, +}; + #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) +#define DIRECT_REG_WR64(reg_addr, val) writeq((u32)val, \ + (void __iomem *)(reg_addr)) + #define QED_COALESCE_MAX 0x1FF #define QED_DEFAULT_RX_USECS 12 #define QED_DEFAULT_TX_USECS 48 @@ -1015,6 +1029,33 @@ struct qed_common_ops { */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); +/** + * 
@brief db_recovery_add - add doorbell information to the doorbell + * recovery mechanism. + * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address of where db_data is stored + * @param db_is_32b - doorbell is 32b pr 64b + * @param db_is_user - doorbell recovery addresses are user or kernel space + */ + int (*db_recovery_add)(struct qed_dev *cdev, + void __iomem *db_addr, + void *db_data, + enum qed_db_rec_width db_width, + enum qed_db_rec_space db_space); + +/** + * @brief db_recovery_del - remove doorbell information from the doorbell + * recovery mechanism. db_data serves as key (db_addr is not unique). + * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address where db_data is stored. Serves as key for the + * entry to delete. + */ + int (*db_recovery_del)(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data); /** * @brief update_drv_state - API to inform the change in the driver state. diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index ea8505204fdf..605cf46c17bd 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -35,6 +35,7 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #include <limits.h> #include <stddef.h> #include <sys/mman.h> +#include <sys/time.h> #include <sys/types.h> /* Not standard, but glibc defines it */ @@ -52,7 +53,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #define __init #define __exit -#define __attribute_const__ __attribute__((const)) +#ifndef __attribute_const__ +# define __attribute_const__ __attribute__((const)) +#endif #define noinline __attribute__((noinline)) #define preempt_enable() @@ -67,6 +70,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE]; #define MODULE_DESCRIPTION(desc) #define subsys_initcall(x) #define module_exit(x) + +#define IS_ENABLED(x) (x) +#define CONFIG_RAID6_PQ_BENCHMARK 1 #endif /* __KERNEL__ */ /* Routine choices */ diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index 8a16c3eb3dd0..c0578ba23c1a 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -31,21 +31,4 @@ do { \ #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) -/** - * synchronize_rcu_mult - Wait concurrently for multiple grace periods - * @...: List of call_rcu() functions for different grace periods to wait on - * - * This macro waits concurrently for multiple types of RCU grace periods. - * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait - * on concurrent RCU and RCU-tasks grace periods. Waiting on a give SRCU - * domain requires you to write a wrapper function for that SRCU domain's - * call_srcu() function, supplying the corresponding srcu_struct. - * - * If Tiny RCU, tell _wait_rcu_gp() does not bother waiting for RCU, - * given that anywhere synchronize_rcu_mult() can be called is automatically - * a grace period. - */ -#define synchronize_rcu_mult(...) \ - _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) - #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index a367d59c301d..1781b6cb793c 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1089,27 +1089,48 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id, int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, unsigned int mask, unsigned int val, bool *change, bool async, bool force); +/** + * struct regmap_irq_type - IRQ type definitions. 
+ * + * @type_reg_offset: Offset register for the irq type setting. + * @type_rising_val: Register value to configure RISING type irq. + * @type_falling_val: Register value to configure FALLING type irq. + * @type_level_low_val: Register value to configure LEVEL_LOW type irq. + * @type_level_high_val: Register value to configure LEVEL_HIGH type irq. + * @types_supported: logical OR of IRQ_TYPE_* flags indicating supported types. + */ +struct regmap_irq_type { + unsigned int type_reg_offset; + unsigned int type_reg_mask; + unsigned int type_rising_val; + unsigned int type_falling_val; + unsigned int type_level_low_val; + unsigned int type_level_high_val; + unsigned int types_supported; +}; /** * struct regmap_irq - Description of an IRQ for the generic regmap irq_chip. * * @reg_offset: Offset of the status/mask register within the bank * @mask: Mask used to flag/control the register. - * @type_reg_offset: Offset register for the irq type setting. - * @type_rising_mask: Mask bit to configure RISING type irq. - * @type_falling_mask: Mask bit to configure FALLING type irq. + * @type: IRQ trigger type setting details if supported. */ struct regmap_irq { unsigned int reg_offset; unsigned int mask; - unsigned int type_reg_offset; - unsigned int type_rising_mask; - unsigned int type_falling_mask; + struct regmap_irq_type type; }; #define REGMAP_IRQ_REG(_irq, _off, _mask) \ [_irq] = { .reg_offset = (_off), .mask = (_mask) } +#define REGMAP_IRQ_REG_LINE(_id, _reg_bits) \ + [_id] = { \ + .mask = BIT((_id) % (_reg_bits)), \ + .reg_offset = (_id) / (_reg_bits), \ + } + /** * struct regmap_irq_chip - Description of a generic regmap irq_chip. * @@ -1131,6 +1152,12 @@ struct regmap_irq { * @ack_invert: Inverted ack register: cleared bits for ack. * @wake_invert: Inverted wake register: cleared bits are wake enabled. * @type_invert: Invert the type flags. + * @type_in_mask: Use the mask registers for controlling irq type. For + * interrupts defining type_rising/falling_mask use mask_base + * for edge configuration and never update bits in type_base. + * @clear_on_unmask: For chips with interrupts cleared on read: read the status + * registers before unmasking interrupts to clear any bits + * set when they were masked. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * * @num_regs: Number of registers in each control bank. 
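To see how the new helpers fit together, an illustrative (made-up) chip with one 8-bit status/mask register pair could describe its interrupts as below; REGMAP_IRQ_REG_LINE() derives the bit and register offset from the IRQ number, and an id of 8 or more would simply move to the next register.

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_irq example_irqs[] = {
	REGMAP_IRQ_REG_LINE(0, 8),	/* reg offset 0, BIT(0) */
	REGMAP_IRQ_REG_LINE(1, 8),	/* reg offset 0, BIT(1) */
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.status_base	= 0x10,		/* hypothetical register map */
	.mask_base	= 0x14,
	.num_regs	= 1,
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
	.clear_on_unmask = true,	/* read status before unmasking */
};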
@@ -1169,6 +1196,8 @@ struct regmap_irq_chip { bool wake_invert:1; bool runtime_pm:1; bool type_invert:1; + bool type_in_mask:1; + bool clear_on_unmask:1; int num_regs; diff --git a/include/linux/regset.h b/include/linux/regset.h index 494cedaafdf2..a85c1707285c 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -376,7 +376,7 @@ static inline int copy_regset_to_user(struct task_struct *target, if (!regset->get) return -EOPNOTSUPP; - if (!access_ok(VERIFY_WRITE, data, size)) + if (!access_ok(data, size)) return -EFAULT; return regset->get(target, regset, offset, size, NULL, data); @@ -402,7 +402,7 @@ static inline int copy_regset_from_user(struct task_struct *target, if (!regset->set) return -EOPNOTSUPP; - if (!access_ok(VERIFY_READ, data, size)) + if (!access_ok(data, size)) return -EFAULT; return regset->set(target, regset, offset, size, NULL, data); diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 25602afd4844..f3f76051e8b0 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -508,7 +508,7 @@ static inline int regulator_get_error_flags(struct regulator *regulator, static inline int regulator_set_load(struct regulator *regulator, int load_uA) { - return REGULATOR_MODE_NORMAL; + return 0; } static inline int regulator_allow_bypass(struct regulator *regulator, diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index a9c030192147..389bcaf7900f 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -15,11 +15,12 @@ #ifndef __LINUX_REGULATOR_DRIVER_H_ #define __LINUX_REGULATOR_DRIVER_H_ -#define MAX_COUPLED 4 +#define MAX_COUPLED 2 #include <linux/device.h> #include <linux/notifier.h> #include <linux/regulator/consumer.h> +#include <linux/ww_mutex.h> struct gpio_desc; struct regmap; @@ -462,7 +463,7 @@ struct regulator_dev { struct coupling_desc coupling_desc; struct blocking_notifier_head notifier; - struct mutex mutex; /* consumer lock */ + struct ww_mutex mutex; /* consumer lock */ struct task_struct *mutex_owner; int ref_cnt; struct module *owner; @@ -473,7 +474,6 @@ struct regulator_dev { struct regmap *regmap; struct delayed_work disable_work; - int deferred_disables; void *reg_data; /* regulator_dev data */ @@ -545,4 +545,7 @@ int regulator_set_active_discharge_regmap(struct regulator_dev *rdev, bool enable); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); +void regulator_lock(struct regulator_dev *rdev); +void regulator_unlock(struct regulator_dev *rdev); + #endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index a459a5e973a7..1d34a70ffda2 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -158,6 +158,9 @@ struct regulation_constraints { /* used for coupled regulators */ int max_spread; + /* used for changing voltage in steps */ + int max_uV_step; + /* valid regulator operating modes for this machine */ unsigned int valid_modes_mask; diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h index cb5aecd40f07..331d7d940c7a 100644 --- a/include/linux/regulator/pfuze100.h +++ b/include/linux/regulator/pfuze100.h @@ -33,7 +33,8 @@ #define PFUZE100_VGEN4 12 #define PFUZE100_VGEN5 13 #define PFUZE100_VGEN6 14 -#define PFUZE100_MAX_REGULATOR 15 +#define PFUZE100_COIN 15 +#define PFUZE100_MAX_REGULATOR 16 #define PFUZE200_SW1AB 0 #define PFUZE200_SW2 1 diff --git a/include/linux/reservation.h 
b/include/linux/reservation.h index 02166e815afb..2f0ffca35780 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -68,7 +68,6 @@ struct reservation_object_list { * @seq: sequence count for managing RCU read-side synchronization * @fence_excl: the exclusive fence, if there is one currently * @fence: list of current shared fences - * @staged: staged copy of shared fences for RCU updates */ struct reservation_object { struct ww_mutex lock; @@ -76,7 +75,6 @@ struct reservation_object { struct dma_fence __rcu *fence_excl; struct reservation_object_list __rcu *fence; - struct reservation_object_list *staged; }; #define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) @@ -95,7 +93,6 @@ reservation_object_init(struct reservation_object *obj) __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); RCU_INIT_POINTER(obj->fence, NULL); RCU_INIT_POINTER(obj->fence_excl, NULL); - obj->staged = NULL; } /** @@ -124,7 +121,6 @@ reservation_object_fini(struct reservation_object *obj) kfree(fobj); } - kfree(obj->staged); ww_mutex_destroy(&obj->lock); } @@ -218,6 +214,11 @@ reservation_object_trylock(struct reservation_object *obj) static inline void reservation_object_unlock(struct reservation_object *obj) { +#ifdef CONFIG_DEBUG_MUTEXES + /* Test shared fence slot reservation */ + if (obj->fence) + obj->fence->shared_max = obj->fence->shared_count; +#endif ww_mutex_unlock(&obj->lock); } @@ -265,7 +266,8 @@ reservation_object_get_excl_rcu(struct reservation_object *obj) return fence; } -int reservation_object_reserve_shared(struct reservation_object *obj); +int reservation_object_reserve_shared(struct reservation_object *obj, + unsigned int num_fences); void reservation_object_add_shared_fence(struct reservation_object *obj, struct dma_fence *fence); diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index eb7111039247..20f9c6af7473 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -75,8 +75,19 @@ struct bucket_table { struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; +/* + * NULLS_MARKER() expects a hash value with the low + * bits most likely to be significant, and it discards + * the msb. + * We give it an address, in which the bottom 2 bits are + * always 0, and the msb might be significant. + * So we shift the address down one bit to align with + * expectations and avoid losing a significant bit. + */ +#define RHT_NULLS_MARKER(ptr) \ + ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1)) #define INIT_RHT_NULLS_HEAD(ptr) \ - ((ptr) = (typeof(ptr)) NULLS_MARKER(0)) + ((ptr) = RHT_NULLS_MARKER(&(ptr))) static inline bool rht_is_a_nulls(const struct rhash_head *ptr) { @@ -471,6 +482,7 @@ static inline struct rhash_head *__rhashtable_lookup( .ht = ht, .key = key, }; + struct rhash_head __rcu * const *head; struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; @@ -478,13 +490,19 @@ static inline struct rhash_head *__rhashtable_lookup( tbl = rht_dereference_rcu(ht->tbl, ht); restart: hash = rht_key_hashfn(ht, tbl, key, params); - rht_for_each_rcu(he, tbl, hash) { - if (params.obj_cmpfn ? - params.obj_cmpfn(&arg, rht_obj(ht, he)) : - rhashtable_compare(&arg, rht_obj(ht, he))) - continue; - return he; - } + head = rht_bucket(tbl, hash); + do { + rht_for_each_rcu_continue(he, *head, tbl, hash) { + if (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) : + rhashtable_compare(&arg, rht_obj(ht, he))) + continue; + return he; + } + /* An object might have been moved to a different hash chain, + * while we walk along it - better check and retry. + */ + } while (he != RHT_NULLS_MARKER(head)); /* Ensure we see any new tables. */ smp_rmb(); diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 0940fda59872..5b9ae62272bb 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k __ring_buffer_alloc((size), (flags), &__key); \ }) -int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full); +int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full); __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table); @@ -189,6 +189,8 @@ bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer); size_t ring_buffer_page_len(void *page); +size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu); +size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu); void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu); void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index c8bb4a2b48c3..c1089fe5344a 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -87,15 +87,16 @@ struct rtc_class_ops { int (*set_offset)(struct device *, long offset); }; +struct rtc_device; + struct rtc_timer { struct timerqueue_node node; ktime_t period; - void (*func)(void *private_data); - void *private_data; + void (*func)(struct rtc_device *rtc); + struct rtc_device *rtc; int enabled; }; - /* flags */ #define RTC_DEV_BUSY 0 @@ -138,7 +139,6 @@ struct rtc_device { bool registered; - struct nvmem_device *nvmem; /* Old ABI support */ bool nvram_old_abi; struct bin_attribute *nvram; @@ -173,8 +173,6 @@ extern struct rtc_device *devm_rtc_device_register(struct device *dev, struct module *owner); struct rtc_device *devm_rtc_allocate_device(struct device *dev); int __rtc_register_device(struct module *owner, struct rtc_device *rtc); -extern void devm_rtc_device_unregister(struct device *dev, - struct rtc_device *rtc); extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm); extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm); @@ -200,11 +198,12 @@ extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled); void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); -void rtc_aie_update_irq(void *private); -void rtc_uie_update_irq(void *private); +void rtc_aie_update_irq(struct rtc_device *rtc); +void rtc_uie_update_irq(struct rtc_device *rtc); enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); -void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data); +void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r), + struct rtc_device *rtc); int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, ktime_t expires, ktime_t period); void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer); diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h deleted file mode 100644 index 65839a58b8e5..000000000000 --- a/include/linux/sa11x0-dma.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * SA11x0 DMA Engine support - * - * Copyright (C) 2012 Russell King - * - * This program is free 
software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __LINUX_SA11X0_DMA_H -#define __LINUX_SA11X0_DMA_H - -struct dma_chan; - -#if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE) -bool sa11x0_dma_filter_fn(struct dma_chan *, void *); -#else -static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d) -{ - return false; -} -#endif - -#endif diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 804a50983ec5..14d558146aea 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -30,14 +30,24 @@ struct seq_file; */ struct sbitmap_word { /** - * @word: The bitmap word itself. + * @depth: Number of bits being used in @word/@cleared */ - unsigned long word; + unsigned long depth; /** - * @depth: Number of bits being used in @word. + * @word: word holding free bits */ - unsigned long depth; + unsigned long word ____cacheline_aligned_in_smp; + + /** + * @cleared: word holding cleared bits + */ + unsigned long cleared ____cacheline_aligned_in_smp; + + /** + * @swap_lock: Held while swapping word <-> cleared + */ + spinlock_t swap_lock; } ____cacheline_aligned_in_smp; /** @@ -125,6 +135,11 @@ struct sbitmap_queue { */ struct sbq_wait_state *ws; + /* + * @ws_active: count of currently active ws waitqueues + */ + atomic_t ws_active; + /** * @round_robin: Allocate bits in strict round-robin order. */ @@ -250,12 +265,14 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb, nr = SB_NR_TO_BIT(sb, start); while (scanned < sb->depth) { - struct sbitmap_word *word = &sb->map[index]; - unsigned int depth = min_t(unsigned int, word->depth - nr, + unsigned long word; + unsigned int depth = min_t(unsigned int, + sb->map[index].depth - nr, sb->depth - scanned); scanned += depth; - if (!word->word) + word = sb->map[index].word & ~sb->map[index].cleared; + if (!word) goto next; /* @@ -265,7 +282,7 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb, */ depth += nr; while (1) { - nr = find_next_bit(&word->word, depth, nr); + nr = find_next_bit(&word, depth, nr); if (nr >= depth) break; if (!fn(sb, (index << sb->shift) + nr, data)) @@ -310,6 +327,19 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr) clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } +/* + * This one is special, since it doesn't actually clear the bit, rather it + * sets the corresponding bit in the ->cleared mask instead. Paired with + * the caller doing sbitmap_batch_clear() if a given index is full, which + * will clear the previously freed entries in the corresponding ->word. + */ +static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr) +{ + unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared; + + set_bit(SB_NR_TO_BIT(sb, bitnr), addr); +} + static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb, unsigned int bitnr) { @@ -321,8 +351,6 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr) return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr)); } -unsigned int sbitmap_weight(const struct sbitmap *sb); - /** * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file. * @sb: Bitmap to show. 
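Since sbitmap_deferred_clear_bit() above only marks the bit in ->cleared, the hot free path never touches ->word; the cleared bits are folded back into ->word in a batch later, as the header comment above describes. A rough sketch of a tag-style user, with the foo_tagset structure and helpers invented for illustration:

#include <linux/sbitmap.h>

struct foo_tagset {
        struct sbitmap tags;            /* hypothetical per-queue tag bitmap */
};

/* Hot free path: defer the clear instead of atomically clearing ->word. */
static void foo_put_tag(struct foo_tagset *set, unsigned int tag)
{
        sbitmap_deferred_clear_bit(&set->tags, tag);
}

/* __sbitmap_for_each_set() now skips deferred-cleared bits as well. */
static bool foo_count_busy(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        (*(unsigned int *)data)++;
        return true;                    /* keep iterating */
}

static unsigned int foo_busy_tags(struct foo_tagset *set)
{
        unsigned int busy = 0;

        sbitmap_for_each_set(&set->tags, foo_count_busy, &busy);
        return busy;
}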
@@ -531,4 +559,45 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq); */ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m); +struct sbq_wait { + struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */ + struct wait_queue_entry wait; +}; + +#define DEFINE_SBQ_WAIT(name) \ + struct sbq_wait name = { \ + .sbq = NULL, \ + .wait = { \ + .private = current, \ + .func = autoremove_wake_function, \ + .entry = LIST_HEAD_INIT((name).wait.entry), \ + } \ + } + +/* + * Wrapper around prepare_to_wait_exclusive(), which maintains some extra + * internal state. + */ +void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq, + struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait, int state); + +/* + * Must be paired with sbitmap_prepare_to_wait(). + */ +void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait); + +/* + * Wrapper around add_wait_queue(), which maintains some extra internal state + */ +void sbitmap_add_wait_queue(struct sbitmap_queue *sbq, + struct sbq_wait_state *ws, + struct sbq_wait *sbq_wait); + +/* + * Must be paired with sbitmap_add_wait_queue() + */ +void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait); + #endif /* __LINUX_SCALE_BITMAP_H */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 093aa57120b0..b96f0d0b5b8f 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -324,10 +324,10 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit * is totally arbitrary, a setting of 2048 will get you at least 8mb ios. */ -#ifdef CONFIG_ARCH_HAS_SG_CHAIN -#define SG_MAX_SEGMENTS 2048 -#else +#ifdef CONFIG_ARCH_NO_SG_CHAIN #define SG_MAX_SEGMENTS SG_CHUNK_SIZE +#else +#define SG_MAX_SEGMENTS 2048 #endif #ifdef CONFIG_SG_POOL diff --git a/include/linux/sched.h b/include/linux/sched.h index a51c13c2b1a0..89541d248893 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -176,7 +176,7 @@ struct task_group; * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). * * However, with slightly different timing the wakeup TASK_RUNNING store can - * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not + * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not * a problem either because that will result in one extra go around the loop * and our @cond test will save the day. * @@ -515,7 +515,7 @@ struct sched_dl_entity { /* * Actual scheduling parameters. Initialized with the values above, - * they are continously updated during task execution. Note that + * they are continuously updated during task execution. Note that * the remaining runtime could be < 0 in case we are in overrun. */ s64 runtime; /* Remaining runtime for this instance */ @@ -572,8 +572,10 @@ union rcu_special { struct { u8 blocked; u8 need_qs; + u8 exp_hint; /* Hint for performance. */ + u8 pad; /* No garbage from compiler! */ } b; /* Bits. */ - u16 s; /* Set of bits. */ + u32 s; /* Set of bits. 
*/ }; enum perf_event_task_context { @@ -993,7 +995,7 @@ struct task_struct { /* cg_list protected by css_set_lock and tsk->alloc_lock: */ struct list_head cg_list; #endif -#ifdef CONFIG_INTEL_RDT +#ifdef CONFIG_RESCTRL u32 closid; u32 rmid; #endif @@ -1116,6 +1118,7 @@ struct task_struct { #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored address in ret_stack: */ int curr_ret_stack; + int curr_ret_depth; /* Stack of return addresses for return function tracing: */ struct ftrace_ret_stack *ret_stack; @@ -1453,6 +1456,8 @@ static inline bool is_percpu_thread(void) #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ +#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ +#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ #define TASK_PFA_TEST(name, func) \ static inline bool task_##func(struct task_struct *p) \ @@ -1484,6 +1489,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) +TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable) +TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable) +TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable) + +TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) +TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) + static inline void current_restore_flags(unsigned long orig_flags, unsigned long flags) { diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index 59667444669f..afa940cd50dc 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -20,6 +20,12 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, void (*func)(struct update_util_data *data, u64 time, unsigned int flags)); void cpufreq_remove_update_util_hook(int cpu); + +static inline unsigned long map_util_freq(unsigned long util, + unsigned long freq, unsigned long cap) +{ + return (freq + (freq >> 2)) * util / cap; +} #endif /* CONFIG_CPU_FREQ */ #endif /* _LINUX_SCHED_CPUFREQ_H */ diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index 4a6582c27dea..b0fb1446fe04 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -16,7 +16,7 @@ enum hk_flags { }; #ifdef CONFIG_CPU_ISOLATION -DECLARE_STATIC_KEY_FALSE(housekeeping_overriden); +DECLARE_STATIC_KEY_FALSE(housekeeping_overridden); extern int housekeeping_any_cpu(enum hk_flags flags); extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags); extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags); @@ -43,7 +43,7 @@ static inline void housekeeping_init(void) { } static inline bool housekeeping_cpu(int cpu, enum hk_flags flags) { #ifdef CONFIG_CPU_ISOLATION - if (static_branch_unlikely(&housekeeping_overriden)) + if (static_branch_unlikely(&housekeeping_overridden)) return housekeeping_test_cpu(cpu, flags); #endif return true; diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index aebb370a0006..3bfa6a0cbba4 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -153,7 +153,7 @@ static inline gfp_t current_gfp_context(gfp_t flags) { /* * NOIO implies both NOIO and NOFS and it is a weaker context - * so always make sure it makes precendence + * so always make sure it makes precedence */ if 
(unlikely(current->flags & PF_MEMALLOC_NOIO)) flags &= ~(__GFP_IO | __GFP_FS); diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h new file mode 100644 index 000000000000..59d3736c454c --- /dev/null +++ b/include/linux/sched/smt.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_SMT_H +#define _LINUX_SCHED_SMT_H + +#include <linux/static_key.h> + +#ifdef CONFIG_SCHED_SMT +extern struct static_key_false sched_smt_present; + +static __always_inline bool sched_smt_active(void) +{ + return static_branch_likely(&sched_smt_present); +} +#else +static inline bool sched_smt_active(void) { return false; } +#endif + +void arch_smt_update(void); + +#endif diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h index f30954cc059d..568286411b43 100644 --- a/include/linux/sched/stat.h +++ b/include/linux/sched/stat.h @@ -8,7 +8,7 @@ * Various counters maintained by the scheduler and fork(), * exposed via /proc, sys.c or used by drivers via these APIs. * - * ( Note that all these values are aquired without locking, + * ( Note that all these values are acquired without locking, * so they can only be relied on in narrow circumstances. ) */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 108ede99e533..44c6f15800ff 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -39,6 +39,8 @@ void __noreturn do_task_dead(void); extern void proc_caches_init(void); +extern void fork_init(void); + extern void release_task(struct task_struct * p); #ifdef CONFIG_HAVE_COPY_THREAD_TLS diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 6b9976180c1e..c31d3a47a47c 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -89,7 +89,6 @@ struct sched_domain { unsigned int newidle_idx; unsigned int wake_idx; unsigned int forkexec_idx; - unsigned int smt_gain; int nohz_idle; /* NOHZ IDLE status */ int flags; /* See SD_* */ @@ -202,6 +201,14 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl); # define SD_INIT_NAME(type) #endif +#ifndef arch_scale_cpu_capacity +static __always_inline +unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) +{ + return SCHED_CAPACITY_SCALE; +} +#endif + #else /* CONFIG_SMP */ struct sched_domain_attr; @@ -217,6 +224,14 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) return true; } +#ifndef arch_scale_cpu_capacity +static __always_inline +unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu) +{ + return SCHED_CAPACITY_SCALE; +} +#endif + #endif /* !CONFIG_SMP */ static inline int task_node(const struct task_struct *p) diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index e5320f6c8654..84868d37b35d 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -4,9 +4,10 @@ #include <uapi/linux/seccomp.h> -#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ - SECCOMP_FILTER_FLAG_LOG | \ - SECCOMP_FILTER_FLAG_SPEC_ALLOW) +#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ + SECCOMP_FILTER_FLAG_LOG | \ + SECCOMP_FILTER_FLAG_SPEC_ALLOW | \ + SECCOMP_FILTER_FLAG_NEW_LISTENER) #ifdef CONFIG_SECCOMP @@ -43,7 +44,7 @@ extern void secure_computing_strict(int this_syscall); #endif extern long prctl_get_seccomp(void); -extern long prctl_set_seccomp(unsigned long, char __user *); +extern long prctl_set_seccomp(unsigned long, void __user *); static inline int seccomp_mode(struct seccomp *s) { diff --git a/include/linux/security.h 
b/include/linux/security.h index d170a5b031f3..dbfb5a66babb 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -182,36 +182,10 @@ static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id) #ifdef CONFIG_SECURITY -struct security_mnt_opts { - char **mnt_opts; - int *mnt_opts_flags; - int num_mnt_opts; -}; - int call_lsm_notifier(enum lsm_event event, void *data); int register_lsm_notifier(struct notifier_block *nb); int unregister_lsm_notifier(struct notifier_block *nb); -static inline void security_init_mnt_opts(struct security_mnt_opts *opts) -{ - opts->mnt_opts = NULL; - opts->mnt_opts_flags = NULL; - opts->num_mnt_opts = 0; -} - -static inline void security_free_mnt_opts(struct security_mnt_opts *opts) -{ - int i; - if (opts->mnt_opts) - for (i = 0; i < opts->num_mnt_opts; i++) - kfree(opts->mnt_opts[i]); - kfree(opts->mnt_opts); - opts->mnt_opts = NULL; - kfree(opts->mnt_opts_flags); - opts->mnt_opts_flags = NULL; - opts->num_mnt_opts = 0; -} - /* prototypes */ extern int security_init(void); @@ -248,9 +222,10 @@ void security_bprm_committing_creds(struct linux_binprm *bprm); void security_bprm_committed_creds(struct linux_binprm *bprm); int security_sb_alloc(struct super_block *sb); void security_sb_free(struct super_block *sb); -int security_sb_copy_data(char *orig, char *copy); -int security_sb_remount(struct super_block *sb, void *data); -int security_sb_kern_mount(struct super_block *sb, int flags, void *data); +void security_free_mnt_opts(void **mnt_opts); +int security_sb_eat_lsm_opts(char *options, void **mnt_opts); +int security_sb_remount(struct super_block *sb, void *mnt_opts); +int security_sb_kern_mount(struct super_block *sb); int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); int security_sb_mount(const char *dev_name, const struct path *path, @@ -258,14 +233,15 @@ int security_sb_mount(const char *dev_name, const struct path *path, int security_sb_umount(struct vfsmount *mnt, int flags); int security_sb_pivotroot(const struct path *old_path, const struct path *new_path); int security_sb_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts, + void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags); int security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags); -int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts); +int security_add_mnt_opt(const char *option, const char *val, + int len, void **mnt_opts); int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, void **ctx, u32 *ctxlen); @@ -403,8 +379,6 @@ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); #else /* CONFIG_SECURITY */ -struct security_mnt_opts { -}; static inline int call_lsm_notifier(enum lsm_event event, void *data) { @@ -421,11 +395,7 @@ static inline int unregister_lsm_notifier(struct notifier_block *nb) return 0; } -static inline void security_init_mnt_opts(struct security_mnt_opts *opts) -{ -} - -static inline void security_free_mnt_opts(struct security_mnt_opts *opts) +static inline void security_free_mnt_opts(void **mnt_opts) { } @@ -555,17 +525,19 @@ static inline int security_sb_alloc(struct super_block *sb) static inline void 
security_sb_free(struct super_block *sb) { } -static inline int security_sb_copy_data(char *orig, char *copy) +static inline int security_sb_eat_lsm_opts(char *options, + void **mnt_opts) { return 0; } -static inline int security_sb_remount(struct super_block *sb, void *data) +static inline int security_sb_remount(struct super_block *sb, + void *mnt_opts) { return 0; } -static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data) +static inline int security_sb_kern_mount(struct super_block *sb) { return 0; } @@ -600,7 +572,7 @@ static inline int security_sb_pivotroot(const struct path *old_path, } static inline int security_sb_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts, + void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) { @@ -615,7 +587,8 @@ static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb, return 0; } -static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) +static inline int security_add_mnt_opt(const char *option, const char *val, + int len, void **mnt_opts) { return 0; } @@ -1820,28 +1793,5 @@ static inline void security_bpf_prog_free(struct bpf_prog_aux *aux) #endif /* CONFIG_SECURITY */ #endif /* CONFIG_BPF_SYSCALL */ -#ifdef CONFIG_SECURITY - -static inline char *alloc_secdata(void) -{ - return (char *)get_zeroed_page(GFP_KERNEL); -} - -static inline void free_secdata(void *secdata) -{ - free_page((unsigned long)secdata); -} - -#else - -static inline char *alloc_secdata(void) -{ - return (char *)1; -} - -static inline void free_secdata(void *secdata) -{ } -#endif /* CONFIG_SECURITY */ - #endif /* ! __LINUX_SECURITY_H */ diff --git a/include/linux/serdev.h b/include/linux/serdev.h index f153b2c7f0cd..070bf4e92df7 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -210,7 +210,7 @@ void serdev_device_wait_until_sent(struct serdev_device *, long); int serdev_device_get_tiocm(struct serdev_device *); int serdev_device_set_tiocm(struct serdev_device *, int, int); void serdev_device_write_wakeup(struct serdev_device *); -int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, unsigned long); +int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long); void serdev_device_write_flush(struct serdev_device *); int serdev_device_write_room(struct serdev_device *); diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 18e21427bce4..5a655ba8d273 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -134,6 +134,10 @@ struct uart_8250_port { void (*dl_write)(struct uart_8250_port *, int); struct uart_8250_em485 *em485; + + /* Serial port overrun backoff */ + struct delayed_work overrun_backoff; + u32 overrun_backoff_time_ms; }; static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 047fa67d039b..5fe2b037e833 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -22,6 +22,7 @@ #include <linux/bitops.h> #include <linux/compiler.h> +#include <linux/console.h> #include <linux/interrupt.h> #include <linux/circ_buf.h> #include <linux/spinlock.h> @@ -175,6 +176,7 @@ struct uart_port { struct console *cons; /* struct console, if any */ #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ) unsigned long sysrq; /* sysrq timeout */ + unsigned int sysrq_ch; /* char for sysrq */ #endif /* flags must be updated while holding port 
mutex */ @@ -485,8 +487,42 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) } return 0; } +static inline int +uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) +{ + if (port->sysrq) { + if (ch && time_before(jiffies, port->sysrq)) { + port->sysrq_ch = ch; + port->sysrq = 0; + return 1; + } + port->sysrq = 0; + } + return 0; +} +static inline void +uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) +{ + int sysrq_ch; + + sysrq_ch = port->sysrq_ch; + port->sysrq_ch = 0; + + spin_unlock_irqrestore(&port->lock, irqflags); + + if (sysrq_ch) + handle_sysrq(sysrq_ch); +} #else -#define uart_handle_sysrq_char(port,ch) ({ (void)port; 0; }) +static inline int +uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; } +static inline int +uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; } +static inline void +uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) +{ + spin_unlock_irqrestore(&port->lock, irqflags); +} #endif /* diff --git a/include/linux/sfp.h b/include/linux/sfp.h index d37518e89db2..d9d9de3fcf8e 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -224,7 +224,7 @@ struct sfp_eeprom_ext { * * See the SFF-8472 specification and related documents for the definition * of these structure members. This can be obtained from - * ftp://ftp.seagate.com/sff + * https://www.snia.org/technology-communities/sff/specifications */ struct sfp_eeprom_id { struct sfp_eeprom_base base; diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index d927647e6350..6dfd05ef5c2d 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h @@ -1,4 +1,5 @@ -/* +/* SPDX-License-Identifier: GPL-2.0 + * * Dmaengine driver base library for DMA controllers, found on SH-based SoCs * * extracted from shdma.c and headers @@ -7,10 +8,6 @@ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. - * - * This is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
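The uart_prepare_sysrq_char()/uart_unlock_and_check_sysrq() pair added above lets a driver record a sysrq character while holding port->lock and run handle_sysrq() only after the lock is dropped. A sketch of an RX interrupt handler using them; the foo_uart_* FIFO accessors are placeholders for real hardware access:

#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Placeholder FIFO accessors standing in for real register reads. */
static bool foo_uart_rx_ready(struct uart_port *port) { return false; }
static unsigned int foo_uart_read_char(struct uart_port *port) { return 0; }

static irqreturn_t foo_uart_irq(int irq, void *dev_id)
{
        struct uart_port *port = dev_id;
        unsigned long flags;
        unsigned int ch;

        spin_lock_irqsave(&port->lock, flags);

        while (foo_uart_rx_ready(port)) {
                ch = foo_uart_read_char(port);
                if (uart_prepare_sysrq_char(port, ch))
                        continue;       /* recorded, handled after unlock */
                uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
        }
        tty_flip_buffer_push(&port->state->port);

        /* Drops port->lock, then calls handle_sysrq() if a char was queued. */
        uart_unlock_and_check_sysrq(port, flags);

        return IRQ_HANDLED;
}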
*/ #ifndef SHDMA_BASE_H diff --git a/include/linux/signal.h b/include/linux/signal.h index f428e86f4800..cc7e2c1cd444 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -273,6 +273,10 @@ extern int group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, enum pid_type type); extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern int sigprocmask(int, sigset_t *, sigset_t *); +extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set, + sigset_t *oldset, size_t sigsetsize); +extern void restore_user_sigmask(const void __user *usigmask, + sigset_t *sigsaved); extern void set_current_blocked(sigset_t *); extern void __set_current_blocked(const sigset_t *); extern int show_unhandled_signals; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0ba687454267..93f56fddd92a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -245,6 +245,7 @@ struct iov_iter; struct napi_struct; struct bpf_prog; union bpf_attr; +struct skb_ext; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -254,7 +255,6 @@ struct nf_conntrack { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { - refcount_t use; enum { BRNF_PROTO_UNCHANGED, BRNF_PROTO_8021Q, @@ -481,10 +481,11 @@ static inline void sock_zerocopy_get(struct ubuf_info *uarg) } void sock_zerocopy_put(struct ubuf_info *uarg); -void sock_zerocopy_put_abort(struct ubuf_info *uarg); +void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); void sock_zerocopy_callback(struct ubuf_info *uarg, bool success); +int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len); int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, struct msghdr *msg, int len, struct ubuf_info *uarg); @@ -615,6 +616,8 @@ typedef unsigned char *sk_buff_data_t; * @pkt_type: Packet class * @fclone: skbuff clone status * @ipvs_property: skbuff is owned by ipvs + * @offload_fwd_mark: Packet was L2-forwarded in hardware + * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware * @tc_skip_classify: do not classify packet. 
set by IFB device * @tc_at_ingress: used within tc_classify to distinguish in/egress * @tc_redirected: packet was redirected by a tc action @@ -633,6 +636,7 @@ typedef unsigned char *sk_buff_data_t; * @queue_mapping: Queue mapping for multiqueue devices * @xmit_more: More SKBs are pending for this queue * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves + * @active_extensions: active extensions (skb_ext_id types) * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -662,6 +666,7 @@ typedef unsigned char *sk_buff_data_t; * @data: Data head pointer * @truesize: Buffer size * @users: User count - see {datagram,tcp}.c + * @extensions: allocated extensions, valid if active_extensions is nonzero */ struct sk_buff { @@ -709,15 +714,9 @@ struct sk_buff { struct list_head tcp_tsorted_anchor; }; -#ifdef CONFIG_XFRM - struct sec_path *sp; -#endif #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) unsigned long _nfct; #endif -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - struct nf_bridge_info *nf_bridge; -#endif unsigned int len, data_len; __u16 mac_len, @@ -744,7 +743,9 @@ struct sk_buff { head_frag:1, xmit_more:1, pfmemalloc:1; - +#ifdef CONFIG_SKB_EXTENSIONS + __u8 active_extensions; +#endif /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() */ @@ -777,6 +778,14 @@ struct sk_buff { __u8 encap_hdr_csum:1; __u8 csum_valid:1; +#ifdef __BIG_ENDIAN_BITFIELD +#define PKT_VLAN_PRESENT_BIT 7 +#else +#define PKT_VLAN_PRESENT_BIT 0 +#endif +#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset) + __u8 __pkt_vlan_present_offset[0]; + __u8 vlan_present:1; __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 csum_not_inet:1; @@ -784,13 +793,13 @@ struct sk_buff { #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif - __u8 ipvs_property:1; + __u8 ipvs_property:1; __u8 inner_protocol_type:1; __u8 remcsum_offload:1; #ifdef CONFIG_NET_SWITCHDEV __u8 offload_fwd_mark:1; - __u8 offload_mr_fwd_mark:1; + __u8 offload_l3_fwd_mark:1; #endif #ifdef CONFIG_NET_CLS_ACT __u8 tc_skip_classify:1; @@ -858,6 +867,11 @@ struct sk_buff { *data; unsigned int truesize; refcount_t users; + +#ifdef CONFIG_SKB_EXTENSIONS + /* only useable after checking ->active_extensions != 0 */ + struct skb_ext *extensions; +#endif }; #ifdef __KERNEL__ @@ -1317,15 +1331,35 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) return is_zcopy ? 
skb_uarg(skb) : NULL; } -static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg) +static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, + bool *have_ref) { if (skb && uarg && !skb_zcopy(skb)) { - sock_zerocopy_get(uarg); + if (unlikely(have_ref && *have_ref)) + *have_ref = false; + else + sock_zerocopy_get(uarg); skb_shinfo(skb)->destructor_arg = uarg; skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; } } +static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) +{ + skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); + skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; +} + +static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) +{ + return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; +} + +static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) +{ + return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); +} + /* Release a reference on a zerocopy structure */ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) { @@ -1335,7 +1369,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) if (uarg->callback == sock_zerocopy_callback) { uarg->zerocopy = uarg->zerocopy && zerocopy; sock_zerocopy_put(uarg); - } else { + } else if (!skb_zcopy_is_nouarg(skb)) { uarg->callback(uarg, zerocopy); } @@ -1349,7 +1383,7 @@ static inline void skb_zcopy_abort(struct sk_buff *skb) struct ubuf_info *uarg = skb_zcopy(skb); if (uarg) { - sock_zerocopy_put_abort(uarg); + sock_zerocopy_put_abort(uarg, false); skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; } } @@ -1725,8 +1759,6 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list, * The "__skb_xxxx()" functions are the non-atomic ones that * can only be called with interrupts disabled. 
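The new *_nouarg helpers above tag bit 0 of shinfo->destructor_arg so callers can stash a value in the zerocopy slot without allocating a ubuf_info. A sketch, where struct foo_ctx is an invented, pointer-aligned per-skb object:

#include <linux/skbuff.h>

struct foo_ctx {
        u32 cookie;                     /* hypothetical per-skb driver state */
};

static void foo_stash_ctx(struct sk_buff *skb, struct foo_ctx *ctx)
{
        /* ctx must be at least 2-byte aligned so bit 0 is free for the tag. */
        skb_zcopy_set_nouarg(skb, ctx);
}

static struct foo_ctx *foo_fetch_ctx(struct sk_buff *skb)
{
        if (!skb_zcopy(skb) || !skb_zcopy_is_nouarg(skb))
                return NULL;
        return skb_zcopy_get_nouarg(skb);
}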
*/ -void skb_insert(struct sk_buff *old, struct sk_buff *newsk, - struct sk_buff_head *list); static inline void __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list) @@ -2508,10 +2540,8 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len); static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) { - if (unlikely(skb_is_nonlinear(skb))) { - WARN_ON(1); + if (WARN_ON(skb_is_nonlinear(skb))) return; - } skb->len = len; skb_set_tail_pointer(skb, len); } @@ -3309,6 +3339,9 @@ static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, } int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, struct msghdr *msg); +int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset, + struct iov_iter *to, int len, + struct ahash_request *hash); int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, struct iov_iter *from, int len); int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); @@ -3329,7 +3362,6 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, unsigned int flags); int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len); -int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); unsigned int skb_zerocopy_headlen(const struct sk_buff *from); int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, @@ -3870,18 +3902,97 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct) atomic_inc(&nfct->use); } #endif + +#ifdef CONFIG_SKB_EXTENSIONS +enum skb_ext_id { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) -static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) + SKB_EXT_BRIDGE_NF, +#endif +#ifdef CONFIG_XFRM + SKB_EXT_SEC_PATH, +#endif + SKB_EXT_NUM, /* must be last */ +}; + +/** + * struct skb_ext - sk_buff extensions + * @refcnt: 1 on allocation, deallocated on 0 + * @offset: offset to add to @data to obtain extension address + * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units + * @data: start of extension data, variable sized + * + * Note: offsets/lengths are stored in chunks of 8 bytes, this allows + * to use 'u8' types while allowing up to 2kb worth of extension data. 
+ */ +struct skb_ext { + refcount_t refcnt; + u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */ + u8 chunks; /* same */ + char data[0] __aligned(8); +}; + +void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id); +void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id); +void __skb_ext_put(struct skb_ext *ext); + +static inline void skb_ext_put(struct sk_buff *skb) { - if (nf_bridge && refcount_dec_and_test(&nf_bridge->use)) - kfree(nf_bridge); + if (skb->active_extensions) + __skb_ext_put(skb->extensions); } -static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) + +static inline void __skb_ext_copy(struct sk_buff *dst, + const struct sk_buff *src) +{ + dst->active_extensions = src->active_extensions; + + if (src->active_extensions) { + struct skb_ext *ext = src->extensions; + + refcount_inc(&ext->refcnt); + dst->extensions = ext; + } +} + +static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src) +{ + skb_ext_put(dst); + __skb_ext_copy(dst, src); +} + +static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i) { - if (nf_bridge) - refcount_inc(&nf_bridge->use); + return !!ext->offset[i]; } -#endif /* CONFIG_BRIDGE_NETFILTER */ + +static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id) +{ + return skb->active_extensions & (1 << id); +} + +static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) +{ + if (skb_ext_exist(skb, id)) + __skb_ext_del(skb, id); +} + +static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id) +{ + if (skb_ext_exist(skb, id)) { + struct skb_ext *ext = skb->extensions; + + return (void *)ext + (ext->offset[id] << 3); + } + + return NULL; +} +#else +static inline void skb_ext_put(struct sk_buff *skb) {} +static inline void skb_ext_del(struct sk_buff *skb, int unused) {} +static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {} +static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {} +#endif /* CONFIG_SKB_EXTENSIONS */ + static inline void nf_reset(struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@ -3889,8 +4000,7 @@ static inline void nf_reset(struct sk_buff *skb) skb->_nfct = 0; #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - nf_bridge_put(skb->nf_bridge); - skb->nf_bridge = NULL; + skb_ext_del(skb, SKB_EXT_BRIDGE_NF); #endif } @@ -3908,7 +4018,7 @@ static inline void ipvs_reset(struct sk_buff *skb) #endif } -/* Note: This doesn't put any conntrack and bridge info in dst. */ +/* Note: This doesn't put any conntrack info in dst. 
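The extension helpers above replace the old dedicated sk_buff pointers: a caller asks for a slot by id and gets back the payload area. A minimal sketch assuming CONFIG_SKB_EXTENSIONS and CONFIG_BRIDGE_NETFILTER are enabled; the foo_* wrappers are invented and error handling is reduced to the allocation check:

#include <linux/skbuff.h>

static void foo_attach_bridge_state(struct sk_buff *skb)
{
        struct nf_bridge_info *nf_bridge;

        /* Allocates (or reuses) the extension area and returns this id's slot. */
        nf_bridge = skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
        if (!nf_bridge)
                return;                 /* allocation failed, skb unchanged */

        /* ... fill in bridge-netfilter state here ... */
}

static bool foo_has_bridge_state(const struct sk_buff *skb)
{
        return skb_ext_find(skb, SKB_EXT_BRIDGE_NF) != NULL;
}

static void foo_drop_bridge_state(struct sk_buff *skb)
{
        skb_ext_del(skb, SKB_EXT_BRIDGE_NF);    /* no-op if not present */
}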
*/ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, bool copy) { @@ -3916,10 +4026,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, dst->_nfct = src->_nfct; nf_conntrack_get(skb_nfct(src)); #endif -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - dst->nf_bridge = src->nf_bridge; - nf_bridge_get(src->nf_bridge); -#endif #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) if (copy) dst->nf_trace = src->nf_trace; @@ -3931,9 +4037,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb_nfct(dst)); #endif -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - nf_bridge_put(dst->nf_bridge); -#endif __nf_copy(dst, src, true); } @@ -3955,12 +4058,19 @@ static inline void skb_init_secmark(struct sk_buff *skb) { } #endif +static inline int secpath_exists(const struct sk_buff *skb) +{ +#ifdef CONFIG_XFRM + return skb_ext_exist(skb, SKB_EXT_SEC_PATH); +#else + return 0; +#endif +} + static inline bool skb_irq_freeable(const struct sk_buff *skb) { return !skb->destructor && -#if IS_ENABLED(CONFIG_XFRM) - !skb->sp && -#endif + !secpath_exists(skb) && !skb_nfct(skb) && !skb->_skb_refdst && !skb_has_frag_list(skb); @@ -4006,10 +4116,10 @@ static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb) return skb->dst_pending_confirm != 0; } -static inline struct sec_path *skb_sec_path(struct sk_buff *skb) +static inline struct sec_path *skb_sec_path(const struct sk_buff *skb) { #ifdef CONFIG_XFRM - return skb->sp; + return skb_ext_find(skb, SKB_EXT_SEC_PATH); #else return NULL; #endif diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 2a11e9d91dfa..178a3933a71b 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -36,6 +36,7 @@ struct sk_msg_sg { struct scatterlist data[MAX_MSG_FRAGS + 1]; }; +/* UAPI in filter.c depends on struct sk_msg_sg being first element. */ struct sk_msg { struct sk_msg_sg sg; void *data; @@ -416,6 +417,14 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) sk_psock_drop(sk, psock); } +static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock) +{ + if (psock->parser.enabled) + psock->parser.saved_data_ready(sk); + else + sk->sk_data_ready(sk); +} + static inline void psock_set_prog(struct bpf_prog **pprog, struct bpf_prog *prog) { diff --git a/include/linux/slab.h b/include/linux/slab.h index 918f374e7156..11b45f7ae405 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -314,22 +314,22 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1]; static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) { - int is_dma = 0; - int type_dma = 0; - int is_reclaimable; - #ifdef CONFIG_ZONE_DMA - is_dma = !!(flags & __GFP_DMA); - type_dma = is_dma * KMALLOC_DMA; -#endif - - is_reclaimable = !!(flags & __GFP_RECLAIMABLE); + /* + * The most common case is KMALLOC_NORMAL, so test for it + * with a single branch for both flags. + */ + if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0)) + return KMALLOC_NORMAL; /* - * If an allocation is both __GFP_DMA and __GFP_RECLAIMABLE, return - * KMALLOC_DMA and effectively ignore __GFP_RECLAIMABLE + * At least one of the flags has to be set. If both are, __GFP_DMA + * is more important. */ - return type_dma + (is_reclaimable & !is_dma) * KMALLOC_RECLAIM; + return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM; +#else + return flags & __GFP_RECLAIMABLE ? 
KMALLOC_RECLAIM : KMALLOC_NORMAL; +#endif } /* @@ -444,7 +444,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, { void *ret = kmem_cache_alloc(s, flags); - kasan_kmalloc(s, ret, size, flags); + ret = kasan_kmalloc(s, ret, size, flags); return ret; } @@ -455,7 +455,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, { void *ret = kmem_cache_alloc_node(s, gfpflags, node); - kasan_kmalloc(s, ret, size, gfpflags); + ret = kasan_kmalloc(s, ret, size, gfpflags); return ret; } #endif /* CONFIG_TRACING */ @@ -486,48 +486,47 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) * kmalloc is the normal method of allocating memory * for objects smaller than page size in the kernel. * - * The @flags argument may be one of: - * - * %GFP_USER - Allocate memory on behalf of user. May sleep. - * - * %GFP_KERNEL - Allocate normal kernel ram. May sleep. + * The @flags argument may be one of the GFP flags defined at + * include/linux/gfp.h and described at + * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>` * - * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools. - * For example, use this inside interrupt handlers. + * The recommended usage of the @flags is described at + * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>` * - * %GFP_HIGHUSER - Allocate pages from high memory. + * Below is a brief outline of the most useful GFP flags * - * %GFP_NOIO - Do not do any I/O at all while trying to get memory. + * %GFP_KERNEL + * Allocate normal kernel ram. May sleep. * - * %GFP_NOFS - Do not make any fs calls while trying to get memory. + * %GFP_NOWAIT + * Allocation will not sleep. * - * %GFP_NOWAIT - Allocation will not sleep. + * %GFP_ATOMIC + * Allocation will not sleep. May use emergency pools. * - * %__GFP_THISNODE - Allocate node-local memory only. - * - * %GFP_DMA - Allocation suitable for DMA. - * Should only be used for kmalloc() caches. Otherwise, use a - * slab created with SLAB_DMA. + * %GFP_HIGHUSER + * Allocate memory from high memory on behalf of user. * * Also it is possible to set different flags by OR'ing * in one or more of the following additional @flags: * - * %__GFP_HIGH - This allocation has high priority and may use emergency pools. - * - * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail - * (think twice before using). + * %__GFP_HIGH + * This allocation has high priority and may use emergency pools. * - * %__GFP_NORETRY - If memory is not immediately available, - * then give up at once. + * %__GFP_NOFAIL + * Indicate that this allocation is in no way allowed to fail + * (think twice before using). * - * %__GFP_NOWARN - If allocation fails, don't issue any warnings. + * %__GFP_NORETRY + * If memory is not immediately available, + * then give up at once. * - * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail - * eventually. + * %__GFP_NOWARN + * If allocation fails, don't issue any warnings. * - * There are other flags available as well, but these are not intended - * for general use, and so are not documented here. For a full list of - * potential flags, always refer to linux/gfp.h. + * %__GFP_RETRY_MAYFAIL + * Try really hard to succeed the allocation but fail + * eventually. 
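For reference, the rewritten kmalloc_type() above routes the common flag combinations as sketched below (assuming CONFIG_ZONE_DMA=y); this only restates the branch logic, it does not change behaviour:

#include <linux/slab.h>

static void foo_kmalloc_type_examples(void)
{
        void *p;

        p = kmalloc(64, GFP_KERNEL);                     /* KMALLOC_NORMAL  */
        kfree(p);
        p = kmalloc(64, GFP_KERNEL | __GFP_RECLAIMABLE); /* KMALLOC_RECLAIM */
        kfree(p);
        p = kmalloc(64, GFP_KERNEL | GFP_DMA);           /* KMALLOC_DMA     */
        kfree(p);
        /* Both set: __GFP_DMA wins and __GFP_RECLAIMABLE is ignored. */
        p = kmalloc(64, GFP_KERNEL | GFP_DMA | __GFP_RECLAIMABLE);
        kfree(p);
}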
*/ static __always_inline void *kmalloc(size_t size, gfp_t flags) { diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 3485c58cfd1c..9a5eafb7145b 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -104,4 +104,17 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, return object; } +/* + * We want to avoid an expensive divide : (offset / cache->size) + * Using the fact that size is a constant for a particular cache, + * we can replace (offset / cache->size) by + * reciprocal_divide(offset, cache->reciprocal_buffer_size) + */ +static inline unsigned int obj_to_index(const struct kmem_cache *cache, + const struct page *page, void *obj) +{ + u32 offset = (obj - page->s_mem); + return reciprocal_divide(offset, cache->reciprocal_buffer_size); +} + #endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h new file mode 100644 index 000000000000..54ade13a9b15 --- /dev/null +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 MediaTek Inc. + * + */ + +#ifndef __MTK_CMDQ_H__ +#define __MTK_CMDQ_H__ + +#include <linux/mailbox_client.h> +#include <linux/mailbox/mtk-cmdq-mailbox.h> +#include <linux/timer.h> + +#define CMDQ_NO_TIMEOUT 0xffffffffu + +/** cmdq event maximum */ +#define CMDQ_MAX_EVENT 0x3ff + +struct cmdq_pkt; + +struct cmdq_client { + spinlock_t lock; + u32 pkt_cnt; + struct mbox_client client; + struct mbox_chan *chan; + struct timer_list timer; + u32 timeout_ms; /* in unit of millisecond */ +}; + +/** + * cmdq_mbox_create() - create CMDQ mailbox client and channel + * @dev: device of CMDQ mailbox client + * @index: index of CMDQ mailbox channel + * @timeout: timeout of a pkt execution by GCE, in unit of millisecond, set + * CMDQ_NO_TIMEOUT if a timer is not used.
+ * + * Return: CMDQ mailbox client pointer + */ +struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, + u32 timeout); + +/** + * cmdq_mbox_destroy() - destroy CMDQ mailbox client and channel + * @client: the CMDQ mailbox client + */ +void cmdq_mbox_destroy(struct cmdq_client *client); + +/** + * cmdq_pkt_create() - create a CMDQ packet + * @client: the CMDQ mailbox client + * @size: required CMDQ buffer size + * + * Return: CMDQ packet pointer + */ +struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size); + +/** + * cmdq_pkt_destroy() - destroy the CMDQ packet + * @pkt: the CMDQ packet + */ +void cmdq_pkt_destroy(struct cmdq_pkt *pkt); + +/** + * cmdq_pkt_write() - append write command to the CMDQ packet + * @pkt: the CMDQ packet + * @value: the specified target register value + * @subsys: the CMDQ sub system code + * @offset: register offset from CMDQ sub system + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset); + +/** + * cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet + * @pkt: the CMDQ packet + * @value: the specified target register value + * @subsys: the CMDQ sub system code + * @offset: register offset from CMDQ sub system + * @mask: the specified target register mask + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value, + u32 subsys, u32 offset, u32 mask); + +/** + * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet + * @pkt: the CMDQ packet + * @event: the desired event type to "wait and CLEAR" + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event); + +/** + * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet + * @pkt: the CMDQ packet + * @event: the desired event to be cleared + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event); + +/** + * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ + * packet and call back at the end of done packet + * @pkt: the CMDQ packet + * @cb: called at the end of done packet + * @data: this data will pass back to cb + * + * Return: 0 for success; else the error code is returned + * + * Trigger CMDQ to asynchronously execute the CMDQ packet and call back + * at the end of done packet. Note that this is an ASYNC function. When the + * function returned, it may or may not be finished. + */ +int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb, + void *data); + +/** + * cmdq_pkt_flush() - trigger CMDQ to execute the CMDQ packet + * @pkt: the CMDQ packet + * + * Return: 0 for success; else the error code is returned + * + * Trigger CMDQ to execute the CMDQ packet. Note that this is a + * synchronous flush function. When the function returned, the recorded + * commands have been done. 
+ */ +int cmdq_pkt_flush(struct cmdq_pkt *pkt); + +#endif /* __MTK_CMDQ_H__ */ diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h index f4de33654a60..5efa2b67fa55 100644 --- a/include/linux/soc/qcom/qmi.h +++ b/include/linux/soc/qcom/qmi.h @@ -166,7 +166,7 @@ struct qmi_ops { struct qmi_txn { struct qmi_handle *qmi; - int id; + u16 id; struct mutex lock; struct completion completion; diff --git a/include/linux/socket.h b/include/linux/socket.h index 8b571e9b9f76..ab2041a00e01 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -286,6 +286,7 @@ struct ucred { #define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ #define MSG_MORE 0x8000 /* Sender will send more */ #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ +#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */ #define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN @@ -348,7 +349,8 @@ struct ucred { extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); -struct timespec64; +struct __kernel_timespec; +struct old_timespec32; /* The __sys_...msg variants allow MSG_CMSG_COMPAT iff * forbid_cmsg_compat==false @@ -357,8 +359,10 @@ extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned int flags, bool forbid_cmsg_compat); extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned int flags, bool forbid_cmsg_compat); -extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, - unsigned int flags, struct timespec64 *timeout); +extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, + unsigned int vlen, unsigned int flags, + struct __kernel_timespec __user *timeout, + struct old_timespec32 __user *timeout32); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, bool forbid_cmsg_compat); diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h index bfde741a543d..778ae8eb1f3e 100644 --- a/include/linux/spi/mmc_spi.h +++ b/include/linux/spi/mmc_spi.h @@ -8,11 +8,6 @@ struct device; struct mmc_host; -#define MMC_SPI_USE_CD_GPIO (1 << 0) -#define MMC_SPI_USE_RO_GPIO (1 << 1) -#define MMC_SPI_CD_GPIO_ACTIVE_LOW (1 << 2) -#define MMC_SPI_RO_GPIO_ACTIVE_LOW (1 << 3) - /* Put this in platform_data of a device being used to manage an MMC/SD * card slot. (Modeled after PXA mmc glue; see that for usage examples.) * @@ -27,16 +22,6 @@ struct mmc_spi_platform_data { void *); void (*exit)(struct device *, void *); - /* - * Card Detect and Read Only GPIOs. To enable debouncing on the card - * detect GPIO, set the cd_debounce to the debounce time in - * microseconds. - */ - unsigned int flags; - unsigned int cd_gpio; - unsigned int cd_debounce; - unsigned int ro_gpio; - /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). 
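Putting the CMDQ helpers declared in the new mtk-cmdq.h above together, the expected flow is create client -> create packet -> append commands -> flush. A sketch with made-up subsys code, register offsets and event id, and assuming the create helpers return ERR_PTR() on failure:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define FOO_SUBSYS      0x14            /* made-up GCE subsys code */
#define FOO_EVENT_SOF   32              /* made-up hardware event id */

static int foo_write_regs_via_gce(struct device *dev)
{
        struct cmdq_client *client;
        struct cmdq_pkt *pkt;
        int ret;

        client = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
        if (IS_ERR(client))
                return PTR_ERR(client);

        pkt = cmdq_pkt_create(client, SZ_4K);
        if (IS_ERR(pkt)) {
                ret = PTR_ERR(pkt);
                goto out_destroy_mbox;
        }

        cmdq_pkt_wfe(pkt, FOO_EVENT_SOF);                     /* wait and clear */
        cmdq_pkt_write_mask(pkt, 0x1, FOO_SUBSYS, 0x40, 0x1); /* set bit 0 only */
        cmdq_pkt_write(pkt, 0x1234, FOO_SUBSYS, 0x44);        /* full write */

        ret = cmdq_pkt_flush(pkt);      /* synchronous: commands done on return */

        cmdq_pkt_destroy(pkt);
out_destroy_mbox:
        cmdq_mbox_destroy(client);
        return ret;
}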
*/ unsigned long caps; unsigned long caps2; diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 9ec4c147abbc..b0674e330ef6 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -25,6 +25,7 @@ struct dma_chan; struct pxa2xx_spi_master { u16 num_chipselect; u8 enable_dma; + bool is_slave; /* DMA engine specific config */ bool (*dma_filter)(struct dma_chan *chan, void *param); diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 69ee30456864..3fe24500c5ee 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -57,10 +57,12 @@ /** * enum spi_mem_data_dir - describes the direction of a SPI memory data * transfer from the controller perspective + * @SPI_MEM_NO_DATA: no data transferred * @SPI_MEM_DATA_IN: data coming from the SPI memory - * @SPI_MEM_DATA_OUT: data sent the SPI memory + * @SPI_MEM_DATA_OUT: data sent to the SPI memory */ enum spi_mem_data_dir { + SPI_MEM_NO_DATA, SPI_MEM_DATA_IN, SPI_MEM_DATA_OUT, }; @@ -123,6 +125,49 @@ struct spi_mem_op { } /** + * struct spi_mem_dirmap_info - Direct mapping information + * @op_tmpl: operation template that should be used by the direct mapping when + * the memory device is accessed + * @offset: absolute offset this direct mapping is pointing to + * @length: length in byte of this direct mapping + * + * These information are used by the controller specific implementation to know + * the portion of memory that is directly mapped and the spi_mem_op that should + * be used to access the device. + * A direct mapping is only valid for one direction (read or write) and this + * direction is directly encoded in the ->op_tmpl.data.dir field. + */ +struct spi_mem_dirmap_info { + struct spi_mem_op op_tmpl; + u64 offset; + u64 length; +}; + +/** + * struct spi_mem_dirmap_desc - Direct mapping descriptor + * @mem: the SPI memory device this direct mapping is attached to + * @info: information passed at direct mapping creation time + * @nodirmap: set to 1 if the SPI controller does not implement + * ->mem_ops->dirmap_create() or when this function returned an + * error. If @nodirmap is true, all spi_mem_dirmap_{read,write}() + * calls will use spi_mem_exec_op() to access the memory. This is a + * degraded mode that allows spi_mem drivers to use the same code + * no matter whether the controller supports direct mapping or not + * @priv: field pointing to controller specific data + * + * Common part of a direct mapping descriptor. This object is created by + * spi_mem_dirmap_create() and controller implementation of ->create_dirmap() + * can create/attach direct mapping resources to the descriptor in the ->priv + * field. + */ +struct spi_mem_dirmap_desc { + struct spi_mem *mem; + struct spi_mem_dirmap_info info; + unsigned int nodirmap; + void *priv; +}; + +/** * struct spi_mem - describes a SPI memory device * @spi: the underlying SPI device * @drvpriv: spi_mem_driver private data @@ -177,10 +222,32 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem) * Note that if the implementation of this function allocates memory * dynamically, then it should do so with devm_xxx(), as we don't * have a ->free_name() function. + * @dirmap_create: create a direct mapping descriptor that can later be used to + * access the memory device. 
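The dirmap structures above pair with spi_mem_dirmap_create()/_read()/_write()/_destroy(), declared a little further down in the same header. A read-path sketch for a hypothetical NOR-style device; the opcode, sizes and the ERR_PTR() return convention of _create() are assumptions of this sketch, and a real driver would create the descriptor once at probe time rather than per read:

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

static ssize_t foo_flash_read(struct spi_mem *mem, u64 from, size_t len,
                              void *buf)
{
        struct spi_mem_dirmap_info info = {
                .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),  /* slow read */
                                      SPI_MEM_OP_ADDR(3, 0, 1),
                                      SPI_MEM_OP_NO_DUMMY,
                                      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
                .offset = 0,
                .length = SZ_16M,       /* whole (hypothetical) device */
        };
        struct spi_mem_dirmap_desc *desc;
        ssize_t ret;

        desc = spi_mem_dirmap_create(mem, &info);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        /* May return less than @len, e.g. when crossing the mapped window;
         * a real driver would loop until everything is transferred. */
        ret = spi_mem_dirmap_read(desc, from, len, buf);

        spi_mem_dirmap_destroy(desc);
        return ret;
}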
This method is optional + * @dirmap_destroy: destroy a memory descriptor previous created by + * ->dirmap_create() + * @dirmap_read: read data from the memory device using the direct mapping + * created by ->dirmap_create(). The function can return less + * data than requested (for example when the request is crossing + * the currently mapped area), and the caller of + * spi_mem_dirmap_read() is responsible for calling it again in + * this case. + * @dirmap_write: write data to the memory device using the direct mapping + * created by ->dirmap_create(). The function can return less + * data than requested (for example when the request is crossing + * the currently mapped area), and the caller of + * spi_mem_dirmap_write() is responsible for calling it again in + * this case. * * This interface should be implemented by SPI controllers providing an * high-level interface to execute SPI memory operation, which is usually the * case for QSPI controllers. + * + * Note on ->dirmap_{read,write}(): drivers should avoid accessing the direct + * mapping from the CPU because doing that can stall the CPU waiting for the + * SPI mem transaction to finish, and this will make real-time maintainers + * unhappy and might make your system less reactive. Instead, drivers should + * use DMA to access this direct mapping. */ struct spi_controller_mem_ops { int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op); @@ -189,6 +256,12 @@ struct spi_controller_mem_ops { int (*exec_op)(struct spi_mem *mem, const struct spi_mem_op *op); const char *(*get_name)(struct spi_mem *mem); + int (*dirmap_create)(struct spi_mem_dirmap_desc *desc); + void (*dirmap_destroy)(struct spi_mem_dirmap_desc *desc); + ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, void *buf); + ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, const void *buf); }; /** @@ -249,6 +322,15 @@ int spi_mem_exec_op(struct spi_mem *mem, const char *spi_mem_get_name(struct spi_mem *mem); +struct spi_mem_dirmap_desc * +spi_mem_dirmap_create(struct spi_mem *mem, + const struct spi_mem_dirmap_info *info); +void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc); +ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, void *buf); +ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, const void *buf); + int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv, struct module *owner); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 6be77fa5ab90..314d922ca607 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -154,7 +154,10 @@ struct spi_device { #define SPI_TX_QUAD 0x200 /* transmit with 4 wires */ #define SPI_RX_DUAL 0x400 /* receive with 2 wires */ #define SPI_RX_QUAD 0x800 /* receive with 4 wires */ -#define SPI_CS_WORD 0x1000 /* toggle cs after each word */ +#define SPI_CS_WORD 0x1000 /* toggle cs after each word */ +#define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */ +#define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */ +#define SPI_3WIRE_HIZ 0x8000 /* high impedance turnaround */ int irq; void *controller_state; void *controller_data; diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 67135d4a8a30..c614375cd264 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -38,20 +38,20 @@ struct srcu_struct; #ifdef CONFIG_DEBUG_LOCK_ALLOC -int __init_srcu_struct(struct srcu_struct *sp, const char *name, +int __init_srcu_struct(struct srcu_struct *ssp, 
const char *name, struct lock_class_key *key); -#define init_srcu_struct(sp) \ +#define init_srcu_struct(ssp) \ ({ \ static struct lock_class_key __srcu_key; \ \ - __init_srcu_struct((sp), #sp, &__srcu_key); \ + __init_srcu_struct((ssp), #ssp, &__srcu_key); \ }) #define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name }, #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -int init_srcu_struct(struct srcu_struct *sp); +int init_srcu_struct(struct srcu_struct *ssp); #define __SRCU_DEP_MAP_INIT(srcu_name) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -67,28 +67,28 @@ int init_srcu_struct(struct srcu_struct *sp); struct srcu_struct { }; #endif -void call_srcu(struct srcu_struct *sp, struct rcu_head *head, +void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, void (*func)(struct rcu_head *head)); -void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced); -int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp); -void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); -void synchronize_srcu(struct srcu_struct *sp); +void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced); +int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); +void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); +void synchronize_srcu(struct srcu_struct *ssp); /** * cleanup_srcu_struct - deconstruct a sleep-RCU structure - * @sp: structure to clean up. + * @ssp: structure to clean up. * * Must invoke this after you are finished using a given srcu_struct that * was initialized via init_srcu_struct(), else you leak memory. */ -static inline void cleanup_srcu_struct(struct srcu_struct *sp) +static inline void cleanup_srcu_struct(struct srcu_struct *ssp) { - _cleanup_srcu_struct(sp, false); + _cleanup_srcu_struct(ssp, false); } /** * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure - * @sp: structure to clean up. + * @ssp: structure to clean up. * * Must invoke this after you are finished using a given srcu_struct that * was initialized via init_srcu_struct(), else you leak memory. Also, @@ -103,16 +103,16 @@ static inline void cleanup_srcu_struct(struct srcu_struct *sp) * (with high probability, anyway), and will also cause the srcu_struct * to be leaked. */ -static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp) +static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp) { - _cleanup_srcu_struct(sp, true); + _cleanup_srcu_struct(ssp, true); } #ifdef CONFIG_DEBUG_LOCK_ALLOC /** * srcu_read_lock_held - might we be in SRCU read-side critical section? - * @sp: The srcu_struct structure to check + * @ssp: The srcu_struct structure to check * * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, @@ -126,16 +126,16 @@ static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp) * relies on normal RCU, it can be called from the CPU which * is in the idle loop from an RCU point of view or offline. 
*/ -static inline int srcu_read_lock_held(const struct srcu_struct *sp) +static inline int srcu_read_lock_held(const struct srcu_struct *ssp) { if (!debug_lockdep_rcu_enabled()) return 1; - return lock_is_held(&sp->dep_map); + return lock_is_held(&ssp->dep_map); } #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -static inline int srcu_read_lock_held(const struct srcu_struct *sp) +static inline int srcu_read_lock_held(const struct srcu_struct *ssp) { return 1; } @@ -145,7 +145,7 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp) /** * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing * @p: the pointer to fetch and protect for later dereferencing - * @sp: pointer to the srcu_struct, which is used to check that we + * @ssp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. * @c: condition to check for update-side use * @@ -154,29 +154,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp) * to 1. The @c argument will normally be a logical expression containing * lockdep_is_held() calls. */ -#define srcu_dereference_check(p, sp, c) \ - __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu) +#define srcu_dereference_check(p, ssp, c) \ + __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu) /** * srcu_dereference - fetch SRCU-protected pointer for later dereferencing * @p: the pointer to fetch and protect for later dereferencing - * @sp: pointer to the srcu_struct, which is used to check that we + * @ssp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. * * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU * is enabled, invoking this outside of an RCU read-side critical * section will result in an RCU-lockdep splat. */ -#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) +#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0) /** * srcu_dereference_notrace - no tracing and no lockdep calls from here + * @p: the pointer to fetch and protect for later dereferencing + * @ssp: pointer to the srcu_struct, which is used to check that we + * really are in an SRCU read-side critical section. */ -#define srcu_dereference_notrace(p, sp) srcu_dereference_check((p), (sp), 1) +#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1) /** * srcu_read_lock - register a new reader for an SRCU-protected structure. - * @sp: srcu_struct in which to register the new reader. + * @ssp: srcu_struct in which to register the new reader. * * Enter an SRCU read-side critical section. Note that SRCU read-side * critical sections may be nested. However, it is illegal to @@ -191,44 +194,44 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp) * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() * was invoked in process context. */ -static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) +static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp) { int retval; - retval = __srcu_read_lock(sp); - rcu_lock_acquire(&(sp)->dep_map); + retval = __srcu_read_lock(ssp); + rcu_lock_acquire(&(ssp)->dep_map); return retval; } /* Used by tracing, cannot be traced and cannot invoke lockdep. 
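(Illustrative aside, not from the patch: a minimal read-side sketch using the helpers renamed above; the SRCU domain, struct and pointer names are invented.)

        #include <linux/srcu.h>

        DEFINE_SRCU(example_srcu);

        struct item {
                int value;
        };
        static struct item __rcu *item_ptr;

        static int read_item_value(void)
        {
                struct item *p;
                int idx, val = -1;

                idx = srcu_read_lock(&example_srcu);
                p = srcu_dereference(item_ptr, &example_srcu);
                if (p)
                        val = p->value;
                srcu_read_unlock(&example_srcu, idx);
                return val;
        }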
*/ static inline notrace int -srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp) +srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) { int retval; - retval = __srcu_read_lock(sp); + retval = __srcu_read_lock(ssp); return retval; } /** * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. - * @sp: srcu_struct in which to unregister the old reader. + * @ssp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock(). * * Exit an SRCU read-side critical section. */ -static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) - __releases(sp) +static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) + __releases(ssp) { - rcu_lock_release(&(sp)->dep_map); - __srcu_read_unlock(sp, idx); + rcu_lock_release(&(ssp)->dep_map); + __srcu_read_unlock(ssp, idx); } /* Used by tracing, cannot be traced and cannot call lockdep. */ static inline notrace void -srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp) +srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp) { - __srcu_read_unlock(sp, idx); + __srcu_read_unlock(ssp, idx); } /** diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index f41d2fb09f87..b19216aaaef2 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -60,7 +60,7 @@ void srcu_drive_gp(struct work_struct *wp); #define DEFINE_STATIC_SRCU(name) \ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name) -void synchronize_srcu(struct srcu_struct *sp); +void synchronize_srcu(struct srcu_struct *ssp); /* * Counts the new reader in the appropriate per-CPU element of the @@ -68,36 +68,36 @@ void synchronize_srcu(struct srcu_struct *sp); * __srcu_read_unlock() must be in the same handler instance. Returns an * index that must be passed to the matching srcu_read_unlock(). */ -static inline int __srcu_read_lock(struct srcu_struct *sp) +static inline int __srcu_read_lock(struct srcu_struct *ssp) { int idx; - idx = READ_ONCE(sp->srcu_idx); - WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1); + idx = READ_ONCE(ssp->srcu_idx); + WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1); return idx; } -static inline void synchronize_srcu_expedited(struct srcu_struct *sp) +static inline void synchronize_srcu_expedited(struct srcu_struct *ssp) { - synchronize_srcu(sp); + synchronize_srcu(ssp); } -static inline void srcu_barrier(struct srcu_struct *sp) +static inline void srcu_barrier(struct srcu_struct *ssp) { - synchronize_srcu(sp); + synchronize_srcu(ssp); } /* Defined here to avoid size increase for non-torture kernels. */ -static inline void srcu_torture_stats_print(struct srcu_struct *sp, +static inline void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) { int idx; - idx = READ_ONCE(sp->srcu_idx) & 0x1; + idx = READ_ONCE(ssp->srcu_idx) & 0x1; pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", tt, tf, idx, - READ_ONCE(sp->srcu_lock_nesting[!idx]), - READ_ONCE(sp->srcu_lock_nesting[idx])); + READ_ONCE(ssp->srcu_lock_nesting[!idx]), + READ_ONCE(ssp->srcu_lock_nesting[idx])); } #endif diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 0ae91b3a7406..6f292bd3e7db 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -51,7 +51,7 @@ struct srcu_data { unsigned long grpmask; /* Mask for leaf srcu_node */ /* ->srcu_data_have_cbs[]. 
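(Illustrative aside, continuing the invented example_srcu/item_ptr sketch above: the update side uses the same renamed entry points, publishing a new pointer and then waiting for all SRCU readers before freeing the old one. Writer serialization is assumed to be handled by the caller.)

        #include <linux/slab.h>

        static void publish_item(struct item *new_item)
        {
                struct item *old;

                old = rcu_dereference_protected(item_ptr, 1);   /* writers serialized elsewhere */
                rcu_assign_pointer(item_ptr, new_item);

                synchronize_srcu(&example_srcu);        /* wait for readers still using 'old' */
                kfree(old);                             /* or defer the free with call_srcu() */
        }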
*/ int cpu; - struct srcu_struct *sp; + struct srcu_struct *ssp; }; /* @@ -138,8 +138,8 @@ struct srcu_struct { #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) -void synchronize_srcu_expedited(struct srcu_struct *sp); -void srcu_barrier(struct srcu_struct *sp); -void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf); +void synchronize_srcu_expedited(struct srcu_struct *ssp); +void srcu_barrier(struct srcu_struct *ssp); +void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf); #endif diff --git a/include/linux/string.h b/include/linux/string.h index 27d0482e5e05..7927b875f80c 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -456,4 +456,24 @@ static inline void memcpy_and_pad(void *dest, size_t dest_len, memcpy(dest, src, dest_len); } +/** + * str_has_prefix - Test if a string has a given prefix + * @str: The string to test + * @prefix: The string to see if @str starts with + * + * A common way to test a prefix of a string is to do: + * strncmp(str, prefix, sizeof(prefix) - 1) + * + * But this can lead to bugs due to typos, or if prefix is a pointer + * and not a constant. Instead use str_has_prefix(). + * + * Returns: 0 if @str does not start with @prefix + strlen(@prefix) if @str does start with @prefix + */ +static __always_inline size_t str_has_prefix(const char *str, const char *prefix) +{ + size_t len = strlen(prefix); + return strncmp(str, prefix, len) == 0 ? len : 0; +} + #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index c4db9424b63b..eed3cb16ccf1 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -37,21 +37,9 @@ struct rpcsec_gss_info; -/* auth_cred ac_flags bits */ -enum { - RPC_CRED_KEY_EXPIRE_SOON = 1, /* underlying cred key will expire soon */ - RPC_CRED_NOTIFY_TIMEOUT = 2, /* nofity generic cred when underlying - key will expire soon */ -}; - -/* Work around the lack of a VFS credential */ struct auth_cred { - kuid_t uid; - kgid_t gid; - struct group_info *group_info; - const char *principal; - unsigned long ac_flags; - unsigned char machine_cred : 1; + const struct cred *cred; + const char *principal; /* If present, this is a machine credential */ }; /* @@ -68,8 +56,7 @@ struct rpc_cred { unsigned long cr_expire; /* when to gc */ unsigned long cr_flags; /* various flags */ refcount_t cr_count; /* ref count */ - - kuid_t cr_uid; + const struct cred *cr_cred; /* per-flavor data */ }; @@ -78,8 +65,7 @@ struct rpc_cred { #define RPCAUTH_CRED_HASHED 2 #define RPCAUTH_CRED_NEGATIVE 3 -/* rpc_auth au_flags */ -#define RPCAUTH_AUTH_NO_CRKEY_TIMEOUT 0x0001 /* underlying cred has no key timeout */ +const struct cred *rpc_machine_cred(void); /* * Client authentication handle @@ -116,7 +102,6 @@ struct rpc_auth_create_args { /* Flags for rpcauth_lookupcred() */ #define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */ -#define RPCAUTH_LOOKUP_RCU 0x02 /* lock-less lookup */ /* * Client authentication ops @@ -146,7 +131,6 @@ struct rpc_credops { void (*crdestroy)(struct rpc_cred *); int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); - struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int); __be32 * (*crmarshal)(struct rpc_task *, __be32 *); int (*crrefresh)(struct rpc_task *); __be32 * (*crvalidate)(struct rpc_task *, __be32 *); @@ -155,7 +139,6 @@ struct rpc_credops { int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t, void *, __be32 *, void 
*); int (*crkey_timeout)(struct rpc_cred *); - bool (*crkey_to_expire)(struct rpc_cred *); char * (*crstringify_acceptor)(struct rpc_cred *); bool (*crneed_reencode)(struct rpc_task *); }; @@ -164,16 +147,10 @@ extern const struct rpc_authops authunix_ops; extern const struct rpc_authops authnull_ops; int __init rpc_init_authunix(void); -int __init rpc_init_generic_auth(void); int __init rpcauth_init_module(void); void rpcauth_remove_module(void); -void rpc_destroy_generic_auth(void); void rpc_destroy_authunix(void); -struct rpc_cred * rpc_lookup_cred(void); -struct rpc_cred * rpc_lookup_cred_nonblock(void); -struct rpc_cred * rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t); -struct rpc_cred * rpc_lookup_machine_cred(const char *service_name); int rpcauth_register(const struct rpc_authops *); int rpcauth_unregister(const struct rpc_authops *); struct rpc_auth * rpcauth_create(const struct rpc_auth_create_args *, @@ -187,7 +164,6 @@ int rpcauth_list_flavors(rpc_authflavor_t *, int); struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t); void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); -struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); void put_rpccred(struct rpc_cred *); __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); __be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); @@ -200,9 +176,6 @@ int rpcauth_uptodatecred(struct rpc_task *); int rpcauth_init_credcache(struct rpc_auth *); void rpcauth_destroy_credcache(struct rpc_auth *); void rpcauth_clear_credcache(struct rpc_cred_cache *); -int rpcauth_key_timeout_notify(struct rpc_auth *, - struct rpc_cred *); -bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *); char * rpcauth_stringify_acceptor(struct rpc_cred *); static inline @@ -213,21 +186,5 @@ struct rpc_cred *get_rpccred(struct rpc_cred *cred) return NULL; } -/** - * get_rpccred_rcu - get a reference to a cred using rcu-protected pointer - * @cred: cred of which to take a reference - * - * In some cases, we may have a pointer to a credential to which we - * want to take a reference, but don't already have one. Because these - * objects are freed using RCU, we can access the cr_count while its - * on its way to destruction and only take a reference if it's not already - * zero. 
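(Illustrative aside for the str_has_prefix() helper added to string.h above: it returns the prefix length on a match and 0 otherwise, so a caller can skip past the prefix without repeating its length. parse_trace_option() below is a made-up helper.)

        static int parse_option(const char *arg)
        {
                size_t len;

                len = str_has_prefix(arg, "trace=");
                if (len)
                        return parse_trace_option(arg + len);   /* made-up helper */

                return -EINVAL;
        }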
- */ -static inline struct rpc_cred * -get_rpccred_rcu(struct rpc_cred *cred) -{ - return get_rpccred(cred); -} - #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_AUTH_H */ diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index 28721cf73ec3..d4229a78524a 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -47,11 +47,14 @@ void xprt_free_bc_rqst(struct rpc_rqst *req); /* * Determine if a shared backchannel is in use */ -static inline int svc_is_backchannel(const struct svc_rqst *rqstp) +static inline bool svc_is_backchannel(const struct svc_rqst *rqstp) { - if (rqstp->rq_server->sv_bc_xprt) - return 1; - return 0; + return rqstp->rq_server->sv_bc_enabled; +} + +static inline void set_bc_enabled(struct svc_serv *serv) +{ + serv->sv_bc_enabled = true; } #else /* CONFIG_SUNRPC_BACKCHANNEL */ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, @@ -60,9 +63,13 @@ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, return 0; } -static inline int svc_is_backchannel(const struct svc_rqst *rqstp) +static inline bool svc_is_backchannel(const struct svc_rqst *rqstp) +{ + return false; +} + +static inline void set_bc_enabled(struct svc_serv *serv) { - return 0; } static inline void xprt_free_bc_request(struct rpc_rqst *req) diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 73d5c4a870fa..1c441714d569 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -66,6 +66,7 @@ struct rpc_clnt { struct rpc_rtt cl_rtt_default; struct rpc_timeout cl_timeout_default; const struct rpc_program *cl_program; + const char * cl_principal; /* use for machine cred */ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *cl_debugfs; /* debugfs directory */ #endif @@ -127,8 +128,8 @@ struct rpc_create_args { }; struct rpc_add_xprt_test { - int (*add_xprt_test)(struct rpc_clnt *, - struct rpc_xprt *, + void (*add_xprt_test)(struct rpc_clnt *clnt, + struct rpc_xprt *xprt, void *calldata); void *data; }; diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 7b540c066594..219aa3910a0c 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -26,7 +26,7 @@ struct rpc_message { const struct rpc_procinfo *rpc_proc; /* Procedure information */ void * rpc_argp; /* Arguments */ void * rpc_resp; /* Result */ - struct rpc_cred * rpc_cred; /* Credentials */ + const struct cred * rpc_cred; /* Credentials */ }; struct rpc_call_ops; @@ -71,6 +71,7 @@ struct rpc_task { struct rpc_clnt * tk_client; /* RPC client */ struct rpc_xprt * tk_xprt; /* Transport */ + struct rpc_cred * tk_op_cred; /* cred being operated on */ struct rpc_rqst * tk_rqstp; /* RPC request */ @@ -105,6 +106,7 @@ struct rpc_task_setup { struct rpc_task *task; struct rpc_clnt *rpc_client; struct rpc_xprt *rpc_xprt; + struct rpc_cred *rpc_op_cred; /* credential being operated on */ const struct rpc_message *rpc_message; const struct rpc_call_ops *callback_ops; void *callback_data; @@ -118,6 +120,7 @@ struct rpc_task_setup { */ #define RPC_TASK_ASYNC 0x0001 /* is an async task */ #define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */ +#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */ #define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */ #define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */ #define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ @@ -131,7 +134,6 @@ struct rpc_task_setup { #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) #define RPC_IS_SWAPPER(t) 
((t)->tk_flags & RPC_TASK_SWAPPER) -#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) #define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) #define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT)) #define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 73e130a840ce..e52385340b3b 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -109,7 +109,7 @@ struct svc_serv { spinlock_t sv_cb_lock; /* protects the svc_cb_list */ wait_queue_head_t sv_cb_waitq; /* sleep here if there are no * entries in the svc_cb_list */ - struct svc_xprt *sv_bc_xprt; /* callback on fore channel */ + bool sv_bc_enabled; /* service uses backchannel */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ }; @@ -295,9 +295,12 @@ struct svc_rqst { struct svc_cacherep * rq_cacherep; /* cache info */ struct task_struct *rq_task; /* service thread */ spinlock_t rq_lock; /* per-request lock */ + struct net *rq_bc_net; /* pointer to backchannel's + * net namespace + */ }; -#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net) +#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net) /* * Rigorous type checking on sockaddr type conversions diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index e6e26918504c..981f0d726ad4 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -135,6 +135,7 @@ struct svc_rdma_recv_ctxt { u32 rc_byte_len; unsigned int rc_page_count; unsigned int rc_hdr_count; + u32 rc_inv_rkey; struct page *rc_pages[RPCSVC_MAXPAGES]; }; @@ -192,7 +193,6 @@ extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); extern void svc_sq_reap(struct svcxprt_rdma *); extern void svc_rq_reap(struct svcxprt_rdma *); -extern void svc_rdma_prep_reply_hdr(struct svc_rqst *); extern struct svc_xprt_class svc_rdma_class; #ifdef CONFIG_SUNRPC_BACKCHANNEL diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 6b7a86c4d6e6..b3f9577e17d6 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -20,7 +20,6 @@ struct svc_xprt_ops { struct svc_xprt *(*xpo_accept)(struct svc_xprt *); int (*xpo_has_wspace)(struct svc_xprt *); int (*xpo_recvfrom)(struct svc_rqst *); - void (*xpo_prep_reply_hdr)(struct svc_rqst *); int (*xpo_sendto)(struct svc_rqst *); void (*xpo_release_rqst)(struct svc_rqst *); void (*xpo_detach)(struct svc_xprt *); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 43106ffa6788..2ec128060239 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) buf->head[0].iov_base = start; buf->head[0].iov_len = len; buf->tail[0].iov_len = 0; - buf->bvec = NULL; buf->pages = NULL; buf->page_len = 0; buf->flags = 0; diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index a4ab4f8d9140..ad7e910b119d 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -157,7 +157,6 @@ struct rpc_xprt_ops { void (*inject_disconnect)(struct rpc_xprt *xprt); int (*bc_setup)(struct rpc_xprt *xprt, unsigned int min_reqs); - int (*bc_up)(struct svc_serv *serv, struct net *net); size_t (*bc_maxpayload)(struct rpc_xprt *xprt); void (*bc_free_rqst)(struct rpc_rqst *rqst); void (*bc_destroy)(struct rpc_xprt *xprt, diff --git a/include/linux/swap.h 
b/include/linux/swap.h index d8a07a4f171d..622025ac1461 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -18,6 +18,8 @@ struct notifier_block; struct bio; +struct pagevec; + #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ #define SWAP_FLAG_PRIO_MASK 0x7fff #define SWAP_FLAG_PRIO_SHIFT 0 @@ -233,7 +235,6 @@ struct swap_info_struct { unsigned long flags; /* SWP_USED etc: see above */ signed short prio; /* swap priority of this type */ struct plist_node list; /* entry in swap_active_head */ - struct plist_node avail_lists[MAX_NUMNODES];/* entry in swap_avail_heads */ signed char type; /* strange name for an index */ unsigned int max; /* extent of the swap_map */ unsigned char *swap_map; /* vmalloc'ed array of usage counts */ @@ -274,6 +275,16 @@ struct swap_info_struct { */ struct work_struct discard_work; /* discard worker */ struct swap_cluster_list discard_clusters; /* discard clusters list */ + struct plist_node avail_lists[0]; /* + * entries in swap_avail_heads, one + * entry per node. + * Must be last as the number of the + * array is nr_node_ids, which is not + * a fixed value so have to allocate + * dynamically. + * And it has to be an array so that + * plist_for_each_* can work. + */ }; #ifdef CONFIG_64BIT @@ -308,7 +319,6 @@ void workingset_update_node(struct xa_node *node); } while (0) /* linux/mm/page_alloc.c */ -extern unsigned long totalram_pages; extern unsigned long totalreserve_pages; extern unsigned long nr_free_buffer_pages(void); extern unsigned long nr_free_pagecache_pages(void); @@ -358,18 +368,12 @@ extern unsigned long vm_total_pages; extern int node_reclaim_mode; extern int sysctl_min_unmapped_ratio; extern int sysctl_min_slab_ratio; -extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int); #else #define node_reclaim_mode 0 -static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, - unsigned int order) -{ - return 0; -} #endif extern int page_evictable(struct page *page); -extern void check_move_unevictable_pages(struct page **, int nr_pages); +extern void check_move_unevictable_pages(struct pagevec *pvec); extern int kswapd_run(int nid); extern void kswapd_stop(int nid); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index a387b59640a4..7c007ed7505f 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -16,8 +16,6 @@ enum swiotlb_force { SWIOTLB_NO_FORCE, /* swiotlb=noforce */ }; -extern enum swiotlb_force swiotlb_force; - /* * Maximum allowable number of contiguous slabs to map, * must be a power of 2. What is the appropriate value ? @@ -46,9 +44,6 @@ enum dma_sync_target { SYNC_FOR_DEVICE = 1, }; -/* define the last possible byte of physical address space as a mapping error */ -#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0) - extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, phys_addr_t phys, size_t size, @@ -65,56 +60,44 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev, size_t size, enum dma_data_direction dir, enum dma_sync_target target); -/* Accessory functions. 
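(Illustrative aside, not the exact mm/swapfile.c code: because avail_lists[] is now a trailing array with one entry per node, the swap_info_struct allocation has to be sized at runtime, roughly as below.)

        struct swap_info_struct *p;

        p = kvzalloc(sizeof(*p) + nr_node_ids * sizeof(p->avail_lists[0]),
                     GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);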
*/ - -extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs); -extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs); - -extern int -swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, - enum dma_data_direction dir, - unsigned long attrs); - -extern void -swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, - int nelems, enum dma_data_direction dir, - unsigned long attrs); - -extern void -swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir); - -extern void -swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir); - -extern void -swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir); - -extern void -swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir); - extern int swiotlb_dma_supported(struct device *hwdev, u64 mask); #ifdef CONFIG_SWIOTLB -extern void __init swiotlb_exit(void); +extern enum swiotlb_force swiotlb_force; +extern phys_addr_t io_tlb_start, io_tlb_end; + +static inline bool is_swiotlb_buffer(phys_addr_t paddr) +{ + return paddr >= io_tlb_start && paddr < io_tlb_end; +} + +bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr, + size_t size, enum dma_data_direction dir, unsigned long attrs); +void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); #else -static inline void swiotlb_exit(void) { } -static inline unsigned int swiotlb_max_segment(void) { return 0; } -#endif +#define swiotlb_force SWIOTLB_NO_FORCE +static inline bool is_swiotlb_buffer(phys_addr_t paddr) +{ + return false; +} +static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys, + dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + return false; +} +static inline void swiotlb_exit(void) +{ +} +static inline unsigned int swiotlb_max_segment(void) +{ + return 0; +} +#endif /* CONFIG_SWIOTLB */ extern void swiotlb_print_info(void); extern void swiotlb_set_max_segment(unsigned int); -extern const struct dma_map_ops swiotlb_dma_ops; - #endif /* __LINUX_SWIOTLB_H */ diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h index ab400af6f0ce..eee0412bdf4b 100644 --- a/include/linux/switchtec.h +++ b/include/linux/switchtec.h @@ -29,6 +29,7 @@ #define SWITCHTEC_EVENT_EN_IRQ BIT(3) #define SWITCHTEC_EVENT_FATAL BIT(4) +#define SWITCHTEC_DMA_MRPC_EN BIT(0) enum { SWITCHTEC_GAS_MRPC_OFFSET = 0x0000, SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000, @@ -46,6 +47,10 @@ struct mrpc_regs { u32 cmd; u32 status; u32 ret_value; + u32 dma_en; + u64 dma_addr; + u32 dma_vector; + u32 dma_ver; } __packed; enum mrpc_status { @@ -342,6 +347,14 @@ struct pff_csr_regs { struct switchtec_ntb; +struct dma_mrpc_output { + u32 status; + u32 cmd_id; + u32 rtn_code; + u32 output_size; + u8 data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; +}; + struct switchtec_dev { struct pci_dev *pdev; struct device dev; @@ -381,6 +394,9 @@ struct switchtec_dev { u8 link_event_count[SWITCHTEC_MAX_PFF_CSR]; struct switchtec_ntb *sndev; + + struct dma_mrpc_output *dma_mrpc; + dma_addr_t dma_mrpc_dma_addr; }; static inline struct switchtec_dev *to_stdev(struct device *dev) diff --git 
a/include/linux/syscalls.h b/include/linux/syscalls.h index 2ac3d13a915b..257cccba3062 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -296,12 +296,18 @@ asmlinkage long sys_io_getevents(aio_context_t ctx_id, long min_nr, long nr, struct io_event __user *events, - struct timespec __user *timeout); + struct __kernel_timespec __user *timeout); asmlinkage long sys_io_pgetevents(aio_context_t ctx_id, long min_nr, long nr, struct io_event __user *events, - struct timespec __user *timeout, + struct __kernel_timespec __user *timeout, + const struct __aio_sigset *sig); +asmlinkage long sys_io_pgetevents_time32(aio_context_t ctx_id, + long min_nr, + long nr, + struct io_event __user *events, + struct old_timespec32 __user *timeout, const struct __aio_sigset *sig); /* fs/xattr.c */ @@ -466,10 +472,16 @@ asmlinkage long sys_sendfile64(int out_fd, int in_fd, /* fs/select.c */ asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, - fd_set __user *, struct timespec __user *, + fd_set __user *, struct __kernel_timespec __user *, + void __user *); +asmlinkage long sys_pselect6_time32(int, fd_set __user *, fd_set __user *, + fd_set __user *, struct old_timespec32 __user *, void __user *); asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, - struct timespec __user *, const sigset_t __user *, + struct __kernel_timespec __user *, const sigset_t __user *, + size_t); +asmlinkage long sys_ppoll_time32(struct pollfd __user *, unsigned int, + struct old_timespec32 __user *, const sigset_t __user *, size_t); /* fs/signalfd.c */ @@ -541,7 +553,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags); /* kernel/futex.c */ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, - struct timespec __user *utime, u32 __user *uaddr2, + struct __kernel_timespec __user *utime, u32 __user *uaddr2, u32 val3); asmlinkage long sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, @@ -637,6 +649,10 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, siginfo_t __user *uinfo, const struct __kernel_timespec __user *uts, size_t sigsetsize); +asmlinkage long sys_rt_sigtimedwait_time32(const sigset_t __user *uthese, + siginfo_t __user *uinfo, + const struct old_timespec32 __user *uts, + size_t sigsetsize); asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo); /* kernel/sys.c */ @@ -831,6 +847,9 @@ asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int); asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags, struct __kernel_timespec __user *timeout); +asmlinkage long sys_recvmmsg_time32(int fd, struct mmsghdr __user *msg, + unsigned int vlen, unsigned flags, + struct old_timespec32 __user *timeout); asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, int options, struct rusage __user *ru); @@ -879,7 +898,7 @@ asmlinkage long sys_renameat2(int olddfd, const char __user *oldname, int newdfd, const char __user *newname, unsigned int flags); asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, - const char __user *uargs); + void __user *uargs); asmlinkage long sys_getrandom(char __user *buf, size_t count, unsigned int flags); asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 987cefa337de..786816cf4aa5 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -234,7 +234,7 @@ int __must_check 
sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); int __must_check sysfs_create_files(struct kobject *kobj, - const struct attribute **attr); + const struct attribute * const *attr); int __must_check sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode); struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, @@ -243,7 +243,7 @@ void sysfs_unbreak_active_protection(struct kernfs_node *kn); void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); -void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); +void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr); int __must_check sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr); @@ -342,7 +342,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj, } static inline int sysfs_create_files(struct kobject *kobj, - const struct attribute **attr) + const struct attribute * const *attr) { return 0; } @@ -377,7 +377,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj, } static inline void sysfs_remove_files(struct kobject *kobj, - const struct attribute **attr) + const struct attribute * const *attr) { } diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index b9626aa7e90c..3e2a80cc7b56 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -39,12 +39,13 @@ struct t10_pi_tuple { static inline u32 t10_pi_ref_tag(struct request *rq) { + unsigned int shift = ilog2(queue_logical_block_size(rq->q)); + #ifdef CONFIG_BLK_DEV_INTEGRITY - return blk_rq_pos(rq) >> - (rq->q->integrity.interval_exp - 9) & 0xffffffff; -#else - return -1U; + if (rq->q->integrity.interval_exp) + shift = rq->q->integrity.interval_exp; #endif + return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; } extern const struct blk_integrity_profile t10_pi_type1_crc; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 8ed77bb4ed86..a9b0280687d5 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -196,6 +196,7 @@ struct tcp_sock { u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ + u32 compressed_ack_rcv_nxt; u32 tsoffset; /* timestamp offset */ diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h deleted file mode 100644 index 9fb317970c01..000000000000 --- a/include/linux/thinkpad_acpi.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __THINKPAD_ACPI_H__ -#define __THINKPAD_ACPI_H__ - -/* These two functions return 0 if success, or negative error code - (e g -ENODEV if no led present) */ - -enum { - TPACPI_LED_MUTE, - TPACPI_LED_MICMUTE, - TPACPI_LED_MAX, -}; - -int tpacpi_led_set(int whichled, bool on); - -#endif diff --git a/include/linux/time32.h b/include/linux/time32.h index 61904a6c098f..118b9977080c 100644 --- a/include/linux/time32.h +++ b/include/linux/time32.h @@ -96,31 +96,6 @@ static inline int timespec_compare(const struct timespec *lhs, const struct time return lhs->tv_nsec - rhs->tv_nsec; } -extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); - -static inline struct timespec timespec_add(struct timespec lhs, - struct timespec rhs) -{ - struct timespec ts_delta; - - 
set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, - lhs.tv_nsec + rhs.tv_nsec); - return ts_delta; -} - -/* - * sub = lhs - rhs, in normalized form - */ -static inline struct timespec timespec_sub(struct timespec lhs, - struct timespec rhs) -{ - struct timespec ts_delta; - - set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, - lhs.tv_nsec - rhs.tv_nsec); - return ts_delta; -} - /* * Returns true if the timespec is norm, false if denorm: */ diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 29975e93fcb8..a8ab0f143ac4 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -262,18 +262,4 @@ void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock, struct timespec64 *boot_offset); extern int update_persistent_clock64(struct timespec64 now); -/* - * deprecated aliases, don't use in new code - */ -#define getnstimeofday64(ts) ktime_get_real_ts64(ts) - -static inline struct timespec64 current_kernel_time64(void) -{ - struct timespec64 ts; - - ktime_get_coarse_real_ts64(&ts); - - return ts; -} - #endif diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h index a502616f7e1c..cc59cc9e0e84 100644 --- a/include/linux/timekeeping32.h +++ b/include/linux/timekeeping32.h @@ -6,15 +6,6 @@ * over time so we can remove the file here. */ -static inline void do_gettimeofday(struct timeval *tv) -{ - struct timespec64 now; - - ktime_get_real_ts64(&now); - tv->tv_sec = now.tv_sec; - tv->tv_usec = now.tv_nsec/1000; -} - static inline unsigned long get_seconds(void) { return ktime_get_real_seconds(); @@ -52,10 +43,4 @@ static inline void getboottime(struct timespec *ts) *ts = timespec64_to_timespec(ts64); } -/* - * Persistent clock related interfaces - */ -extern void read_persistent_clock(struct timespec *ts); -extern int update_persistent_clock(struct timespec now); - #endif diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 4609b94142d4..b49a55cf775f 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -53,8 +53,8 @@ struct tpm_class_ops { #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) extern int tpm_is_tpm2(struct tpm_chip *chip); -extern int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf); -extern int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash); +extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf); +extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash); extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen); extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max); extern int tpm_seal_trusted(struct tpm_chip *chip, @@ -69,15 +69,18 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip) { return -ENODEV; } -static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) + +static inline int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) { return -ENODEV; } -static inline int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, + +static inline int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash) { return -ENODEV; } + static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) { return -ENODEV; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 4130a5497d40..8a62731673f7 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -471,7 +471,8 @@ void perf_event_detach_bpf_prog(struct perf_event *event); int perf_event_query_prog_array(struct perf_event 
*event, void __user *info); int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog); -struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name); +struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name); +void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr); @@ -502,10 +503,13 @@ static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf { return -EOPNOTSUPP; } -static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name) +static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) { return NULL; } +static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) +{ +} static inline int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 40b0b4c1bf7b..df20f8bdbfa3 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -83,8 +83,8 @@ static inline int ptrace_report_syscall(struct pt_regs *regs) * tracehook_report_syscall_entry - task is about to attempt a system call * @regs: user register state of current task * - * This will be called if %TIF_SYSCALL_TRACE has been set, when the - * current task has just entered the kernel for a system call. + * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set, + * when the current task has just entered the kernel for a system call. * Full user register state is available here. Changing the values * in @regs can affect the system call number and arguments to be tried. * It is safe to block here, preventing the system call from beginning. diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 538ba1a58f5b..9c3186578ce0 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -82,7 +82,7 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb) static inline void tracepoint_synchronize_unregister(void) { synchronize_srcu(&tracepoint_srcu); - synchronize_sched(); + synchronize_rcu(); } #else static inline void tracepoint_synchronize_unregister(void) @@ -166,7 +166,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) struct tracepoint_func *it_func_ptr; \ void *it_func; \ void *__data; \ - int __maybe_unused idx = 0; \ + int __maybe_unused __idx = 0; \ \ if (!(cond)) \ return; \ @@ -182,7 +182,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * doesn't work from the idle path. 
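(Illustrative aside for the bpf_get_raw_tracepoint()/bpf_put_raw_tracepoint() pair declared in trace_events.h above: the get/put naming indicates the lookup now takes a reference that the caller must drop if attaching fails. The tracepoint name, err and prog come from surrounding code and are assumptions here.)

        struct bpf_raw_event_map *btp;

        btp = bpf_get_raw_tracepoint("sched_switch");   /* name is illustrative */
        if (!btp)
                return -ENOENT;

        err = bpf_probe_register(btp, prog);
        if (err)
                bpf_put_raw_tracepoint(btp);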
\ */ \ if (rcuidle) { \ - idx = srcu_read_lock_notrace(&tracepoint_srcu); \ + __idx = srcu_read_lock_notrace(&tracepoint_srcu);\ rcu_irq_enter_irqson(); \ } \ \ @@ -198,7 +198,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) \ if (rcuidle) { \ rcu_irq_exit_irqson(); \ - srcu_read_unlock_notrace(&tracepoint_srcu, idx);\ + srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\ } \ \ preempt_enable_notrace(); \ diff --git a/include/linux/tty.h b/include/linux/tty.h index 414db2bce715..bfa4e2ee94a9 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -366,6 +366,7 @@ struct tty_file_private { #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ #define TTY_HUPPED 18 /* Post driver->hangup() */ #define TTY_HUPPING 19 /* Hangup in progress */ +#define TTY_LDISC_CHANGING 20 /* Change pending - non-block IO */ #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ /* Values for tty->flow_change */ @@ -383,6 +384,12 @@ static inline void tty_set_flow_change(struct tty_struct *tty, int val) smp_mb(); } +static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file) +{ + return file->f_flags & O_NONBLOCK || + test_bit(TTY_LDISC_CHANGING, &tty->flags); +} + static inline bool tty_io_error(struct tty_struct *tty) { return test_bit(TTY_IO_ERROR, &tty->flags); @@ -556,6 +563,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); extern void tty_release_struct(struct tty_struct *tty, int idx); extern int tty_release(struct inode *inode, struct file *filp); extern void tty_init_termios(struct tty_struct *tty); +extern void tty_save_termios(struct tty_struct *tty); extern int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty); diff --git a/include/linux/types.h b/include/linux/types.h index 9834e90aa010..c2615d6a019e 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -212,8 +212,8 @@ struct ustat { * weird ABI and we need to ask it explicitly. * * The alignment is required to guarantee that bit 0 of @next will be - * clear under normal conditions -- as long as we use call_rcu(), - * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. + * clear under normal conditions -- as long as we use call_rcu() or + * call_srcu() to queue the callback. 
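(Illustrative aside for the tty_io_nonblock() helper added to tty.h above: it folds the new TTY_LDISC_CHANGING test into the usual O_NONBLOCK check, so a line-discipline read loop can bail out with -EAGAIN while an ldisc change is pending. The surrounding wait logic is only sketched.)

        if (!input_available_p(tty, 0)) {               /* n_tty-style "no data yet" check */
                if (tty_io_nonblock(tty, file)) {
                        retval = -EAGAIN;
                        break;
                }
                /* ...otherwise sleep and retry as before... */
        }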
* * This guarantee is important for few reasons: * - future call_rcu_lazy() will make use of lower bits in the pointer; diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index efe79c1cdd47..37b226e8df13 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -6,9 +6,6 @@ #include <linux/thread_info.h> #include <linux/kasan-checks.h> -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) #include <asm/uaccess.h> @@ -111,7 +108,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned long res = n; might_fault(); - if (likely(access_ok(VERIFY_READ, from, n))) { + if (likely(access_ok(from, n))) { kasan_check_write(to, n); res = raw_copy_from_user(to, from, n); } @@ -129,7 +126,7 @@ static inline unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); - if (access_ok(VERIFY_WRITE, to, n)) { + if (access_ok(to, n)) { kasan_check_read(from, n); n = raw_copy_to_user(to, from, n); } @@ -160,7 +157,7 @@ static __always_inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n) { might_fault(); - if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n)) + if (access_ok(to, n) && access_ok(from, n)) n = raw_copy_in_user(to, from, n); return n; } @@ -267,7 +264,7 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); probe_kernel_read(&retval, addr, sizeof(retval)) #ifndef user_access_begin -#define user_access_begin() do { } while (0) +#define user_access_begin(ptr,len) access_ok(ptr, len) #define user_access_end() do { } while (0) #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0) #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) diff --git a/include/linux/udp.h b/include/linux/udp.h index 320d49d85484..2725c83395bf 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -49,7 +49,13 @@ struct udp_sock { unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ - no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ + no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ + encap_enabled:1, /* This socket enabled encap + * processing; UDP tunnels and + * different encapsulation layer set + * this + */ + gro_enabled:1; /* Can accept GRO packets */ /* * Following member retains the information to create a UDP header * when the socket is uncorked. @@ -71,6 +77,7 @@ struct udp_sock { * For encapsulation sockets. 
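(Illustrative aside: the uaccess.h hunk above is part of the tree-wide removal of access_ok()'s VERIFY_READ/VERIFY_WRITE argument; callers convert mechanically, for example:)

        /* before */
        if (!access_ok(VERIFY_WRITE, ubuf, len))
                return -EFAULT;

        /* after */
        if (!access_ok(ubuf, len))
                return -EFAULT;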
*/ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); + int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb); void (*encap_destroy)(struct sock *sk); /* GRO functions for UDP socket */ @@ -115,6 +122,23 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } +static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb) +{ + int gso_size; + + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + gso_size = skb_shinfo(skb)->gso_size; + put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size); + } +} + +static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) +{ + return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) && + skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4; +} + #define udp_portaddr_for_each_entry(__sk, list) \ hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node) diff --git a/include/linux/uio.h b/include/linux/uio.h index 55ce99ddb912..ecf584f6b82d 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/thread_info.h> +#include <crypto/hash.h> #include <uapi/linux/uio.h> struct page; @@ -266,9 +267,11 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, + struct iov_iter *i); int import_iovec(int type, const struct iovec __user * uvector, unsigned nr_segs, unsigned fast_segs, diff --git a/include/linux/usb.h b/include/linux/usb.h index 4cdd515a4385..5e49e82c4368 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -407,11 +407,11 @@ struct usb_host_bos { }; int __usb_get_extra_descriptor(char *buffer, unsigned size, - unsigned char type, void **ptr); + unsigned char type, void **ptr, size_t min); #define usb_get_extra_descriptor(ifpoint, type, ptr) \ __usb_get_extra_descriptor((ifpoint)->extra, \ (ifpoint)->extralen, \ - type, (void **)ptr) + type, (void **)ptr, sizeof(**(ptr))) /* ----------------------------------------------------------------------- */ diff --git a/include/linux/usb/ccid.h b/include/linux/usb/ccid.h new file mode 100644 index 000000000000..3431446d6864 --- /dev/null +++ b/include/linux/usb/ccid.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018 Vincent Pelletier + */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef __CCID_H +#define __CCID_H + +#include <linux/types.h> + +#define USB_INTERFACE_CLASS_CCID 0x0b + +struct ccid_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __le16 bcdCCID; + __u8 bMaxSlotIndex; + __u8 bVoltageSupport; + __le32 dwProtocols; + __le32 dwDefaultClock; + __le32 dwMaximumClock; + __u8 bNumClockSupported; + __le32 dwDataRate; + __le32 dwMaxDataRate; + __u8 bNumDataRatesSupported; + __le32 dwMaxIFSD; + __le32 dwSynchProtocols; + __le32 dwMechanical; + __le32 dwFeatures; + __le32 dwMaxCCIDMessageLength; + __u8 bClassGetResponse; + __u8 bClassEnvelope; + __le16 wLcdLayout; + __u8 bPINSupport; + __u8 bMaxCCIDBusySlots; +} __attribute__ ((packed)); + +#endif /* __CCID_H */ diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index 63758c399e4e..911e05af671e 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -60,9 +60,12 @@ struct ci_hdrc_platform_data { #define CI_HDRC_OVERRIDE_RX_BURST BIT(11) #define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */ #define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13) +#define CI_HDRC_IMX_IS_HSIC BIT(14) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 +#define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2 +#define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3 int (*notify_event) (struct ci_hdrc *ci, unsigned event); struct regulator *reg_vbus; struct usb_otg_caps ci_otg_caps; diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index e5cd84a0f84a..7595056b96c1 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -61,6 +61,8 @@ struct usb_ep; * invalidated by the error may first be dequeued. * @context: For use by the completion callback * @list: For use by the gadget driver. + * @frame_number: Reports the interval number in (micro)frame in which the + * isochronous transfer was transmitted or received. * @status: Reports completion code, zero or a negative errno. * Normally, faults block the transfer queue from advancing until * the completion callback returns. @@ -112,6 +114,8 @@ struct usb_request { void *context; struct list_head list; + unsigned frame_number; /* ISO ONLY */ + int status; unsigned actual; }; diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 97e2ddec18b1..7dc3a411bece 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -235,11 +235,6 @@ static inline struct usb_hcd *bus_to_hcd(struct usb_bus *bus) return container_of(bus, struct usb_hcd, self); } -struct hcd_timeout { /* timeouts we allocate */ - struct list_head timeout_list; - struct timer_list timer; -}; - /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index b7a99ce56bc9..a1be64c9940f 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -66,4 +66,7 @@ /* Device needs a pause after every control message. */ #define USB_QUIRK_DELAY_CTRL_MSG BIT(13) +/* Hub needs extra delay after resetting its port. 
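(Illustrative aside for the frame_number field added to struct usb_request in the gadget.h hunk above: it is reported for isochronous transfers, so a completion handler can log which (micro)frame carried the data. The function name and message text below are invented.)

        static void iso_in_complete(struct usb_ep *ep, struct usb_request *req)
        {
                if (req->status)
                        return;

                pr_debug("%s: %u bytes in (micro)frame %u\n",
                         ep->name, req->actual, req->frame_number);
        }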
*/ +#define USB_QUIRK_HUB_SLOW_RESET BIT(14) + #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index 7e7fbfb84e8e..50c74a77db55 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -89,6 +89,7 @@ struct tcpc_config { enum typec_port_data data; enum typec_role default_role; bool try_role_hw; /* try.{src,snk} implemented in hardware */ + bool self_powered; /* port belongs to a self powered device */ const struct typec_altmode_desc *alt_modes; }; diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index f25cef84b41d..2db8d60981fe 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -239,11 +239,6 @@ extern unsigned long node_page_state(struct pglist_data *pgdat, #define node_page_state(node, item) global_node_page_state(item) #endif /* CONFIG_NUMA */ -#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d) -#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d)) -#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d) -#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d)) - #ifdef CONFIG_SMP void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long); void __inc_zone_page_state(struct page *, enum zone_stat_item); diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 44985c4a1e86..417d9f37077a 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -90,9 +90,6 @@ struct watchdog_ops { * * The driver-data field may not be accessed directly. It must be accessed * via the watchdog_set_drvdata and watchdog_get_drvdata helpers. - * - * The lock field is for watchdog core internal use only and should not be - * touched. */ struct watchdog_device { int id; diff --git a/include/linux/writeback.h b/include/linux/writeback.h index fdfd04e348f6..738a0c24874f 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -246,7 +246,8 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc, * * @bio is a part of the writeback in progress controlled by @wbc. Perform * writeback specific initialization. This is used to apply the cgroup - * writeback context. + * writeback context. Must be called after the bio has been associated with + * a device. */ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) { @@ -257,7 +258,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) * regular writeback instead of writing things out itself. 
*/ if (wbc->wb) - bio_associate_blkcg(bio, wbc->wb->blkcg_css); + bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css); } #else /* CONFIG_CGROUP_WRITEBACK */ diff --git a/include/linux/xarray.h b/include/linux/xarray.h index d9514928ddac..f492e21c4aa2 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -289,9 +289,7 @@ struct xarray { void xa_init_flags(struct xarray *, gfp_t flags); void *xa_load(struct xarray *, unsigned long index); void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); -void *xa_cmpxchg(struct xarray *, unsigned long index, - void *old, void *entry, gfp_t); -int xa_reserve(struct xarray *, unsigned long index, gfp_t); +void *xa_erase(struct xarray *, unsigned long index); void *xa_store_range(struct xarray *, unsigned long first, unsigned long last, void *entry, gfp_t); bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -344,65 +342,6 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) } /** - * xa_erase() - Erase this entry from the XArray. - * @xa: XArray. - * @index: Index of entry. - * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. - * - * Context: Process context. Takes and releases the xa_lock. - * Return: The entry which used to be at this index. - */ -static inline void *xa_erase(struct xarray *xa, unsigned long index) -{ - return xa_store(xa, index, NULL, 0); -} - -/** - * xa_insert() - Store this entry in the XArray unless another entry is - * already present. - * @xa: XArray. - * @index: Index into array. - * @entry: New entry. - * @gfp: Memory allocation flags. - * - * If you would rather see the existing entry in the array, use xa_cmpxchg(). - * This function is for users who don't care what the entry is, only that - * one is present. - * - * Context: Process context. Takes and releases the xa_lock. - * May sleep if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. - * -ENOMEM if memory could not be allocated. - */ -static inline int xa_insert(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) -{ - void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp); - if (!curr) - return 0; - if (xa_is_err(curr)) - return xa_err(curr); - return -EEXIST; -} - -/** - * xa_release() - Release a reserved entry. - * @xa: XArray. - * @index: Index of entry. - * - * After calling xa_reserve(), you can call this function to release the - * reservation. If the entry at @index has been stored to, this function - * will do nothing. - */ -static inline void xa_release(struct xarray *xa, unsigned long index) -{ - xa_cmpxchg(xa, index, NULL, NULL, 0); -} - -/** * xa_for_each() - Iterate over a portion of an XArray. * @xa: XArray. * @entry: Entry retrieved from array. @@ -455,6 +394,7 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, void *entry, gfp_t); int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); +int __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -487,6 +427,58 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index, } /** + * xa_store_bh() - Store this entry in the XArray. + * @xa: XArray. 
+ * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_store() except it disables softirqs + * while holding the array lock. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. + * Return: The entry which used to be at this index. + */ +static inline void *xa_store_bh(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_bh(xa); + curr = __xa_store(xa, index, entry, gfp); + xa_unlock_bh(xa); + + return curr; +} + +/** + * xa_store_irq() - Store this entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_store() except it disables interrupts + * while holding the array lock. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. + * Return: The entry which used to be at this index. + */ +static inline void *xa_store_irq(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_irq(xa); + curr = __xa_store(xa, index, entry, gfp); + xa_unlock_irq(xa); + + return curr; +} + +/** + * xa_erase_bh() - Erase this entry from the XArray. + * @xa: XArray. + * @index: Index of entry. @@ -495,7 +487,7 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index, * the third argument. The XArray does not need to allocate memory, so * the user does not need to provide GFP flags. * - * Context: Process context. Takes and releases the xa_lock while + * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. * Return: The entry which used to be at this index. */ @@ -535,6 +527,115 @@ static inline void *xa_erase_irq(struct xarray *xa, unsigned long index) } /** + * xa_cmpxchg() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * If the entry at @index is the same as @old, replace it with @entry. + * If the return value is equal to @old, then the exchange was successful. + * + * Context: Any context. Takes and releases the xa_lock. May sleep + * if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. + */ +static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock(xa); + + return curr; +} + +/** + * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_cmpxchg() except it disables softirqs + * while holding the array lock. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. + */ +static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_bh(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock_bh(xa); + + return curr; +} + +/** + * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. 
+ * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_cmpxchg() except it disables interrupts + * while holding the array lock. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. + */ +static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_irq(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock_irq(xa); + + return curr; +} + +/** + * xa_insert() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * If you would rather see the existing entry in the array, use xa_cmpxchg(). + * This function is for users who don't care what the entry is, only that + * one is present. + * + * Context: Process context. Takes and releases the xa_lock. + * May sleep if the @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int xa_insert(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp); + if (!curr) + return 0; + if (xa_is_err(curr)) + return xa_err(curr); + return -EEXIST; +} + +/** * xa_alloc() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. @@ -575,7 +676,7 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, * Updates the @id pointer with the index, then stores the entry at that * index. A concurrent lookup will not see an uninitialised @id. * - * Context: Process context. Takes and releases the xa_lock while + * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if * there is no more space in the XArray. @@ -621,6 +722,98 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry, return err; } +/** + * xa_reserve() - Reserve this index in the XArray. + * @xa: XArray. + * @index: Index into array. + * @gfp: Memory allocation flags. + * + * Ensures there is somewhere to store an entry at @index in the array. + * If there is already something stored at @index, this function does + * nothing. If there was nothing there, the entry is marked as reserved. + * Loading from a reserved entry returns a %NULL pointer. + * + * If you do not use the entry that you have reserved, call xa_release() + * or xa_erase() to free any unnecessary memory. + * + * Context: Any context. Takes and releases the xa_lock. + * May sleep if the @gfp flags permit. + * Return: 0 if the reservation succeeded or -ENOMEM if it failed. + */ +static inline +int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + int ret; + + xa_lock(xa); + ret = __xa_reserve(xa, index, gfp); + xa_unlock(xa); + + return ret; +} + +/** + * xa_reserve_bh() - Reserve this index in the XArray. + * @xa: XArray. + * @index: Index into array. + * @gfp: Memory allocation flags. + * + * A softirq-disabling version of xa_reserve(). + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. 
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed. + */ +static inline +int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + int ret; + + xa_lock_bh(xa); + ret = __xa_reserve(xa, index, gfp); + xa_unlock_bh(xa); + + return ret; +} + +/** + * xa_reserve_irq() - Reserve this index in the XArray. + * @xa: XArray. + * @index: Index into array. + * @gfp: Memory allocation flags. + * + * An interrupt-disabling version of xa_reserve(). + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. + * Return: 0 if the reservation succeeded or -ENOMEM if it failed. + */ +static inline +int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + int ret; + + xa_lock_irq(xa); + ret = __xa_reserve(xa, index, gfp); + xa_unlock_irq(xa); + + return ret; +} + +/** + * xa_release() - Release a reserved entry. + * @xa: XArray. + * @index: Index of entry. + * + * After calling xa_reserve(), you can call this function to release the + * reservation. If the entry at @index has been stored to, this function + * will do nothing. + */ +static inline void xa_release(struct xarray *xa, unsigned long index) +{ + xa_cmpxchg(xa, index, NULL, NULL, 0); +} + /* Everything below here is the Advanced API. Proceed with caution. */ /* diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h index 9e1f42cb57e9..52b073fea17f 100644 --- a/include/linux/xxhash.h +++ b/include/linux/xxhash.h @@ -107,6 +107,29 @@ uint32_t xxh32(const void *input, size_t length, uint32_t seed); */ uint64_t xxh64(const void *input, size_t length, uint64_t seed); +/** + * xxhash() - calculate wordsize hash of the input with a given seed + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * If the hash does not need to be comparable between machines with + * different word sizes, this function will call whichever of xxh32() + * or xxh64() is faster. + * + * Return: wordsize hash of the data. + */ + +static inline unsigned long xxhash(const void *input, size_t length, + uint64_t seed) +{ +#if BITS_PER_LONG == 64 + return xxh64(input, length, seed); +#else + return xxh32(input, length, seed); +#endif +} + /*-**************************** * Streaming Hash Functions *****************************/ diff --git a/include/math-emu/op-2.h b/include/math-emu/op-2.h index 4f26ecc1411b..13a374f51a22 100644 --- a/include/math-emu/op-2.h +++ b/include/math-emu/op-2.h @@ -31,61 +31,56 @@ #define _FP_FRAC_HIGH_2(X) (X##_f1) #define _FP_FRAC_LOW_2(X) (X##_f0) #define _FP_FRAC_WORD_2(X,w) (X##_f##w) +#define _FP_FRAC_SLL_2(X, N) ( \ + (void) (((N) < _FP_W_TYPE_SIZE) \ + ? ({ \ + if (__builtin_constant_p(N) && (N) == 1) { \ + X##_f1 = X##_f1 + X##_f1 + \ + (((_FP_WS_TYPE) (X##_f0)) < 0); \ + X##_f0 += X##_f0; \ + } else { \ + X##_f1 = X##_f1 << (N) | X##_f0 >> \ + (_FP_W_TYPE_SIZE - (N)); \ + X##_f0 <<= (N); \ + } \ + 0; \ + }) \ + : ({ \ + X##_f1 = X##_f0 << ((N) - _FP_W_TYPE_SIZE); \ + X##_f0 = 0; \ + }))) + + +#define _FP_FRAC_SRL_2(X, N) ( \ + (void) (((N) < _FP_W_TYPE_SIZE) \ + ? 
({ \ + X##_f0 = X##_f0 >> (N) | X##_f1 << (_FP_W_TYPE_SIZE - (N)); \ + X##_f1 >>= (N); \ + }) \ + : ({ \ + X##_f0 = X##_f1 >> ((N) - _FP_W_TYPE_SIZE); \ + X##_f1 = 0; \ + }))) -#define _FP_FRAC_SLL_2(X,N) \ - do { \ - if ((N) < _FP_W_TYPE_SIZE) \ - { \ - if (__builtin_constant_p(N) && (N) == 1) \ - { \ - X##_f1 = X##_f1 + X##_f1 + (((_FP_WS_TYPE)(X##_f0)) < 0); \ - X##_f0 += X##_f0; \ - } \ - else \ - { \ - X##_f1 = X##_f1 << (N) | X##_f0 >> (_FP_W_TYPE_SIZE - (N)); \ - X##_f0 <<= (N); \ - } \ - } \ - else \ - { \ - X##_f1 = X##_f0 << ((N) - _FP_W_TYPE_SIZE); \ - X##_f0 = 0; \ - } \ - } while (0) - -#define _FP_FRAC_SRL_2(X,N) \ - do { \ - if ((N) < _FP_W_TYPE_SIZE) \ - { \ - X##_f0 = X##_f0 >> (N) | X##_f1 << (_FP_W_TYPE_SIZE - (N)); \ - X##_f1 >>= (N); \ - } \ - else \ - { \ - X##_f0 = X##_f1 >> ((N) - _FP_W_TYPE_SIZE); \ - X##_f1 = 0; \ - } \ - } while (0) /* Right shift with sticky-lsb. */ -#define _FP_FRAC_SRS_2(X,N,sz) \ - do { \ - if ((N) < _FP_W_TYPE_SIZE) \ - { \ - X##_f0 = (X##_f1 << (_FP_W_TYPE_SIZE - (N)) | X##_f0 >> (N) | \ - (__builtin_constant_p(N) && (N) == 1 \ - ? X##_f0 & 1 \ - : (X##_f0 << (_FP_W_TYPE_SIZE - (N))) != 0)); \ - X##_f1 >>= (N); \ - } \ - else \ - { \ - X##_f0 = (X##_f1 >> ((N) - _FP_W_TYPE_SIZE) | \ - (((X##_f1 << (2*_FP_W_TYPE_SIZE - (N))) | X##_f0) != 0)); \ - X##_f1 = 0; \ - } \ - } while (0) +#define _FP_FRAC_SRS_2(X, N, sz) ( \ + (void) (((N) < _FP_W_TYPE_SIZE) \ + ? ({ \ + X##_f0 = (X##_f1 << (_FP_W_TYPE_SIZE - (N)) | X##_f0 >> (N) \ + | (__builtin_constant_p(N) && (N) == 1 \ + ? X##_f0 & 1 \ + : (X##_f0 << (_FP_W_TYPE_SIZE - (N))) != 0)); \ + X##_f1 >>= (N); \ + }) \ + : ({ \ + X##_f0 = (X##_f1 >> ((N) - _FP_W_TYPE_SIZE) \ + | ((((N) == _FP_W_TYPE_SIZE \ + ? 0 \ + : (X##_f1 << (2*_FP_W_TYPE_SIZE - (N)))) \ + | X##_f0) != 0)); \ + X##_f1 = 0; \ + }))) #define _FP_FRAC_ADDI_2(X,I) \ __FP_FRAC_ADDI_2(X##_f1, X##_f0, I) diff --git a/include/math-emu/soft-fp.h b/include/math-emu/soft-fp.h index 3f284bc03180..5650c1628383 100644 --- a/include/math-emu/soft-fp.h +++ b/include/math-emu/soft-fp.h @@ -138,7 +138,7 @@ do { \ _FP_FRAC_ADDI_##wc(X, _FP_WORK_ROUND); \ } while (0) -#define _FP_ROUND_ZERO(wc, X) 0 +#define _FP_ROUND_ZERO(wc, X) (void)0 #define _FP_ROUND_PINF(wc, X) \ do { \ diff --git a/include/media/cec.h b/include/media/cec.h index 3fe5e5d2bb7e..707411ef8ba2 100644 --- a/include/media/cec.h +++ b/include/media/cec.h @@ -155,6 +155,7 @@ struct cec_adapter { unsigned int transmit_queue_sz; struct list_head wait_queue; struct cec_data *transmitting; + bool transmit_in_progress; struct task_struct *kthread_config; struct completion config_completion; diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h index 79a566d7defd..5c31a7682492 100644 --- a/include/media/davinci/vpbe.h +++ b/include/media/davinci/vpbe.h @@ -100,10 +100,6 @@ struct vpbe_config { struct vpbe_device; struct vpbe_device_ops { - /* crop cap for the display */ - int (*g_cropcap)(struct vpbe_device *vpbe_dev, - struct v4l2_cropcap *cropcap); - /* Enumerate the outputs */ int (*enum_outputs)(struct vpbe_device *vpbe_dev, struct v4l2_output *output); diff --git a/include/media/media-request.h b/include/media/media-request.h index 0ce75c35131f..bd36d7431698 100644 --- a/include/media/media-request.h +++ b/include/media/media-request.h @@ -68,7 +68,7 @@ struct media_request { unsigned int access_count; struct list_head objects; unsigned int num_incomplete_objects; - struct wait_queue_head poll_wait; + wait_queue_head_t poll_wait; spinlock_t lock; }; diff --git 
a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h new file mode 100644 index 000000000000..d21f40edc09e --- /dev/null +++ b/include/media/mpeg2-ctrls.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * These are the MPEG2 state controls for use with stateless MPEG-2 + * codec drivers. + * + * It turns out that these structs are not stable yet and will undergo + * more changes. So keep them private until they are stable and ready to + * become part of the official public API. + */ + +#ifndef _MPEG2_CTRLS_H_ +#define _MPEG2_CTRLS_H_ + +#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250) +#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251) + +/* enum v4l2_ctrl_type type values */ +#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103 +#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104 + +#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1 +#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2 +#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3 +#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4 + +struct v4l2_mpeg2_sequence { + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ + __u16 horizontal_size; + __u16 vertical_size; + __u32 vbv_buffer_size; + + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ + __u8 profile_and_level_indication; + __u8 progressive_sequence; + __u8 chroma_format; + __u8 pad; +}; + +struct v4l2_mpeg2_picture { + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ + __u8 picture_coding_type; + + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ + __u8 f_code[2][2]; + __u8 intra_dc_precision; + __u8 picture_structure; + __u8 top_field_first; + __u8 frame_pred_frame_dct; + __u8 concealment_motion_vectors; + __u8 q_scale_type; + __u8 intra_vlc_format; + __u8 alternate_scan; + __u8 repeat_first_field; + __u8 progressive_frame; + __u8 pad; +}; + +struct v4l2_ctrl_mpeg2_slice_params { + __u32 bit_size; + __u32 data_bit_offset; + + struct v4l2_mpeg2_sequence sequence; + struct v4l2_mpeg2_picture picture; + + /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ + __u8 quantiser_scale_code; + + __u8 backward_ref_index; + __u8 forward_ref_index; + __u8 pad; +}; + +struct v4l2_ctrl_mpeg2_quantization { + /* ISO/IEC 13818-2, ITU-T Rec. 
H.262: Quant matrix extension */ + __u8 load_intra_quantiser_matrix; + __u8 load_non_intra_quantiser_matrix; + __u8 load_chroma_intra_quantiser_matrix; + __u8 load_chroma_non_intra_quantiser_matrix; + + __u8 intra_quantiser_matrix[64]; + __u8 non_intra_quantiser_matrix[64]; + __u8 chroma_intra_quantiser_matrix[64]; + __u8 chroma_non_intra_quantiser_matrix[64]; +}; + +#endif diff --git a/include/media/rc-map.h b/include/media/rc-map.h index bfa3017cecba..d621acadfbf3 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -277,6 +277,7 @@ struct rc_map *rc_map_get(const char *name); #define RC_MAP_WINFAST "rc-winfast" #define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe" #define RC_MAP_SU3000 "rc-su3000" +#define RC_MAP_XBOX_DVD "rc-xbox-dvd" #define RC_MAP_ZX_IRDEC "rc-zx-irdec" /* diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 82715645617b..0c511ed8ffb0 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -396,4 +396,9 @@ int v4l2_g_parm_cap(struct video_device *vdev, int v4l2_s_parm_cap(struct video_device *vdev, struct v4l2_subdev *sd, struct v4l2_streamparm *a); +/* Compare two v4l2_fract structs */ +#define V4L2_FRACT_COMPARE(a, OP, b) \ + ((u64)(a).numerator * (b).denominator OP \ + (u64)(b).numerator * (a).denominator) + #endif /* V4L2_COMMON_H_ */ diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 83ce0593b275..d63cf227b0ab 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -22,6 +22,12 @@ #include <linux/videodev2.h> #include <media/media-request.h> +/* + * Include the mpeg2 stateless codec compound control definitions. + * This will move to the public headers once this API is fully stable. + */ +#include <media/mpeg2-ctrls.h> + /* forward references */ struct file; struct v4l2_ctrl_handler; diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 456ac13eca1d..48531e57cc5a 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -74,10 +74,19 @@ struct v4l2_ctrl_handler; * indicates that file->private_data points to &struct v4l2_fh. * This flag is set by the core when v4l2_fh_init() is called. * All new drivers should use it. + * @V4L2_FL_QUIRK_INVERTED_CROP: + * some old M2M drivers use g/s_crop/cropcap incorrectly: crop and + * compose are swapped. If this flag is set, then the selection + * targets are swapped in the g/s_crop/cropcap functions in v4l2-ioctl.c. + * This allows those drivers to correctly implement the selection API, + * but the old crop API will still work as expected in order to preserve + * backwards compatibility. + * Never set this flag for new drivers. 
*/ enum v4l2_video_device_flags { - V4L2_FL_REGISTERED = 0, - V4L2_FL_USES_V4L2_FH = 1, + V4L2_FL_REGISTERED = 0, + V4L2_FL_USES_V4L2_FH = 1, + V4L2_FL_QUIRK_INVERTED_CROP = 2, }; /* Priority helper functions */ diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index 5848d92c30da..8533ece5026e 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h @@ -48,6 +48,9 @@ struct v4l2_fh; * @vidioc_enum_fmt_meta_cap: pointer to the function that implements * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic * for metadata capture + * @vidioc_enum_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic + * for metadata output * @vidioc_g_fmt_vid_cap: pointer to the function that implements * :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for video capture * in single plane mode @@ -80,6 +83,8 @@ struct v4l2_fh; * Radio output * @vidioc_g_fmt_meta_cap: pointer to the function that implements * :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for metadata capture + * @vidioc_g_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for metadata output * @vidioc_s_fmt_vid_cap: pointer to the function that implements * :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for video capture * in single plane mode @@ -112,6 +117,8 @@ struct v4l2_fh; * Radio output * @vidioc_s_fmt_meta_cap: pointer to the function that implements * :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for metadata capture + * @vidioc_s_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for metadata output * @vidioc_try_fmt_vid_cap: pointer to the function that implements * :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for video capture * in single plane mode @@ -146,6 +153,8 @@ struct v4l2_fh; * Radio output * @vidioc_try_fmt_meta_cap: pointer to the function that implements * :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for metadata capture + * @vidioc_try_fmt_meta_out: pointer to the function that implements + * :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for metadata output * @vidioc_reqbufs: pointer to the function that implements * :ref:`VIDIOC_REQBUFS <vidioc_reqbufs>` ioctl * @vidioc_querybuf: pointer to the function that implements @@ -220,12 +229,8 @@ struct v4l2_fh; * :ref:`VIDIOC_G_MODULATOR <vidioc_g_modulator>` ioctl * @vidioc_s_modulator: pointer to the function that implements * :ref:`VIDIOC_S_MODULATOR <vidioc_g_modulator>` ioctl - * @vidioc_cropcap: pointer to the function that implements - * :ref:`VIDIOC_CROPCAP <vidioc_cropcap>` ioctl - * @vidioc_g_crop: pointer to the function that implements - * :ref:`VIDIOC_G_CROP <vidioc_g_crop>` ioctl - * @vidioc_s_crop: pointer to the function that implements - * :ref:`VIDIOC_S_CROP <vidioc_g_crop>` ioctl + * @vidioc_g_pixelaspect: pointer to the function that implements + * the pixelaspect part of the :ref:`VIDIOC_CROPCAP <vidioc_cropcap>` ioctl * @vidioc_g_selection: pointer to the function that implements * :ref:`VIDIOC_G_SELECTION <vidioc_g_selection>` ioctl * @vidioc_s_selection: pointer to the function that implements @@ -318,6 +323,8 @@ struct v4l2_ioctl_ops { struct v4l2_fmtdesc *f); int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *fh, struct v4l2_fmtdesc *f); + int (*vidioc_enum_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_fmtdesc *f); /* VIDIOC_G_FMT handlers */ int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh, @@ -346,6 +353,8 @@ struct 
v4l2_ioctl_ops { struct v4l2_format *f); int (*vidioc_g_fmt_meta_cap)(struct file *file, void *fh, struct v4l2_format *f); + int (*vidioc_g_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_format *f); /* VIDIOC_S_FMT handlers */ int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh, @@ -374,6 +383,8 @@ struct v4l2_ioctl_ops { struct v4l2_format *f); int (*vidioc_s_fmt_meta_cap)(struct file *file, void *fh, struct v4l2_format *f); + int (*vidioc_s_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_format *f); /* VIDIOC_TRY_FMT handlers */ int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh, @@ -402,6 +413,8 @@ struct v4l2_ioctl_ops { struct v4l2_format *f); int (*vidioc_try_fmt_meta_cap)(struct file *file, void *fh, struct v4l2_format *f); + int (*vidioc_try_fmt_meta_out)(struct file *file, void *fh, + struct v4l2_format *f); /* Buffer handlers */ int (*vidioc_reqbufs)(struct file *file, void *fh, @@ -491,12 +504,8 @@ struct v4l2_ioctl_ops { int (*vidioc_s_modulator)(struct file *file, void *fh, const struct v4l2_modulator *a); /* Crop ioctls */ - int (*vidioc_cropcap)(struct file *file, void *fh, - struct v4l2_cropcap *a); - int (*vidioc_g_crop)(struct file *file, void *fh, - struct v4l2_crop *a); - int (*vidioc_s_crop)(struct file *file, void *fh, - const struct v4l2_crop *a); + int (*vidioc_g_pixelaspect)(struct file *file, void *fh, + int buf_type, struct v4l2_fract *aspect); int (*vidioc_g_selection)(struct file *file, void *fh, struct v4l2_selection *s); int (*vidioc_s_selection)(struct file *file, void *fh, diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 58c1ecf3d648..5467264771ec 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -624,7 +624,7 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) /* v4l2 request helper */ -void vb2_m2m_request_queue(struct media_request *req); +void v4l2_m2m_request_queue(struct media_request *req); /* v4l2 ioctl helpers */ diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 9102d6ca566e..47af609dc8f1 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -776,7 +776,11 @@ struct v4l2_subdev_internal_ops { #define V4L2_SUBDEV_FL_IS_SPI (1U << 1) /* Set this flag if this subdev needs a device node. */ #define V4L2_SUBDEV_FL_HAS_DEVNODE (1U << 2) -/* Set this flag if this subdev generates events. */ +/* + * Set this flag if this subdev generates events. + * Note controls can send events, thus drivers exposing controls + * should set this flag. + */ #define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3) struct regulator_bulk_data; diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index e86981d615ae..4a737b2c610b 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -239,6 +239,7 @@ struct vb2_queue; * @num_planes: number of planes in the buffer * on an internal driver queue. * @timestamp: frame timestamp in ns. + * @request: the request this buffer is associated with. * @req_obj: used to bind this buffer to a request. This * request object has a refcount. 
*/ @@ -249,6 +250,7 @@ struct vb2_buffer { unsigned int memory; unsigned int num_planes; u64 timestamp; + struct media_request *request; struct media_request_object req_obj; /* private: internal use only diff --git a/include/net/act_api.h b/include/net/act_api.h index 05c7df41d737..dbc795ec659e 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -194,35 +194,5 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, #endif } -#ifdef CONFIG_NET_CLS_ACT -int tc_setup_cb_egdev_register(const struct net_device *dev, - tc_setup_cb_t *cb, void *cb_priv); -void tc_setup_cb_egdev_unregister(const struct net_device *dev, - tc_setup_cb_t *cb, void *cb_priv); -int tc_setup_cb_egdev_call(const struct net_device *dev, - enum tc_setup_type type, void *type_data, - bool err_stop); -#else -static inline -int tc_setup_cb_egdev_register(const struct net_device *dev, - tc_setup_cb_t *cb, void *cb_priv) -{ - return 0; -} - -static inline -void tc_setup_cb_egdev_unregister(const struct net_device *dev, - tc_setup_cb_t *cb, void *cb_priv) -{ -} - -static inline -int tc_setup_cb_egdev_call(const struct net_device *dev, - enum tc_setup_type type, void *type_data, - bool err_stop) -{ - return 0; -} -#endif #endif diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 14b789a123e7..1656c5978498 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -317,6 +317,8 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, const struct in6_addr *addr); bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev, const struct in6_addr *addr); +int ipv6_anycast_init(void); +void ipv6_anycast_cleanup(void); /* Device notifier */ int register_inet6addr_notifier(struct notifier_block *nb); diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index de587948042a..1adefe42c0a6 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -77,7 +77,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *, struct sockaddr_rxrpc *, struct key *); int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *, enum rxrpc_call_completion *, u32 *); -u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *); +u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); +void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *, ktime_t *); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 1fa41b7a1be3..e0c41eb1c860 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -777,8 +777,10 @@ struct cfg80211_crypto_settings { * @probe_resp: probe response template (AP mode only) * @ftm_responder: enable FTM responder functionality; -1 for no change * (which also implies no change in LCI/civic location data) - * @lci: LCI subelement content - * @civicloc: Civic location subelement content + * @lci: Measurement Report element content, starting with Measurement Token + * (measurement type 8) + * @civicloc: Measurement Report element content, starting with Measurement + * Token (measurement type 11) * @lci_len: LCI data length * @civicloc_len: Civic location data length */ @@ -1296,6 +1298,7 @@ struct cfg80211_tid_stats { * @rx_beacon: number of beacons received from this peer * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received * from this peer + * @connected_to_gate: true if mesh STA has a path to mesh gate * 
@rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer * @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last * (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs. @@ -1350,6 +1353,8 @@ struct station_info { u64 rx_beacon; u64 rx_duration; u8 rx_beacon_signal_avg; + u8 connected_to_gate; + struct cfg80211_tid_stats *pertid; s8 ack_signal; s8 avg_ack_signal; @@ -1559,6 +1564,10 @@ struct bss_parameters { * @plink_timeout: If no tx activity is seen from a STA we've established * peering with for longer than this time (in seconds), then remove it * from the STA's list of peers. Default is 30 minutes. + * @dot11MeshConnectedToMeshGate: if set to true, advertise that this STA is + * connected to a mesh gate in mesh formation info. If false, the + * value in mesh formation is determined by the presence of root paths + * in the mesh path table */ struct mesh_config { u16 dot11MeshRetryTimeout; @@ -1578,6 +1587,7 @@ struct mesh_config { u16 dot11MeshHWMPperrMinInterval; u16 dot11MeshHWMPnetDiameterTraversalTime; u8 dot11MeshHWMPRootMode; + bool dot11MeshConnectedToMeshGate; u16 dot11MeshHWMPRannInterval; bool dot11MeshGateAnnouncementProtocol; bool dot11MeshForwarding; @@ -2815,7 +2825,7 @@ struct cfg80211_external_auth_params { }; /** - * cfg80211_ftm_responder_stats - FTM responder statistics + * struct cfg80211_ftm_responder_stats - FTM responder statistics * * @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to * indicate the relevant values in this struct for them @@ -2849,6 +2859,190 @@ struct cfg80211_ftm_responder_stats { }; /** + * struct cfg80211_pmsr_ftm_result - FTM result + * @failure_reason: if this measurement failed (PMSR status is + * %NL80211_PMSR_STATUS_FAILURE), this gives a more precise + * reason than just "failure" + * @burst_index: if reporting partial results, this is the index + * in [0 .. 
num_bursts-1] of the burst that's being reported + * @num_ftmr_attempts: number of FTM request frames transmitted + * @num_ftmr_successes: number of FTM request frames acked + * @busy_retry_time: if failure_reason is %NL80211_PMSR_FTM_FAILURE_PEER_BUSY, + * fill this to indicate in how many seconds a retry is deemed possible + * by the responder + * @num_bursts_exp: actual number of bursts exponent negotiated + * @burst_duration: actual burst duration negotiated + * @ftms_per_burst: actual FTMs per burst negotiated + * @lci_len: length of LCI information (if present) + * @civicloc_len: length of civic location information (if present) + * @lci: LCI data (may be %NULL) + * @civicloc: civic location data (may be %NULL) + * @rssi_avg: average RSSI over FTM action frames reported + * @rssi_spread: spread of the RSSI over FTM action frames reported + * @tx_rate: bitrate for transmitted FTM action frame response + * @rx_rate: bitrate of received FTM action frame + * @rtt_avg: average of RTTs measured (must have either this or @dist_avg) + * @rtt_variance: variance of RTTs measured (note that standard deviation is + * the square root of the variance) + * @rtt_spread: spread of the RTTs measured + * @dist_avg: average of distances (mm) measured + * (must have either this or @rtt_avg) + * @dist_variance: variance of distances measured (see also @rtt_variance) + * @dist_spread: spread of distances measured (see also @rtt_spread) + * @num_ftmr_attempts_valid: @num_ftmr_attempts is valid + * @num_ftmr_successes_valid: @num_ftmr_successes is valid + * @rssi_avg_valid: @rssi_avg is valid + * @rssi_spread_valid: @rssi_spread is valid + * @tx_rate_valid: @tx_rate is valid + * @rx_rate_valid: @rx_rate is valid + * @rtt_avg_valid: @rtt_avg is valid + * @rtt_variance_valid: @rtt_variance is valid + * @rtt_spread_valid: @rtt_spread is valid + * @dist_avg_valid: @dist_avg is valid + * @dist_variance_valid: @dist_variance is valid + * @dist_spread_valid: @dist_spread is valid + */ +struct cfg80211_pmsr_ftm_result { + const u8 *lci; + const u8 *civicloc; + unsigned int lci_len; + unsigned int civicloc_len; + enum nl80211_peer_measurement_ftm_failure_reasons failure_reason; + u32 num_ftmr_attempts, num_ftmr_successes; + s16 burst_index; + u8 busy_retry_time; + u8 num_bursts_exp; + u8 burst_duration; + u8 ftms_per_burst; + s32 rssi_avg; + s32 rssi_spread; + struct rate_info tx_rate, rx_rate; + s64 rtt_avg; + s64 rtt_variance; + s64 rtt_spread; + s64 dist_avg; + s64 dist_variance; + s64 dist_spread; + + u16 num_ftmr_attempts_valid:1, + num_ftmr_successes_valid:1, + rssi_avg_valid:1, + rssi_spread_valid:1, + tx_rate_valid:1, + rx_rate_valid:1, + rtt_avg_valid:1, + rtt_variance_valid:1, + rtt_spread_valid:1, + dist_avg_valid:1, + dist_variance_valid:1, + dist_spread_valid:1; +}; + +/** + * struct cfg80211_pmsr_result - peer measurement result + * @addr: address of the peer + * @host_time: host time (use ktime_get_boottime() adjust to the time when the + * measurement was made) + * @ap_tsf: AP's TSF at measurement time + * @status: status of the measurement + * @final: if reporting partial results, mark this as the last one; if not + * reporting partial results always set this flag + * @ap_tsf_valid: indicates the @ap_tsf value is valid + * @type: type of the measurement reported, note that we only support reporting + * one type at a time, but you can report multiple results separately and + * they're all aggregated for userspace. 
+ */ +struct cfg80211_pmsr_result { + u64 host_time, ap_tsf; + enum nl80211_peer_measurement_status status; + + u8 addr[ETH_ALEN]; + + u8 final:1, + ap_tsf_valid:1; + + enum nl80211_peer_measurement_type type; + + union { + struct cfg80211_pmsr_ftm_result ftm; + }; +}; + +/** + * struct cfg80211_pmsr_ftm_request_peer - FTM request data + * @requested: indicates FTM is requested + * @preamble: frame preamble to use + * @burst_period: burst period to use + * @asap: indicates to use ASAP mode + * @num_bursts_exp: number of bursts exponent + * @burst_duration: burst duration + * @ftms_per_burst: number of FTMs per burst + * @ftmr_retries: number of retries for FTM request + * @request_lci: request LCI information + * @request_civicloc: request civic location information + * + * See also nl80211 for the respective attribute documentation. + */ +struct cfg80211_pmsr_ftm_request_peer { + enum nl80211_preamble preamble; + u16 burst_period; + u8 requested:1, + asap:1, + request_lci:1, + request_civicloc:1; + u8 num_bursts_exp; + u8 burst_duration; + u8 ftms_per_burst; + u8 ftmr_retries; +}; + +/** + * struct cfg80211_pmsr_request_peer - peer data for a peer measurement request + * @addr: MAC address + * @chandef: channel to use + * @report_ap_tsf: report the associated AP's TSF + * @ftm: FTM data, see &struct cfg80211_pmsr_ftm_request_peer + */ +struct cfg80211_pmsr_request_peer { + u8 addr[ETH_ALEN]; + struct cfg80211_chan_def chandef; + u8 report_ap_tsf:1; + struct cfg80211_pmsr_ftm_request_peer ftm; +}; + +/** + * struct cfg80211_pmsr_request - peer measurement request + * @cookie: cookie, set by cfg80211 + * @nl_portid: netlink portid - used by cfg80211 + * @drv_data: driver data for this request, if required for aborting, + * not otherwise freed or anything by cfg80211 + * @mac_addr: MAC address used for (randomised) request + * @mac_addr_mask: MAC address mask used for randomisation, bits that + * are 0 in the mask should be randomised, bits that are 1 should + * be taken from the @mac_addr + * @list: used by cfg80211 to hold on to the request + * @timeout: timeout (in milliseconds) for the whole operation, if + * zero it means there's no timeout + * @n_peers: number of peers to do measurements with + * @peers: per-peer measurement request data + */ +struct cfg80211_pmsr_request { + u64 cookie; + void *drv_data; + u32 n_peers; + u32 nl_portid; + + u32 timeout; + + u8 mac_addr[ETH_ALEN] __aligned(2); + u8 mac_addr_mask[ETH_ALEN] __aligned(2); + + struct list_head list; + + struct cfg80211_pmsr_request_peer peers[]; +}; + +/** * struct cfg80211_ops - backend description for wireless configuration * * This struct is registered by fullmac card drivers and/or wireless stacks @@ -3183,6 +3377,8 @@ struct cfg80211_ftm_responder_stats { * * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available. * Statistics should be cumulative, currently no way to reset is provided. + * @start_pmsr: start peer measurement (e.g. 
FTM) + * @abort_pmsr: abort peer measurement */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -3492,6 +3688,11 @@ struct cfg80211_ops { int (*get_ftm_responder_stats)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ftm_responder_stats *ftm_stats); + + int (*start_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_pmsr_request *request); + void (*abort_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_pmsr_request *request); }; /* @@ -3864,6 +4065,42 @@ struct wiphy_iftype_ext_capab { }; /** + * struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities + * @max_peers: maximum number of peers in a single measurement + * @report_ap_tsf: can report assoc AP's TSF for radio resource measurement + * @randomize_mac_addr: can randomize MAC address for measurement + * @ftm.supported: FTM measurement is supported + * @ftm.asap: ASAP-mode is supported + * @ftm.non_asap: non-ASAP-mode is supported + * @ftm.request_lci: can request LCI data + * @ftm.request_civicloc: can request civic location data + * @ftm.preambles: bitmap of preambles supported (&enum nl80211_preamble) + * @ftm.bandwidths: bitmap of bandwidths supported (&enum nl80211_chan_width) + * @ftm.max_bursts_exponent: maximum burst exponent supported + * (set to -1 if not limited; note that setting this will necessarily + * forbid using the value 15 to let the responder pick) + * @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if + * not limited) + */ +struct cfg80211_pmsr_capabilities { + unsigned int max_peers; + u8 report_ap_tsf:1, + randomize_mac_addr:1; + + struct { + u32 preambles; + u32 bandwidths; + s8 max_bursts_exponent; + u8 max_ftms_per_burst; + u8 supported:1, + asap:1, + non_asap:1, + request_lci:1, + request_civicloc:1; + } ftm; +}; + +/** * struct wiphy - wireless hardware description * @reg_notifier: the driver's regulatory notification callback, * note that if your driver uses wiphy_apply_custom_regulatory() @@ -4027,6 +4264,8 @@ struct wiphy_iftype_ext_capab { * @txq_limit: configuration of internal TX queue frame limit * @txq_memory_limit: configuration internal TX queue memory limit * @txq_quantum: configuration of internal TX queue scheduler quantum + * + * @pmsr_capa: peer measurement capabilities */ struct wiphy { /* assign these fields before you register the wiphy */ @@ -4163,6 +4402,8 @@ struct wiphy { u32 txq_memory_limit; u32 txq_quantum; + const struct cfg80211_pmsr_capabilities *pmsr_capa; + char priv[0] __aligned(NETDEV_ALIGN); }; @@ -4365,6 +4606,9 @@ struct cfg80211_cqm_config; * @owner_nlportid: (private) owner socket port ID * @nl_owner_dead: (private) owner socket went away * @cqm_config: (private) nl80211 RSSI monitor state + * @pmsr_list: (private) peer measurement requests + * @pmsr_lock: (private) peer measurements requests/results lock + * @pmsr_free_wk: (private) peer measurements cleanup work */ struct wireless_dev { struct wiphy *wiphy; @@ -4436,6 +4680,10 @@ struct wireless_dev { #endif struct cfg80211_cqm_config *cqm_config; + + struct list_head pmsr_list; + spinlock_t pmsr_lock; + struct work_struct pmsr_free_wk; }; static inline u8 *wdev_address(struct wireless_dev *wdev) @@ -5328,7 +5576,8 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, * cfg80211 then sends a notification to userspace. 
*/ void cfg80211_notify_new_peer_candidate(struct net_device *dev, - const u8 *macaddr, const u8 *ie, u8 ie_len, gfp_t gfp); + const u8 *macaddr, const u8 *ie, u8 ie_len, + int sig_dbm, gfp_t gfp); /** * DOC: RFkill integration @@ -6630,6 +6879,31 @@ int cfg80211_external_auth_request(struct net_device *netdev, struct cfg80211_external_auth_params *params, gfp_t gfp); +/** + * cfg80211_pmsr_report - report peer measurement result data + * @wdev: the wireless device reporting the measurement + * @req: the original measurement request + * @result: the result data + * @gfp: allocation flags + */ +void cfg80211_pmsr_report(struct wireless_dev *wdev, + struct cfg80211_pmsr_request *req, + struct cfg80211_pmsr_result *result, + gfp_t gfp); + +/** + * cfg80211_pmsr_complete - report peer measurement completed + * @wdev: the wireless device reporting the measurement + * @req: the original measurement request + * @gfp: allocation flags + * + * Report that the entire measurement completed, after this + * the request pointer will no longer be valid. + */ +void cfg80211_pmsr_complete(struct wireless_dev *wdev, + struct cfg80211_pmsr_request *req, + gfp_t gfp); + /* Logging, debugging and troubleshooting/diagnostic helpers. */ /* wiphy_printk helpers, similar to dev_printk */ diff --git a/include/net/checksum.h b/include/net/checksum.h index aef2b2bb6603..0f319e13be2c 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -30,7 +30,7 @@ static inline __wsum csum_and_copy_from_user (const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { - if (access_ok(VERIFY_READ, src, len)) + if (access_ok(src, len)) return csum_partial_copy_from_user(src, dst, len, sum, err_ptr); if (len) @@ -46,7 +46,7 @@ static __inline__ __wsum csum_and_copy_to_user { sum = csum_partial(src, len, sum); - if (access_ok(VERIFY_WRITE, dst, len)) { + if (access_ok(dst, len)) { if (copy_to_user(dst, src, len) == 0) return sum; } diff --git a/include/net/devlink.h b/include/net/devlink.h index 45db0c79462d..67f4293bc970 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -365,6 +365,7 @@ enum devlink_param_generic_id { DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, + DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, /* add new param generic ids above here*/ __DEVLINK_PARAM_GENERIC_ID_MAX, @@ -392,6 +393,9 @@ enum devlink_param_generic_id { #define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME "msix_vec_per_pf_min" #define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE DEVLINK_PARAM_TYPE_U32 +#define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME "fw_load_policy" +#define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE DEVLINK_PARAM_TYPE_U8 + #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ { \ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \ diff --git a/include/net/dsa.h b/include/net/dsa.h index 23690c44e167..b3eefe8e18fd 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -36,7 +36,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_DSA, DSA_TAG_PROTO_EDSA, DSA_TAG_PROTO_GSWIP, - DSA_TAG_PROTO_KSZ, + DSA_TAG_PROTO_KSZ9477, DSA_TAG_PROTO_LAN9303, DSA_TAG_PROTO_MTK, DSA_TAG_PROTO_QCA, @@ -113,6 +113,7 @@ struct dsa_device_ops { struct packet_type *pt); int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto, int *offset); + unsigned int overhead; }; struct dsa_switch_tree { diff --git a/include/net/flow.h b/include/net/flow.h index 8ce21793094e..93f2c9a0f098 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ 
-38,8 +38,8 @@ struct flowi_common { #define FLOWI_FLAG_KNOWN_NH 0x02 #define FLOWI_FLAG_SKIP_NH_OIF 0x04 __u32 flowic_secid; - struct flowi_tunnel flowic_tun_key; kuid_t flowic_uid; + struct flowi_tunnel flowic_tun_key; }; union flowi_uli { diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 6a4586dcdede..2b26979efb48 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -209,8 +209,8 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */ FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */ - FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */ - FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */ + FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_vlan */ + FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */ FLOW_DISSECTOR_KEY_ENC_KEYID, /* struct flow_dissector_key_keyid */ @@ -221,7 +221,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */ FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */ FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */ - FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */ + FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */ FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */ FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */ diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index 946bd53a9f81..ca23860adbb9 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -10,7 +10,7 @@ struct gnet_stats_basic_cpu { struct gnet_stats_basic_packed bstats; struct u64_stats_sync syncp; -}; +} __aligned(2 * sizeof(u64)); struct net_rate_estimator; diff --git a/include/net/geneve.h b/include/net/geneve.h index a7600ed55ea3..fc6a7e0a874a 100644 --- a/include/net/geneve.h +++ b/include/net/geneve.h @@ -60,6 +60,12 @@ struct genevehdr { struct geneve_opt options[]; }; +static inline bool netif_is_geneve(const struct net_device *dev) +{ + return dev->rtnl_link_ops && + !strcmp(dev->rtnl_link_ops->kind, "geneve"); +} + #ifdef CONFIG_INET struct net_device *geneve_dev_create_fb(struct net *net, const char *name, u8 name_assign_type, u16 dst_port); diff --git a/include/net/gre.h b/include/net/gre.h index 797142eee9cd..b60f212c16c6 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -37,8 +37,17 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name, int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, bool *csum_err, __be16 proto, int nhs); -bool is_gretap_dev(const struct net_device *dev); -bool is_ip6gretap_dev(const struct net_device *dev); +static inline bool netif_is_gretap(const struct net_device *dev) +{ + return dev->rtnl_link_ops && + !strcmp(dev->rtnl_link_ops->kind, "gretap"); +} + +static inline bool netif_is_ip6gretap(const struct net_device *dev) +{ + return dev->rtnl_link_ops && + !strcmp(dev->rtnl_link_ops->kind, "ip6gretap"); +} static inline int gre_calc_hlen(__be16 o_flags) { diff --git a/include/net/icmp.h b/include/net/icmp.h index 3ef2743a8eec..6ac3a5bd0117 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -41,7 +41,7 @@ struct net; void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); int 
icmp_rcv(struct sk_buff *skb); -void icmp_err(struct sk_buff *skb, u32 info); +int icmp_err(struct sk_buff *skb, u32 info); int icmp_init(void); void icmp_out_count(struct net *net, unsigned char type); diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index d7578cf49c3a..c9c78c15bce0 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -146,10 +146,12 @@ struct ifacaddr6 { struct in6_addr aca_addr; struct fib6_info *aca_rt; struct ifacaddr6 *aca_next; + struct hlist_node aca_addr_lst; int aca_users; refcount_t aca_refcnt; unsigned long aca_cstamp; unsigned long aca_tstamp; + struct rcu_head rcu; }; #define IFA_HOST IPV6_ADDR_LOOPBACK diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 6e91e38a31da..9db98af46985 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -115,9 +115,8 @@ int inet6_hash(struct sock *sk); ((__sk)->sk_family == AF_INET6) && \ ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif)) || \ - ((__sk)->sk_bound_dev_if == (__sdif))) && \ + (((__sk)->sk_bound_dev_if == (__dif)) || \ + ((__sk)->sk_bound_dev_if == (__sdif))) && \ net_eq(sock_net(__sk), (__net))) #endif /* _INET6_HASHTABLES_H */ diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 3ca969cbd161..975901a95c0f 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -2,6 +2,8 @@ #ifndef _INET_COMMON_H #define _INET_COMMON_H +#include <linux/indirect_call_wrapper.h> + extern const struct proto_ops inet_stream_ops; extern const struct proto_ops inet_dgram_ops; @@ -54,4 +56,11 @@ static inline void inet_ctl_sock_destroy(struct sock *sk) sock_release(sk->sk_socket); } +#define indirect_call_gro_receive(f2, f1, cb, head, skb) \ +({ \ + unlikely(gro_recursion_inc_test(skb)) ? 
\ + NAPI_GRO_CB(skb)->flush |= 1, NULL : \ + INDIRECT_CALL_2(cb, f2, f1, head, skb); \ +}) + #endif diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 9141e95529e7..babb14136705 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -79,6 +79,7 @@ struct inet_ehash_bucket { struct inet_bind_bucket { possible_net_t ib_net; + int l3mdev; unsigned short port; signed char fastreuse; signed char fastreuseport; @@ -188,10 +189,21 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) hashinfo->ehash_locks = NULL; } +static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if, + int dif, int sdif) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept, + bound_dev_if, dif, sdif); +#else + return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); +#endif +} + struct inet_bind_bucket * inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, struct inet_bind_hashbucket *head, - const unsigned short snum); + const unsigned short snum, int l3mdev); void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb); @@ -225,6 +237,7 @@ void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name, unsigned long numentries, int scale, unsigned long low_limit, unsigned long high_limit); +int inet_hashinfo2_init_mod(struct inet_hashinfo *h); bool inet_ehash_insert(struct sock *sk, struct sock *osk); bool inet_ehash_nolisten(struct sock *sk, struct sock *osk); @@ -282,9 +295,8 @@ static inline struct sock *inet_lookup_listener(struct net *net, #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \ (((__sk)->sk_portpair == (__ports)) && \ ((__sk)->sk_addrpair == (__cookie)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif)) || \ - ((__sk)->sk_bound_dev_if == (__sdif))) && \ + (((__sk)->sk_bound_dev_if == (__dif)) || \ + ((__sk)->sk_bound_dev_if == (__sdif))) && \ net_eq(sock_net(__sk), (__net))) #else /* 32-bit arch */ #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ @@ -294,9 +306,8 @@ static inline struct sock *inet_lookup_listener(struct net *net, (((__sk)->sk_portpair == (__ports)) && \ ((__sk)->sk_daddr == (__saddr)) && \ ((__sk)->sk_rcv_saddr == (__daddr)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif)) || \ - ((__sk)->sk_bound_dev_if == (__sdif))) && \ + (((__sk)->sk_bound_dev_if == (__dif)) || \ + ((__sk)->sk_bound_dev_if == (__sdif))) && \ net_eq(sock_net(__sk), (__net))) #endif /* 64-bit arch */ diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index a80fd0ac4563..e8eef85006aa 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -130,6 +130,27 @@ static inline int inet_request_bound_dev_if(const struct sock *sk, return sk->sk_bound_dev_if; } +static inline int inet_sk_bound_l3mdev(const struct sock *sk) +{ +#ifdef CONFIG_NET_L3_MASTER_DEV + struct net *net = sock_net(sk); + + if (!net->ipv4.sysctl_tcp_l3mdev_accept) + return l3mdev_master_ifindex_by_index(net, + sk->sk_bound_dev_if); +#endif + + return 0; +} + +static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if, + int dif, int sdif) +{ + if (!bound_dev_if) + return !sdif || l3mdev_accept; + return bound_dev_if == dif || bound_dev_if == sdif; +} + struct inet_cork { unsigned int flags; __be32 addr; diff --git a/include/net/ip.h b/include/net/ip.h index 72593e171d14..8866bfce6121 100644 --- a/include/net/ip.h +++ b/include/net/ip.h 
@@ -155,6 +155,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, void ip_list_rcv(struct list_head *head, struct packet_type *pt, struct net_device *orig_dev); int ip_local_deliver(struct sk_buff *skb); +void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto); int ip_mr_input(struct sk_buff *skb); int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb); int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb); @@ -421,7 +422,8 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk, } struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, - int fc_mx_len); + int fc_mx_len, + struct netlink_ext_ack *extack); static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics) { if (fib_metrics != &dst_default_metrics && diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 236e40ba06bf..69b4bcf880c9 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -69,6 +69,8 @@ struct ip6_tnl_encap_ops { size_t (*encap_hlen)(struct ip_tunnel_encap *e); int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, u8 *protocol, struct flowi6 *fl6); + int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, + u8 type, u8 code, int offset, __be32 info); }; #ifdef CONFIG_INET diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index b0d022ff6ea1..34f019650941 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -144,25 +144,6 @@ struct ip_tunnel { bool ignore_df; }; -#define TUNNEL_CSUM __cpu_to_be16(0x01) -#define TUNNEL_ROUTING __cpu_to_be16(0x02) -#define TUNNEL_KEY __cpu_to_be16(0x04) -#define TUNNEL_SEQ __cpu_to_be16(0x08) -#define TUNNEL_STRICT __cpu_to_be16(0x10) -#define TUNNEL_REC __cpu_to_be16(0x20) -#define TUNNEL_VERSION __cpu_to_be16(0x40) -#define TUNNEL_NO_KEY __cpu_to_be16(0x80) -#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100) -#define TUNNEL_OAM __cpu_to_be16(0x0200) -#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400) -#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800) -#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000) -#define TUNNEL_NOCACHE __cpu_to_be16(0x2000) -#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000) - -#define TUNNEL_OPTIONS_PRESENT \ - (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT) - struct tnl_ptk_info { __be16 flags; __be16 proto; @@ -311,6 +292,7 @@ struct ip_tunnel_encap_ops { size_t (*encap_hlen)(struct ip_tunnel_encap *e); int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, u8 *protocol, struct flowi4 *fl4); + int (*err_handler)(struct sk_buff *skb, u32 info); }; #define MAX_IPTUN_ENCAP_OPS 8 @@ -326,6 +308,26 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, int ip_tunnel_encap_setup(struct ip_tunnel *t, struct ip_tunnel_encap *ipencap); +static inline bool pskb_inet_may_pull(struct sk_buff *skb) +{ + int nhlen; + + switch (skb->protocol) { +#if IS_ENABLED(CONFIG_IPV6) + case htons(ETH_P_IPV6): + nhlen = sizeof(struct ipv6hdr); + break; +#endif + case htons(ETH_P_IP): + nhlen = sizeof(struct iphdr); + break; + default: + nhlen = 0; + } + + return pskb_network_may_pull(skb, nhlen); +} + static inline int ip_encap_hlen(struct ip_tunnel_encap *e) { const struct ip_tunnel_encap_ops *ops; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 829650540780..daf80863d3a5 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -975,6 +975,8 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb); int 
ip6_forward(struct sk_buff *skb); int ip6_input(struct sk_buff *skb); int ip6_mc_input(struct sk_buff *skb); +void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr, + bool have_final); int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 3832099289c5..78fa0ac4613c 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -101,6 +101,17 @@ struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev) return master; } +int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex); +static inline +int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex) +{ + rcu_read_lock(); + ifindex = l3mdev_master_upper_ifindex_by_index_rcu(net, ifindex); + rcu_read_unlock(); + + return ifindex; +} + u32 l3mdev_fib_table_rcu(const struct net_device *dev); u32 l3mdev_fib_table_by_index(struct net *net, int ifindex); static inline u32 l3mdev_fib_table(const struct net_device *dev) @@ -208,6 +219,17 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex) } static inline +int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex) +{ + return 0; +} +static inline +int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex) +{ + return 0; +} + +static inline struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev) { return NULL; diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 71985e95d2d9..88219cc137c3 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -467,7 +467,7 @@ struct ieee80211_mu_group_data { }; /** - * ieee80211_ftm_responder_params - FTM responder parameters + * struct ieee80211_ftm_responder_params - FTM responder parameters * * @lci: LCI subelement content * @civicloc: CIVIC location subelement content @@ -496,6 +496,8 @@ struct ieee80211_ftm_responder_params { * @uora_ocw_range: UORA element's OCW Range field * @frame_time_rts_th: HE duration RTS threshold, in units of 32us * @he_support: does this BSS support HE + * @twt_requester: does this BSS support TWT requester (relevant for managed + * mode only, set if the AP advertises TWT responder role) * @assoc: association status * @ibss_joined: indicates whether this station is part of an IBSS * or not @@ -594,6 +596,7 @@ struct ieee80211_bss_conf { u8 uora_ocw_range; u16 frame_time_rts_th; bool he_support; + bool twt_requester; /* association related data */ bool assoc, ibss_joined; bool ibss_creator; @@ -3239,6 +3242,11 @@ enum ieee80211_reconfig_type { * When the scan finishes, ieee80211_scan_completed() must be called; * note that it also must be called when the scan cannot finish due to * any error unless this callback returned a negative error code. + * This callback is also allowed to return the special return value 1, + * this indicates that hardware scan isn't desirable right now and a + * software scan should be done instead. A driver wishing to use this + * capability must ensure its (hardware) scan capabilities aren't + * advertised as more capable than mac80211's software scan is. * The callback can sleep. * * @cancel_hw_scan: Ask the low-level tp cancel the active hw scan. @@ -3623,6 +3631,9 @@ enum ieee80211_reconfig_type { * skb is always a real frame, head may or may not be an A-MSDU. * @get_ftm_responder_stats: Retrieve FTM responder statistics, if available. 
* Statistics should be cumulative, currently no way to reset is provided. + * + * @start_pmsr: start peer measurement (e.g. FTM) (this call can sleep) + * @abort_pmsr: abort peer measurement (this call can sleep) */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -3911,6 +3922,10 @@ struct ieee80211_ops { int (*get_ftm_responder_stats)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_ftm_responder_stats *ftm_stats); + int (*start_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request); + void (*abort_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request); }; /** @@ -6091,6 +6106,14 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); * @txq: pointer obtained from station or virtual interface * * Returns the skb if successful, %NULL if no frame was available. + * + * Note that this must be called in an rcu_read_lock() critical section, + * which can only be released after the SKB was handled. Some pointers in + * skb->cb, e.g. the key pointer, are protected by by RCU and thus the + * critical section must persist not just for the duration of this call + * but for the duration of the frame handling. + * However, also note that while in the wake_tx_queue() method, + * rcu_read_lock() is already held. */ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, struct ieee80211_txq *txq); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index f58b384aa6c9..7c1ab9edba03 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -140,8 +140,8 @@ struct neighbour { unsigned long updated; rwlock_t lock; refcount_t refcnt; - struct sk_buff_head arp_queue; unsigned int arp_queue_len_bytes; + struct sk_buff_head arp_queue; struct timer_list timer; unsigned long used; atomic_t probes; @@ -149,11 +149,13 @@ struct neighbour { __u8 nud_state; __u8 type; __u8 dead; + u8 protocol; seqlock_t ha_lock; - unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; + unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8); struct hh_cache hh; int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; + struct list_head gc_list; struct rcu_head rcu; struct net_device *dev; u8 primary_key[0]; @@ -172,6 +174,7 @@ struct pneigh_entry { possible_net_t net; struct net_device *dev; u8 flags; + u8 protocol; u8 key[0]; }; @@ -214,6 +217,8 @@ struct neigh_table { struct timer_list proxy_timer; struct sk_buff_head proxy_queue; atomic_t entries; + atomic_t gc_entries; + struct list_head gc_list; rwlock_t lock; unsigned long last_rand; struct neigh_statistics __percpu *stats; @@ -250,6 +255,7 @@ static inline void *neighbour_priv(const struct neighbour *n) #define NEIGH_UPDATE_F_ISROUTER 0x40000000 #define NEIGH_UPDATE_F_ADMIN 0x80000000 +extern const struct nla_policy nda_policy[]; static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey) { @@ -454,6 +460,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) { + unsigned int hh_alen = 0; unsigned int seq; unsigned int hh_len; @@ -461,16 +468,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb seq = read_seqbegin(&hh->hh_lock); hh_len = hh->hh_len; if (likely(hh_len <= HH_DATA_MOD)) { - /* this is inlined by gcc */ - memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD); + hh_alen = HH_DATA_MOD; + + /* 
skb_push() would proceed silently if we have room for + * the unaligned size but not for the aligned size: + * check headroom explicitly. + */ + if (likely(skb_headroom(skb) >= HH_DATA_MOD)) { + /* this is inlined by gcc */ + memcpy(skb->data - HH_DATA_MOD, hh->hh_data, + HH_DATA_MOD); + } } else { - unsigned int hh_alen = HH_DATA_ALIGN(hh_len); + hh_alen = HH_DATA_ALIGN(hh_len); - memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); + if (likely(skb_headroom(skb) >= hh_alen)) { + memcpy(skb->data - hh_alen, hh->hh_data, + hh_alen); + } } } while (read_seqretry(&hh->hh_lock, seq)); - skb_push(skb, hh_len); + if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) { + kfree_skb(skb); + return NET_XMIT_DROP; + } + + __skb_push(skb, hh_len); return dev_queue_xmit(skb); } @@ -528,24 +552,6 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, } while (read_seqretry(&n->ha_lock, seq)); } -static inline void neigh_update_ext_learned(struct neighbour *neigh, u32 flags, - int *notify) -{ - u8 ndm_flags = 0; - - if (!(flags & NEIGH_UPDATE_F_ADMIN)) - return; - - ndm_flags |= (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0; - if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) { - if (ndm_flags & NTF_EXT_LEARNED) - neigh->flags |= NTF_EXT_LEARNED; - else - neigh->flags &= ~NTF_EXT_LEARNED; - *notify = 1; - } -} - static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags, int *notify) { diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h index 74af19c3a8f7..4cd56808ac4e 100644 --- a/include/net/netfilter/br_netfilter.h +++ b/include/net/netfilter/br_netfilter.h @@ -6,12 +6,12 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) { - skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC); + struct nf_bridge_info *b = skb_ext_add(skb, SKB_EXT_BRIDGE_NF); - if (likely(skb->nf_bridge)) - refcount_set(&(skb->nf_bridge->use), 1); + if (b) + memset(b, 0, sizeof(*b)); - return skb->nf_bridge; + return b; } void nf_bridge_update_protocol(struct sk_buff *skb); @@ -22,12 +22,6 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk, int (*okfn)(struct net *, struct sock *, struct sk_buff *)); -static inline struct nf_bridge_info * -nf_bridge_info_get(const struct sk_buff *skb) -{ - return skb->nf_bridge; -} - unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb); static inline void nf_bridge_push_encap_header(struct sk_buff *skb) diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h index cd24be4c4a99..13d55206bb9f 100644 --- a/include/net/netfilter/ipv4/nf_nat_masquerade.h +++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h @@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, const struct nf_nat_range2 *range, const struct net_device *out); -void nf_nat_masquerade_ipv4_register_notifier(void); +int nf_nat_masquerade_ipv4_register_notifier(void); void nf_nat_masquerade_ipv4_unregister_notifier(void); #endif /*_NF_NAT_MASQUERADE_IPV4_H_ */ diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h index 0c3b5ebf0bb8..2917bf95c437 100644 --- a/include/net/netfilter/ipv6/nf_nat_masquerade.h +++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h @@ -5,7 +5,7 @@ unsigned int nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, const struct net_device *out); -void nf_nat_masquerade_ipv6_register_notifier(void); +int 
nf_nat_masquerade_ipv6_register_notifier(void); void nf_nat_masquerade_ipv6_unregister_notifier(void); #endif /* _NF_NAT_MASQUERADE_IPV6_H_ */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 7e012312cd61..249d0a5b12b8 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -27,12 +27,17 @@ #include <net/netfilter/nf_conntrack_tuple.h> +struct nf_ct_udp { + unsigned long stream_ts; +}; + /* per conntrack: protocol private data */ union nf_conntrack_proto { /* insert conntrack proto private data here */ struct nf_ct_dccp dccp; struct ip_ct_sctp sctp; struct ip_ct_tcp tcp; + struct nf_ct_udp udp; struct nf_ct_gre gre; unsigned int tmpl_padto; }; diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h index 79d8d16732b4..bc6745d3010e 100644 --- a/include/net/netfilter/nf_conntrack_acct.h +++ b/include/net/netfilter/nf_conntrack_acct.h @@ -46,9 +46,6 @@ struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp) return acct; }; -unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct, - int dir); - /* Check if connection tracking accounting is enabled */ static inline bool nf_ct_acct_enabled(struct net *net) { @@ -61,8 +58,7 @@ static inline void nf_ct_set_acct(struct net *net, bool enable) net->ct.sysctl_acct = enable; } -int nf_conntrack_acct_pernet_init(struct net *net); -void nf_conntrack_acct_pernet_fini(struct net *net); +void nf_conntrack_acct_pernet_init(struct net *net); int nf_conntrack_acct_init(void); void nf_conntrack_acct_fini(void); diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h index 4b2b2baf8ab4..f32fc8289473 100644 --- a/include/net/netfilter/nf_conntrack_count.h +++ b/include/net/netfilter/nf_conntrack_count.h @@ -5,17 +5,10 @@ struct nf_conncount_data; -enum nf_conncount_list_add { - NF_CONNCOUNT_ADDED, /* list add was ok */ - NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */ - NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */ -}; - struct nf_conncount_list { spinlock_t list_lock; struct list_head head; /* connections with the same filtering key */ unsigned int count; /* length of list */ - bool dead; }; struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family, @@ -29,18 +22,12 @@ unsigned int nf_conncount_count(struct net *net, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_zone *zone); -void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list, - const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_zone *zone, - bool *addit); +int nf_conncount_add(struct net *net, struct nf_conncount_list *list, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone); void nf_conncount_list_init(struct nf_conncount_list *list); -enum nf_conncount_list_add -nf_conncount_add(struct nf_conncount_list *list, - const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_zone *zone); - bool nf_conncount_gc_list(struct net *net, struct nf_conncount_list *list); diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 3f1ce9a8776e..52b44192b43f 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -142,7 +142,7 @@ void nf_ct_expect_event_report(enum ip_conntrack_expect_events event, struct nf_conntrack_expect *exp, u32 portid, int report); -int 
nf_conntrack_ecache_pernet_init(struct net *net); +void nf_conntrack_ecache_pernet_init(struct net *net); void nf_conntrack_ecache_pernet_fini(struct net *net); int nf_conntrack_ecache_init(void); @@ -182,10 +182,7 @@ static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e, u32 portid, int report) {} -static inline int nf_conntrack_ecache_pernet_init(struct net *net) -{ - return 0; -} +static inline void nf_conntrack_ecache_pernet_init(struct net *net) {} static inline void nf_conntrack_ecache_pernet_fini(struct net *net) { diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index 2492120b8097..ec52a8dc32fd 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -124,8 +124,7 @@ static inline void *nfct_help_data(const struct nf_conn *ct) return (void *)help->data; } -int nf_conntrack_helper_pernet_init(struct net *net); -void nf_conntrack_helper_pernet_fini(struct net *net); +void nf_conntrack_helper_pernet_init(struct net *net); int nf_conntrack_helper_init(void); void nf_conntrack_helper_fini(void); diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index eed04af9b75e..ae7b86f587f2 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -153,4 +153,43 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb, const char *fmt, ...) { } #endif /* CONFIG_SYSCTL */ +static inline struct nf_generic_net *nf_generic_pernet(struct net *net) +{ + return &net->ct.nf_ct_proto.generic; +} + +static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net) +{ + return &net->ct.nf_ct_proto.tcp; +} + +static inline struct nf_udp_net *nf_udp_pernet(struct net *net) +{ + return &net->ct.nf_ct_proto.udp; +} + +static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net) +{ + return &net->ct.nf_ct_proto.icmp; +} + +static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net) +{ + return &net->ct.nf_ct_proto.icmpv6; +} + +#ifdef CONFIG_NF_CT_PROTO_DCCP +static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net) +{ + return &net->ct.nf_ct_proto.dccp; +} +#endif + +#ifdef CONFIG_NF_CT_PROTO_SCTP +static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net) +{ + return &net->ct.nf_ct_proto.sctp; +} +#endif + #endif /*_NF_CONNTRACK_PROTOCOL_H*/ diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h index 3b661986be8f..0ed617bf0a3d 100644 --- a/include/net/netfilter/nf_conntrack_timestamp.h +++ b/include/net/netfilter/nf_conntrack_timestamp.h @@ -49,21 +49,12 @@ static inline void nf_ct_set_tstamp(struct net *net, bool enable) } #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP -int nf_conntrack_tstamp_pernet_init(struct net *net); -void nf_conntrack_tstamp_pernet_fini(struct net *net); +void nf_conntrack_tstamp_pernet_init(struct net *net); int nf_conntrack_tstamp_init(void); void nf_conntrack_tstamp_fini(void); #else -static inline int nf_conntrack_tstamp_pernet_init(struct net *net) -{ - return 0; -} - -static inline void nf_conntrack_tstamp_pernet_fini(struct net *net) -{ - return; -} +static inline void nf_conntrack_tstamp_pernet_init(struct net *net) {} static inline int nf_conntrack_tstamp_init(void) { diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 77e2761d4f2f..7d5cda7ce32a 100644 --- a/include/net/netfilter/nf_flow_table.h +++ 
b/include/net/netfilter/nf_flow_table.h @@ -95,10 +95,6 @@ void flow_offload_free(struct flow_offload *flow); int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow); struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table, struct flow_offload_tuple *tuple); -int nf_flow_table_iterate(struct nf_flowtable *flow_table, - void (*iter)(struct flow_offload *flow, void *data), - void *data); - void nf_flow_table_cleanup(struct net_device *dev); int nf_flow_table_init(struct nf_flowtable *flow_table); diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h index d300b8f03972..d774ca0c4c5e 100644 --- a/include/net/netfilter/nf_nat_l3proto.h +++ b/include/net/netfilter/nf_nat_l3proto.h @@ -2,18 +2,11 @@ #ifndef _NF_NAT_L3PROTO_H #define _NF_NAT_L3PROTO_H -struct nf_nat_l4proto; struct nf_nat_l3proto { u8 l3proto; - bool (*in_range)(const struct nf_conntrack_tuple *t, - const struct nf_nat_range2 *range); - - u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16); - bool (*manip_pkt)(struct sk_buff *skb, unsigned int iphdroff, - const struct nf_nat_l4proto *l4proto, const struct nf_conntrack_tuple *target, enum nf_nat_manip_type maniptype); diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h index b4d6b29bca62..95a4655bd1ad 100644 --- a/include/net/netfilter/nf_nat_l4proto.h +++ b/include/net/netfilter/nf_nat_l4proto.h @@ -5,78 +5,12 @@ #include <net/netfilter/nf_nat.h> #include <linux/netfilter/nfnetlink_conntrack.h> -struct nf_nat_range; struct nf_nat_l3proto; -struct nf_nat_l4proto { - /* Protocol number. */ - u8 l4proto; - - /* Translate a packet to the target according to manip type. - * Return true if succeeded. - */ - bool (*manip_pkt)(struct sk_buff *skb, - const struct nf_nat_l3proto *l3proto, - unsigned int iphdroff, unsigned int hdroff, - const struct nf_conntrack_tuple *tuple, - enum nf_nat_manip_type maniptype); - - /* Is the manipable part of the tuple between min and max incl? */ - bool (*in_range)(const struct nf_conntrack_tuple *tuple, - enum nf_nat_manip_type maniptype, - const union nf_conntrack_man_proto *min, - const union nf_conntrack_man_proto *max); - - /* Alter the per-proto part of the tuple (depending on - * maniptype), to give a unique tuple in the given range if - * possible. Per-protocol part of tuple is initialized to the - * incoming packet. - */ - void (*unique_tuple)(const struct nf_nat_l3proto *l3proto, - struct nf_conntrack_tuple *tuple, - const struct nf_nat_range2 *range, - enum nf_nat_manip_type maniptype, - const struct nf_conn *ct); - - int (*nlattr_to_range)(struct nlattr *tb[], - struct nf_nat_range2 *range); -}; - -/* Protocol registration. */ -int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto); -void nf_nat_l4proto_unregister(u8 l3proto, - const struct nf_nat_l4proto *l4proto); - -const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto); - -/* Built-in protocols. 
*/ -extern const struct nf_nat_l4proto nf_nat_l4proto_tcp; -extern const struct nf_nat_l4proto nf_nat_l4proto_udp; -extern const struct nf_nat_l4proto nf_nat_l4proto_icmp; -extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6; -extern const struct nf_nat_l4proto nf_nat_l4proto_unknown; -#ifdef CONFIG_NF_NAT_PROTO_DCCP -extern const struct nf_nat_l4proto nf_nat_l4proto_dccp; -#endif -#ifdef CONFIG_NF_NAT_PROTO_SCTP -extern const struct nf_nat_l4proto nf_nat_l4proto_sctp; -#endif -#ifdef CONFIG_NF_NAT_PROTO_UDPLITE -extern const struct nf_nat_l4proto nf_nat_l4proto_udplite; -#endif - -bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple, - enum nf_nat_manip_type maniptype, - const union nf_conntrack_man_proto *min, - const union nf_conntrack_man_proto *max); - -void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, - struct nf_conntrack_tuple *tuple, - const struct nf_nat_range2 *range, - enum nf_nat_manip_type maniptype, - const struct nf_conn *ct, u16 *rover); - -int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[], - struct nf_nat_range2 *range); - +/* Translate a packet to the target according to manip type. Return on success. */ +bool nf_nat_l4proto_manip_pkt(struct sk_buff *skb, + const struct nf_nat_l3proto *l3proto, + unsigned int iphdroff, unsigned int hdroff, + const struct nf_conntrack_tuple *tuple, + enum nf_nat_manip_type maniptype); #endif /*_NF_NAT_L4PROTO_H*/ diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 9795d628a127..51cba0b8adf5 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -97,18 +97,14 @@ struct netns_ct { struct delayed_work ecache_dwork; bool ecache_dwork_pending; #endif + bool auto_assign_helper_warned; #ifdef CONFIG_SYSCTL struct ctl_table_header *sysctl_header; - struct ctl_table_header *acct_sysctl_header; - struct ctl_table_header *tstamp_sysctl_header; - struct ctl_table_header *event_sysctl_header; - struct ctl_table_header *helper_sysctl_header; #endif unsigned int sysctl_log_invalid; /* Log invalid packets */ int sysctl_events; int sysctl_acct; int sysctl_auto_assign_helper; - bool auto_assign_helper_warned; int sysctl_tstamp; int sysctl_checksum; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index e47503b4e4d1..104a6669e344 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -103,6 +103,9 @@ struct netns_ipv4 { /* Shall we try to damage output packets if routing dev changes? 
*/ int sysctl_ip_dynaddr; int sysctl_ip_early_demux; +#ifdef CONFIG_NET_L3_MASTER_DEV + int sysctl_raw_l3mdev_accept; +#endif int sysctl_tcp_early_demux; int sysctl_udp_early_demux; diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 9991e5ef52cc..59f45b1e9dac 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -5,6 +5,7 @@ #include <linux/list.h> #include <linux/wait.h> #include <linux/workqueue.h> +#include <linux/rhashtable-types.h> #include <linux/xfrm.h> #include <net/dst_ops.h> @@ -53,6 +54,7 @@ struct netns_xfrm { unsigned int policy_count[XFRM_POLICY_MAX * 2]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; + struct list_head inexact_bins; struct sock *nlsk; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 72ffb3120ced..40965fbbcd31 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -81,6 +81,14 @@ void __tcf_block_cb_unregister(struct tcf_block *block, struct tcf_block_cb *block_cb); void tcf_block_cb_unregister(struct tcf_block *block, tc_setup_cb_t *cb, void *cb_ident); +int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident); +int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident); +void __tc_indr_block_cb_unregister(struct net_device *dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident); +void tc_indr_block_cb_unregister(struct net_device *dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident); int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode); @@ -183,6 +191,32 @@ void tcf_block_cb_unregister(struct tcf_block *block, { } +static inline +int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ + return 0; +} + +static inline +int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ + return 0; +} + +static inline +void __tc_indr_block_cb_unregister(struct net_device *dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ +} + +static inline +void tc_indr_block_cb_unregister(struct net_device *dev, + tc_indr_block_bind_cb_t *cb, void *cb_ident) +{ +} + static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode) { @@ -585,8 +619,8 @@ tcf_match_indev(struct sk_buff *skb, int ifindex) } #endif /* CONFIG_NET_CLS_IND */ -int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts, - enum tc_setup_type type, void *type_data, bool err_stop); +int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, + void *type_data, bool err_stop); enum tc_block_command { TC_BLOCK_BIND, @@ -609,6 +643,7 @@ struct tc_cls_common_offload { struct tc_cls_u32_knode { struct tcf_exts *exts; + struct tcf_result *res; struct tc_u32_sel *sel; u32 handle; u32 val; @@ -787,12 +822,21 @@ enum tc_mq_command { TC_MQ_CREATE, TC_MQ_DESTROY, TC_MQ_STATS, + TC_MQ_GRAFT, +}; + +struct tc_mq_opt_offload_graft_params { + unsigned long queue; + u32 child_handle; }; struct tc_mq_qopt_offload { enum tc_mq_command command; u32 handle; - struct tc_qopt_offload_stats stats; + union { + struct tc_qopt_offload_stats stats; + struct tc_mq_opt_offload_graft_params graft_params; + }; }; enum tc_red_command { @@ -800,13 +844,16 @@ enum tc_red_command { TC_RED_DESTROY, TC_RED_STATS, TC_RED_XSTATS, + TC_RED_GRAFT, }; struct tc_red_qopt_offload_params { 
u32 min; u32 max; u32 probability; + u32 limit; bool is_ecn; + bool is_harddrop; struct gnet_stats_queue *qstats; }; @@ -818,6 +865,51 @@ struct tc_red_qopt_offload { struct tc_red_qopt_offload_params set; struct tc_qopt_offload_stats stats; struct red_stats *xstats; + u32 child_handle; + }; +}; + +enum tc_gred_command { + TC_GRED_REPLACE, + TC_GRED_DESTROY, + TC_GRED_STATS, +}; + +struct tc_gred_vq_qopt_offload_params { + bool present; + u32 limit; + u32 prio; + u32 min; + u32 max; + bool is_ecn; + bool is_harddrop; + u32 probability; + /* Only need backlog, see struct tc_prio_qopt_offload_params */ + u32 *backlog; +}; + +struct tc_gred_qopt_offload_params { + bool grio_on; + bool wred_on; + unsigned int dp_cnt; + unsigned int dp_def; + struct gnet_stats_queue *qstats; + struct tc_gred_vq_qopt_offload_params tab[MAX_DPs]; +}; + +struct tc_gred_qopt_offload_stats { + struct gnet_stats_basic_packed bstats[MAX_DPs]; + struct gnet_stats_queue qstats[MAX_DPs]; + struct red_stats *xstats[MAX_DPs]; +}; + +struct tc_gred_qopt_offload { + enum tc_gred_command command; + u32 handle; + u32 parent; + union { + struct tc_gred_qopt_offload_params set; + struct tc_gred_qopt_offload_stats stats; }; }; @@ -854,4 +946,14 @@ struct tc_prio_qopt_offload { }; }; +enum tc_root_command { + TC_ROOT_GRAFT, +}; + +struct tc_root_qopt_offload { + enum tc_root_command command; + u32 handle; + bool ingress; +}; + #endif diff --git a/include/net/protocol.h b/include/net/protocol.h index 4fc75f7ae23b..92b3eaad6088 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -42,7 +42,10 @@ struct net_protocol { int (*early_demux)(struct sk_buff *skb); int (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); - void (*err_handler)(struct sk_buff *skb, u32 info); + + /* This returns an error if we weren't able to handle the error. */ + int (*err_handler)(struct sk_buff *skb, u32 info); + unsigned int no_policy:1, netns_ok:1, /* does the protocol do more stringent @@ -58,10 +61,12 @@ struct inet6_protocol { void (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); - void (*err_handler)(struct sk_buff *skb, + /* This returns an error if we weren't able to handle the error. 
*/ + int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info); + unsigned int flags; /* INET6_PROTO_xxx */ }; diff --git a/include/net/raw.h b/include/net/raw.h index 9c9fa98a91a4..821ff4887f77 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -17,7 +17,7 @@ #ifndef _RAW_H #define _RAW_H - +#include <net/inet_sock.h> #include <net/protocol.h> #include <linux/icmp.h> @@ -61,6 +61,7 @@ void raw_seq_stop(struct seq_file *seq, void *v); int raw_hash_sk(struct sock *sk); void raw_unhash_sk(struct sock *sk); +void raw_init(void); struct raw_sock { /* inet_sock has to be the first member */ @@ -74,4 +75,15 @@ static inline struct raw_sock *raw_sk(const struct sock *sk) return (struct raw_sock *)sk; } +static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if, + int dif, int sdif) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept, + bound_dev_if, dif, sdif); +#else + return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); +#endif +} + #endif /* _RAW_H */ diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index cf26e5aacac4..e2091bb2b3a8 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -159,7 +159,8 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]); struct net_device *rtnl_create_link(struct net *net, const char *ifname, unsigned char name_assign_type, const struct rtnl_link_ops *ops, - struct nlattr *tb[]); + struct nlattr *tb[], + struct netlink_ext_ack *extack); int rtnl_delete_link(struct net_device *dev); int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 4d736427a4cb..9481f2c142e2 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -24,6 +24,9 @@ struct bpf_flow_keys; typedef int tc_setup_cb_t(enum tc_setup_type type, void *type_data, void *cb_priv); +typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, + enum tc_setup_type type, void *type_data); + struct qdisc_rate_table { struct tc_ratespec rate; u32 data[256]; @@ -579,6 +582,30 @@ void qdisc_put(struct Qdisc *qdisc); void qdisc_put_unlocked(struct Qdisc *qdisc); void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n, unsigned int len); +#ifdef CONFIG_NET_SCHED +int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type, + void *type_data); +void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch, + struct Qdisc *new, struct Qdisc *old, + enum tc_setup_type type, void *type_data, + struct netlink_ext_ack *extack); +#else +static inline int +qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type, + void *type_data) +{ + q->flags &= ~TCQ_F_OFFLOADED; + return 0; +} + +static inline void +qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch, + struct Qdisc *new, struct Qdisc *old, + enum tc_setup_type type, void *type_data, + struct netlink_ext_ack *extack) +{ +} +#endif struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, const struct Qdisc_ops *ops, struct netlink_ext_ack *extack); diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 8dadc74c22e7..4588bdc2b8f0 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -71,7 +71,7 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM }; SCTP_NUM_AUTH_CHUNK_TYPES) /* These are the different flavours of event. 
*/ -enum sctp_event { +enum sctp_event_type { SCTP_EVENT_T_CHUNK = 1, SCTP_EVENT_T_TIMEOUT, SCTP_EVENT_T_OTHER, diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 8c2caa370e0f..1d13ec3f2707 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -151,8 +151,8 @@ int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc, * sctp/input.c */ int sctp_rcv(struct sk_buff *skb); -void sctp_v4_err(struct sk_buff *skb, u32 info); -void sctp_hash_endpoint(struct sctp_endpoint *); +int sctp_v4_err(struct sk_buff *skb, u32 info); +int sctp_hash_endpoint(struct sctp_endpoint *ep); void sctp_unhash_endpoint(struct sctp_endpoint *); struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, struct sctphdr *, struct sctp_association **, @@ -608,4 +608,21 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst) SCTP_DEFAULT_MINSEGMENT)); } +static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) +{ + __u32 pmtu = sctp_dst_mtu(t->dst); + + if (t->pathmtu == pmtu) + return true; + + t->pathmtu = pmtu; + + return false; +} + +static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize) +{ + return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize); +} + #endif /* __net_sctp_h__ */ diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 9e3d32746430..24825a81829e 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -173,7 +173,7 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire; __u8 sctp_get_chunk_type(struct sctp_chunk *chunk); const struct sctp_sm_table_entry *sctp_sm_lookup_event( struct net *net, - enum sctp_event event_type, + enum sctp_event_type event_type, enum sctp_state state, union sctp_subtype event_subtype); int sctp_chunk_iif(const struct sctp_chunk *); @@ -313,7 +313,7 @@ struct sctp_chunk *sctp_process_strreset_resp( /* Prototypes for statetable processing. */ -int sctp_do_sm(struct net *net, enum sctp_event event_type, +int sctp_do_sm(struct net *net, enum sctp_event_type event_type, union sctp_subtype subtype, enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, gfp_t gfp); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index a11f93790476..003020eb6e66 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -96,7 +96,9 @@ struct sctp_stream; struct sctp_bind_bucket { unsigned short port; - unsigned short fastreuse; + signed char fastreuse; + signed char fastreuseport; + kuid_t fastuid; struct hlist_node node; struct hlist_head owner; struct net *net; @@ -215,7 +217,7 @@ struct sctp_sock { * These two structures must be grouped together for the usercopy * whitelist region. 
*/ - struct sctp_event_subscribe subscribe; + __u16 subscribe; struct sctp_initmsg initmsg; int user_frag; @@ -1190,6 +1192,8 @@ int sctp_bind_addr_conflict(struct sctp_bind_addr *, const union sctp_addr *, struct sctp_sock *, struct sctp_sock *); int sctp_bind_addr_state(const struct sctp_bind_addr *bp, const union sctp_addr *addr); +int sctp_bind_addrs_check(struct sctp_sock *sp, + struct sctp_sock *sp2, int cnt2); union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, const union sctp_addr *addrs, int addrcnt, @@ -2073,8 +2077,12 @@ struct sctp_association { int sent_cnt_removable; + __u16 subscribe; + __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1]; __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1]; + + struct rcu_head rcu; }; diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 51b4e0626c34..bd922a0fe914 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -164,30 +164,39 @@ void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event); +static inline void sctp_ulpevent_type_set(__u16 *subscribe, + __u16 sn_type, __u8 on) +{ + if (sn_type > SCTP_SN_TYPE_MAX) + return; + + if (on) + *subscribe |= (1 << (sn_type - SCTP_SN_TYPE_BASE)); + else + *subscribe &= ~(1 << (sn_type - SCTP_SN_TYPE_BASE)); +} + /* Is this event type enabled? */ -static inline int sctp_ulpevent_type_enabled(__u16 sn_type, - struct sctp_event_subscribe *mask) +static inline bool sctp_ulpevent_type_enabled(__u16 subscribe, __u16 sn_type) { - int offset = sn_type - SCTP_SN_TYPE_BASE; - char *amask = (char *) mask; + if (sn_type > SCTP_SN_TYPE_MAX) + return false; - if (offset >= sizeof(struct sctp_event_subscribe)) - return 0; - return amask[offset]; + return subscribe & (1 << (sn_type - SCTP_SN_TYPE_BASE)); } /* Given an event subscription, is this event enabled? 
*/ -static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event, - struct sctp_event_subscribe *mask) +static inline bool sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event, + __u16 subscribe) { __u16 sn_type; - int enabled = 1; - if (sctp_ulpevent_is_notification(event)) { - sn_type = sctp_ulpevent_get_notification_type(event); - enabled = sctp_ulpevent_type_enabled(sn_type, mask); - } - return enabled; + if (!sctp_ulpevent_is_notification(event)) + return true; + + sn_type = sctp_ulpevent_get_notification_type(event); + + return sctp_ulpevent_type_enabled(subscribe, sn_type); } #endif /* __sctp_ulpevent_h__ */ diff --git a/include/net/seg6.h b/include/net/seg6.h index 2567941a2f32..8b2dc6869fd1 100644 --- a/include/net/seg6.h +++ b/include/net/seg6.h @@ -16,7 +16,6 @@ #include <linux/net.h> #include <linux/ipv6.h> -#include <net/lwtunnel.h> #include <linux/seg6.h> #include <linux/rhashtable-types.h> diff --git a/include/net/sock.h b/include/net/sock.h index f665d74ae509..2b229f7be8eb 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -298,6 +298,7 @@ struct sock_common { * @sk_filter: socket filtering instructions * @sk_timer: sock cleanup timer * @sk_stamp: time stamp of last packet received + * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only * @sk_tsflags: SO_TIMESTAMPING socket options * @sk_tskey: counter to disambiguate concurrent tstamp requests * @sk_zckey: counter to order MSG_ZEROCOPY notifications @@ -474,6 +475,9 @@ struct sock { const struct cred *sk_peer_cred; long sk_rcvtimeo; ktime_t sk_stamp; +#if BITS_PER_LONG==32 + seqlock_t sk_stamp_seq; +#endif u16 sk_tsflags; u8 sk_shutdown; u32 sk_tskey; @@ -1110,7 +1114,7 @@ struct proto { unsigned int inuse_idx; #endif - bool (*stream_memory_free)(const struct sock *sk); + bool (*stream_memory_free)(const struct sock *sk, int wake); bool (*stream_memory_read)(const struct sock *sk); /* Memory pressure */ void (*enter_memory_pressure)(struct sock *sk); @@ -1192,19 +1196,29 @@ static inline void sk_refcnt_debug_release(const struct sock *sk) #define sk_refcnt_debug_release(sk) do { } while (0) #endif /* SOCK_REFCNT_DEBUG */ -static inline bool sk_stream_memory_free(const struct sock *sk) +static inline bool __sk_stream_memory_free(const struct sock *sk, int wake) { if (sk->sk_wmem_queued >= sk->sk_sndbuf) return false; return sk->sk_prot->stream_memory_free ? 
- sk->sk_prot->stream_memory_free(sk) : true; + sk->sk_prot->stream_memory_free(sk, wake) : true; } -static inline bool sk_stream_is_writeable(const struct sock *sk) +static inline bool sk_stream_memory_free(const struct sock *sk) +{ + return __sk_stream_memory_free(sk, 0); +} + +static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake) { return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && - sk_stream_memory_free(sk); + __sk_stream_memory_free(sk, wake); +} + +static inline bool sk_stream_is_writeable(const struct sock *sk) +{ + return __sk_stream_is_writeable(sk, 0); } static inline int sk_under_cgroup_hierarchy(struct sock *sk, @@ -2287,6 +2301,34 @@ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb) atomic_add(segs, &sk->sk_drops); } +static inline ktime_t sock_read_timestamp(struct sock *sk) +{ +#if BITS_PER_LONG==32 + unsigned int seq; + ktime_t kt; + + do { + seq = read_seqbegin(&sk->sk_stamp_seq); + kt = sk->sk_stamp; + } while (read_seqretry(&sk->sk_stamp_seq, seq)); + + return kt; +#else + return sk->sk_stamp; +#endif +} + +static inline void sock_write_timestamp(struct sock *sk, ktime_t kt) +{ +#if BITS_PER_LONG==32 + write_seqlock(&sk->sk_stamp_seq); + sk->sk_stamp = kt; + write_sequnlock(&sk->sk_stamp_seq); +#else + sk->sk_stamp = kt; +#endif +} + void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, @@ -2311,7 +2353,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) __sock_recv_timestamp(msg, sk, skb); else - sk->sk_stamp = kt; + sock_write_timestamp(sk, kt); if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid) __sock_recv_wifi_status(msg, sk, skb); @@ -2332,30 +2374,47 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY) __sock_recv_ts_and_drops(msg, sk, skb); else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP))) - sk->sk_stamp = skb->tstamp; + sock_write_timestamp(sk, skb->tstamp); else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP)) - sk->sk_stamp = 0; + sock_write_timestamp(sk, 0); } void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags); /** - * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped + * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped * @sk: socket sending this packet * @tsflags: timestamping flags to use * @tx_flags: completed with instructions for time stamping + * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno) * * Note: callers should take care of initial ``*tx_flags`` value (usually 0) */ -static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags, - __u8 *tx_flags) +static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags, + __u8 *tx_flags, __u32 *tskey) { - if (unlikely(tsflags)) + if (unlikely(tsflags)) { __sock_tx_timestamp(tsflags, tx_flags); + if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey && + tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) + *tskey = sk->sk_tskey++; + } if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) *tx_flags |= SKBTX_WIFI_STATUS; } +static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags, + __u8 *tx_flags) +{ + _sock_tx_timestamp(sk, tsflags, tx_flags, NULL); +} + +static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags) +{ + _sock_tx_timestamp(skb->sk, tsflags, 
&skb_shinfo(skb)->tx_flags, + &skb_shinfo(skb)->tskey); +} + /** * sk_eat_skb - Release a skb if it is no longer needed * @sk: socket to eat this skb from diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 881ecb1555bf..a7fdab5ee6c3 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -95,8 +95,8 @@ struct switchdev_obj_port_vlan { u16 vid_end; }; -#define SWITCHDEV_OBJ_PORT_VLAN(obj) \ - container_of(obj, struct switchdev_obj_port_vlan, obj) +#define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \ + container_of((OBJ), struct switchdev_obj_port_vlan, obj) /* SWITCHDEV_OBJ_ID_PORT_MDB */ struct switchdev_obj_port_mdb { @@ -105,8 +105,8 @@ struct switchdev_obj_port_mdb { u16 vid; }; -#define SWITCHDEV_OBJ_PORT_MDB(obj) \ - container_of(obj, struct switchdev_obj_port_mdb, obj) +#define SWITCHDEV_OBJ_PORT_MDB(OBJ) \ + container_of((OBJ), struct switchdev_obj_port_mdb, obj) void switchdev_trans_item_enqueue(struct switchdev_trans *trans, void *data, void (*destructor)(void const *), @@ -121,10 +121,6 @@ typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj); * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr). * * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr). - * - * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*). - * - * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*). */ struct switchdev_ops { int (*switchdev_port_attr_get)(struct net_device *dev, @@ -132,11 +128,6 @@ struct switchdev_ops { int (*switchdev_port_attr_set)(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans); - int (*switchdev_port_obj_add)(struct net_device *dev, - const struct switchdev_obj *obj, - struct switchdev_trans *trans); - int (*switchdev_port_obj_del)(struct net_device *dev, - const struct switchdev_obj *obj); }; enum switchdev_notifier_type { @@ -146,6 +137,11 @@ enum switchdev_notifier_type { SWITCHDEV_FDB_DEL_TO_DEVICE, SWITCHDEV_FDB_OFFLOADED, + SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */ + SWITCHDEV_PORT_OBJ_DEL, /* Blocking. 
*/ + + SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE, + SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE, SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE, SWITCHDEV_VXLAN_FDB_OFFLOADED, @@ -153,6 +149,7 @@ enum switchdev_notifier_type { struct switchdev_notifier_info { struct net_device *dev; + struct netlink_ext_ack *extack; }; struct switchdev_notifier_fdb_info { @@ -163,12 +160,25 @@ struct switchdev_notifier_fdb_info { offloaded:1; }; +struct switchdev_notifier_port_obj_info { + struct switchdev_notifier_info info; /* must be first */ + const struct switchdev_obj *obj; + struct switchdev_trans *trans; + bool handled; +}; + static inline struct net_device * switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info) { return info->dev; } +static inline struct netlink_ext_ack * +switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info) +{ + return info->extack; +} + #ifdef CONFIG_NET_SWITCHDEV void switchdev_deferred_process(void); @@ -177,13 +187,22 @@ int switchdev_port_attr_get(struct net_device *dev, int switchdev_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr); int switchdev_port_obj_add(struct net_device *dev, - const struct switchdev_obj *obj); + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack); int switchdev_port_obj_del(struct net_device *dev, const struct switchdev_obj *obj); + int register_switchdev_notifier(struct notifier_block *nb); int unregister_switchdev_notifier(struct notifier_block *nb); int call_switchdev_notifiers(unsigned long val, struct net_device *dev, struct switchdev_notifier_info *info); + +int register_switchdev_blocking_notifier(struct notifier_block *nb); +int unregister_switchdev_blocking_notifier(struct notifier_block *nb); +int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev, + struct switchdev_notifier_info *info, + struct netlink_ext_ack *extack); + void switchdev_port_fwd_mark_set(struct net_device *dev, struct net_device *group_dev, bool joining); @@ -191,6 +210,19 @@ void switchdev_port_fwd_mark_set(struct net_device *dev, bool switchdev_port_same_parent_id(struct net_device *a, struct net_device *b); +int switchdev_handle_port_obj_add(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + bool (*check_cb)(const struct net_device *dev), + int (*add_cb)(struct net_device *dev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans, + struct netlink_ext_ack *extack)); +int switchdev_handle_port_obj_del(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + bool (*check_cb)(const struct net_device *dev), + int (*del_cb)(struct net_device *dev, + const struct switchdev_obj *obj)); + #define SWITCHDEV_SET_OPS(netdev, ops) ((netdev)->switchdev_ops = (ops)) #else @@ -211,7 +243,8 @@ static inline int switchdev_port_attr_set(struct net_device *dev, } static inline int switchdev_port_obj_add(struct net_device *dev, - const struct switchdev_obj *obj) + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } @@ -239,12 +272,55 @@ static inline int call_switchdev_notifiers(unsigned long val, return NOTIFY_DONE; } +static inline int +register_switchdev_blocking_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int +unregister_switchdev_blocking_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int +call_switchdev_blocking_notifiers(unsigned long val, + struct net_device *dev, + struct switchdev_notifier_info *info, + 
struct netlink_ext_ack *extack) +{ + return NOTIFY_DONE; +} + static inline bool switchdev_port_same_parent_id(struct net_device *a, struct net_device *b) { return false; } +static inline int +switchdev_handle_port_obj_add(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + bool (*check_cb)(const struct net_device *dev), + int (*add_cb)(struct net_device *dev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans, + struct netlink_ext_ack *extack)) +{ + return 0; +} + +static inline int +switchdev_handle_port_obj_del(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + bool (*check_cb)(const struct net_device *dev), + int (*del_cb)(struct net_device *dev, + const struct switchdev_obj *obj)) +{ + return 0; +} + #define SWITCHDEV_SET_OPS(netdev, ops) do {} while (0) #endif diff --git a/include/net/tcp.h b/include/net/tcp.h index a18914d20486..e0a65c067662 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -313,7 +313,7 @@ extern struct proto tcp_prot; void tcp_tasklet_init(void); -void tcp_v4_err(struct sk_buff *skb, u32); +int tcp_v4_err(struct sk_buff *skb, u32); void tcp_shutdown(struct sock *sk, int how); @@ -1124,7 +1124,7 @@ void tcp_rate_check_app_limited(struct sock *sk); */ static inline int tcp_is_sack(const struct tcp_sock *tp) { - return tp->rx_opt.sack_ok; + return likely(tp->rx_opt.sack_ok); } static inline bool tcp_is_reno(const struct tcp_sock *tp) @@ -1315,33 +1315,16 @@ static inline __sum16 tcp_v4_check(int len, __be32 saddr, return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base); } -static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb) -{ - return __skb_checksum_complete(skb); -} - static inline bool tcp_checksum_complete(struct sk_buff *skb) { return !skb_csum_unnecessary(skb) && - __tcp_checksum_complete(skb); + __skb_checksum_complete(skb); } bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); int tcp_filter(struct sock *sk, struct sk_buff *skb); - -#undef STATE_TRACE - -#ifdef STATE_TRACE -static const char *statename[]={ - "Unused","Established","Syn Sent","Syn Recv", - "Fin Wait 1","Fin Wait 2","Time Wait", "Close", - "Close Wait","Last ACK","Listen","Closing" -}; -#endif void tcp_set_state(struct sock *sk, int state); - void tcp_done(struct sock *sk); - int tcp_abort(struct sock *sk, int err); static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) @@ -1385,7 +1368,7 @@ static inline int tcp_win_from_space(const struct sock *sk, int space) /* Note: caller must be prepared to deal with negative returns */ static inline int tcp_space(const struct sock *sk) { - return tcp_win_from_space(sk, sk->sk_rcvbuf - + return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len - atomic_read(&sk->sk_rmem_alloc)); } @@ -1572,9 +1555,21 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, const struct sock *addr_sk); #ifdef CONFIG_TCP_MD5SIG -struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, - const union tcp_md5_addr *addr, - int family); +#include <linux/jump_label.h> +extern struct static_key tcp_md5_needed; +struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, + const union tcp_md5_addr *addr, + int family); +static inline struct tcp_md5sig_key * +tcp_md5_do_lookup(const struct sock *sk, + const union tcp_md5_addr *addr, + int family) +{ + if (!static_key_false(&tcp_md5_needed)) + return NULL; + return __tcp_md5_do_lookup(sk, addr, family); +} + #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) #else static 
inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, @@ -1875,12 +1870,16 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat; } -static inline bool tcp_stream_memory_free(const struct sock *sk) +/* @wake is one when sk_stream_write_space() calls us. + * This sends EPOLLOUT only if notsent_bytes is half the limit. + * This mimics the strategy used in sock_def_write_space(). + */ +static inline bool tcp_stream_memory_free(const struct sock *sk, int wake) { const struct tcp_sock *tp = tcp_sk(sk); u32 notsent_bytes = tp->write_seq - tp->snd_nxt; - return notsent_bytes < tcp_notsent_lowat(tp); + return (notsent_bytes << wake) < tcp_notsent_lowat(tp); } #ifdef CONFIG_PROC_FS diff --git a/include/net/tls.h b/include/net/tls.h index bab5627ff5e3..2a6ac8d642af 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -76,6 +76,10 @@ * * void (*unhash)(struct tls_device *device, struct sock *sk); * This function cleans listen state set by Inline TLS driver + * + * void (*release)(struct kref *kref); + * Release the registered device and allocated resources + * @kref: Number of reference to tls_device */ struct tls_device { char name[TLS_DEVICE_NAME_MAX]; @@ -83,6 +87,8 @@ struct tls_device { int (*feature)(struct tls_device *device); int (*hash)(struct tls_device *device, struct sock *sk); void (*unhash)(struct tls_device *device, struct sock *sk); + void (*release)(struct kref *kref); + struct kref kref; }; enum { @@ -454,6 +460,15 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx) return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx; } +static inline bool tls_sw_has_ctx_tx(const struct sock *sk) +{ + struct tls_context *ctx = tls_get_ctx(sk); + + if (!ctx) + return false; + return !!tls_sw_ctx_tx(ctx); +} + static inline struct tls_offload_context_rx * tls_offload_ctx_rx(const struct tls_context *tls_ctx) { diff --git a/include/net/udp.h b/include/net/udp.h index 9e82cb391dea..fd6d948755c8 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -252,6 +252,17 @@ static inline int udp_rqueue_get(struct sock *sk) return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit); } +static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if, + int dif, int sdif) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept, + bound_dev_if, dif, sdif); +#else + return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); +#endif +} + /* net/ipv4/udp.c */ void udp_destruct_sock(struct sock *sk); void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len); @@ -272,7 +283,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); int udp_get_port(struct sock *sk, unsigned short snum, int (*saddr_cmp)(const struct sock *, const struct sock *)); -void udp_err(struct sk_buff *, u32); +int udp_err(struct sk_buff *, u32); int udp_abort(struct sock *sk, int err); int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udp_push_pending_frames(struct sock *sk); @@ -406,17 +417,24 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off, } while(0) #if IS_ENABLED(CONFIG_IPV6) -#define __UDPX_INC_STATS(sk, field) \ -do { \ - if ((sk)->sk_family == AF_INET) \ - __UDP_INC_STATS(sock_net(sk), field, 0); \ - else \ - __UDP6_INC_STATS(sock_net(sk), field, 0); \ -} while (0) +#define __UDPX_MIB(sk, ipv4) \ +({ \ + ipv4 ? (IS_UDPLITE(sk) ? 
sock_net(sk)->mib.udplite_statistics : \ + sock_net(sk)->mib.udp_statistics) : \ + (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \ + sock_net(sk)->mib.udp_stats_in6); \ +}) #else -#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0) +#define __UDPX_MIB(sk, ipv4) \ +({ \ + IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \ + sock_net(sk)->mib.udp_statistics; \ +}) #endif +#define __UDPX_INC_STATS(sk, field) \ + __SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field) + #ifdef CONFIG_PROC_FS struct udp_seq_afinfo { sa_family_t family; @@ -450,4 +468,26 @@ DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key); void udpv6_encap_enable(void); #endif +static inline struct sk_buff *udp_rcv_segment(struct sock *sk, + struct sk_buff *skb, bool ipv4) +{ + struct sk_buff *segs; + + /* the GSO CB lays after the UDP one, no need to save and restore any + * CB fragment + */ + segs = __skb_gso_segment(skb, NETIF_F_SG, false); + if (unlikely(IS_ERR_OR_NULL(segs))) { + int segs_nr = skb_shinfo(skb)->gso_segs; + + atomic_add(segs_nr, &sk->sk_drops); + SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr); + kfree_skb(skb); + return NULL; + } + + consume_skb(skb); + return segs; +} + #endif /* _UDP_H */ diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index fe680ab6b15a..b8137953fea3 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -30,6 +30,7 @@ struct udp_port_cfg { __be16 local_udp_port; __be16 peer_udp_port; + int bind_ifindex; unsigned int use_udp_checksums:1, use_udp6_tx_checksums:1, use_udp6_rx_checksums:1, @@ -64,6 +65,8 @@ static inline int udp_sock_create(struct net *net, } typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); +typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk, + struct sk_buff *skb); typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk, struct list_head *head, @@ -76,6 +79,7 @@ struct udp_tunnel_sock_cfg { /* Used for setting up udp_sock fields, see udp.h for details */ __u8 encap_type; udp_tunnel_encap_rcv_t encap_rcv; + udp_tunnel_encap_err_lookup_t encap_err_lookup; udp_tunnel_encap_destroy_t encap_destroy; udp_tunnel_gro_receive_t gro_receive; udp_tunnel_gro_complete_t gro_complete; @@ -165,6 +169,12 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum) static inline void udp_tunnel_encap_enable(struct socket *sock) { + struct udp_sock *up = udp_sk(sock->sk); + + if (up->encap_enabled) + return; + + up->encap_enabled = 1; #if IS_ENABLED(CONFIG_IPV6) if (sock->sk->sk_family == PF_INET6) ipv6_stub->udpv6_encap_enable(); diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 03431c148e16..236403eb5ba6 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -216,6 +216,7 @@ struct vxlan_config { unsigned long age_interval; unsigned int addrmax; bool no_share; + enum ifla_vxlan_df df; }; struct vxlan_dev_node { @@ -420,11 +421,16 @@ struct switchdev_notifier_vxlan_fdb_info { u8 eth_addr[ETH_ALEN]; __be32 vni; bool offloaded; + bool added_by_user; }; #if IS_ENABLED(CONFIG_VXLAN) int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni, struct switchdev_notifier_vxlan_fdb_info *fdb_info); +int vxlan_fdb_replay(const struct net_device *dev, __be32 vni, + struct notifier_block *nb); +void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni); + #else static inline int vxlan_fdb_find_uc(struct net_device *dev, const 
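/*
 * Illustrative caller sketch for udp_rcv_segment() above (assumed helper
 * name: example_queue_one() stands in for the real per-segment receive
 * path): __skb_gso_segment() returns the segments chained via ->next, so
 * the receiver unlinks and delivers them one by one.
 */
static int example_queue_one(struct sock *sk, struct sk_buff *skb)
{
	int err = sock_queue_rcv_skb(sk, skb);	/* placeholder delivery step */

	if (err)
		kfree_skb(skb);
	return err;
}

static int example_udp_deliver_gso(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs, *next;
	int err = 0;

	segs = udp_rcv_segment(sk, skb, true);	/* true: account IPv4 MIBs */
	for (skb = segs; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;		/* detach before handing off */
		err = example_queue_one(sk, skb);
	}
	return err;
}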
u8 *mac, __be32 vni, @@ -432,6 +438,17 @@ vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni, { return -ENOENT; } + +static inline int vxlan_fdb_replay(const struct net_device *dev, __be32 vni, + struct notifier_block *nb) +{ + return -EOPNOTSUPP; +} + +static inline void +vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni) +{ +} #endif #endif diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 0eb390c205af..7298a53b9702 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -577,6 +577,7 @@ struct xfrm_policy { /* This lock only affects elements except for entry. */ rwlock_t lock; refcount_t refcnt; + u32 pos; struct timer_list timer; atomic_t genid; @@ -589,6 +590,7 @@ struct xfrm_policy { struct xfrm_lifetime_cur curlft; struct xfrm_policy_walk_entry walk; struct xfrm_policy_queue polq; + bool bydst_reinsert; u8 type; u8 action; u8 flags; @@ -596,6 +598,7 @@ struct xfrm_policy { u16 family; struct xfrm_sec_ctx *security; struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; + struct hlist_node bydst_inexact_list; struct rcu_head rcu; }; @@ -1093,7 +1096,6 @@ struct xfrm_offload { }; struct sec_path { - refcount_t refcnt; int len; int olen; @@ -1101,41 +1103,13 @@ struct sec_path { struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH]; }; -static inline int secpath_exists(struct sk_buff *skb) -{ -#ifdef CONFIG_XFRM - return skb->sp != NULL; -#else - return 0; -#endif -} - -static inline struct sec_path * -secpath_get(struct sec_path *sp) -{ - if (sp) - refcount_inc(&sp->refcnt); - return sp; -} - -void __secpath_destroy(struct sec_path *sp); - -static inline void -secpath_put(struct sec_path *sp) -{ - if (sp && refcount_dec_and_test(&sp->refcnt)) - __secpath_destroy(sp); -} - -struct sec_path *secpath_dup(struct sec_path *src); -int secpath_set(struct sk_buff *skb); +struct sec_path *secpath_set(struct sk_buff *skb); static inline void secpath_reset(struct sk_buff *skb) { #ifdef CONFIG_XFRM - secpath_put(skb->sp); - skb->sp = NULL; + skb_ext_del(skb, SKB_EXT_SEC_PATH); #endif } @@ -1191,7 +1165,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir, if (sk && sk->sk_policy[XFRM_POLICY_IN]) return __xfrm_policy_check(sk, ndir, skb, family); - return (!net->xfrm.policy_count[dir] && !skb->sp) || + return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) || (skb_dst(skb)->flags & DST_NOPOLICY) || __xfrm_policy_check(sk, ndir, skb, family); } @@ -1552,6 +1526,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, int (*func)(struct xfrm_state *, int, void*), void *); void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net); struct xfrm_state *xfrm_state_alloc(struct net *net); +void xfrm_state_free(struct xfrm_state *x); struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, const struct flowi *fl, @@ -1902,14 +1877,16 @@ static inline void xfrm_states_delete(struct xfrm_state **states, int n) #ifdef CONFIG_XFRM static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb) { - return skb->sp->xvec[skb->sp->len - 1]; + struct sec_path *sp = skb_sec_path(skb); + + return sp->xvec[sp->len - 1]; } #endif static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb) { #ifdef CONFIG_XFRM - struct sec_path *sp = skb->sp; + struct sec_path *sp = skb_sec_path(skb); if (!sp || !sp->olen || sp->len != sp->olen) return NULL; @@ -1967,7 +1944,7 @@ static inline void xfrm_dev_state_delete(struct xfrm_state *x) static inline void xfrm_dev_state_free(struct xfrm_state *x) { 
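/*
 * Illustrative sketch of the new sec_path handling (assumed input-path
 * usage): with the refcounted skb->sp pointer gone, the sec_path lives in
 * an skb extension; secpath_set() attaches it on demand and skb_sec_path()
 * (the skbuff accessor around SKB_EXT_SEC_PATH) reads it back.
 */
static int example_record_xfrm_state(struct sk_buff *skb, struct xfrm_state *x)
{
	struct sec_path *sp;

	sp = secpath_set(skb);		/* allocates the extension if missing */
	if (!sp)
		return -ENOMEM;

	if (sp->len == XFRM_MAX_DEPTH)
		return -ENOSPC;

	xfrm_state_hold(x);		/* sec_path holds a ref per entry */
	sp->xvec[sp->len++] = x;
	return 0;
}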
struct xfrm_state_offload *xso = &x->xso; - struct net_device *dev = xso->dev; + struct net_device *dev = xso->dev; if (dev && dev->xfrmdev_ops) { if (dev->xfrmdev_ops->xdo_dev_state_free) diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h index f62b842e6596..f8982e4e9702 100644 --- a/include/rdma/ib_fmr_pool.h +++ b/include/rdma/ib_fmr_pool.h @@ -88,6 +88,6 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, int list_len, u64 io_virtual_address); -int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr); +void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr); #endif /* IB_FMR_POOL_H */ diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index f6ba366051c7..fdef558e3a2d 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -277,6 +277,7 @@ enum ib_port_capability_mask_bits { IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14, + IB_PORT_CAP_MASK2_SUP = 1 << 15, IB_PORT_CM_SUP = 1 << 16, IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, IB_PORT_REINIT_SUP = 1 << 18, @@ -295,6 +296,15 @@ enum ib_port_capability_mask_bits { IB_PORT_HIERARCHY_INFO_SUP = 1ULL << 31, }; +enum ib_port_capability_mask2_bits { + IB_PORT_SET_NODE_DESC_SUP = 1 << 0, + IB_PORT_EX_PORT_INFO_EX_SUP = 1 << 1, + IB_PORT_VIRT_SUP = 1 << 2, + IB_PORT_SWITCH_PORT_STATE_TABLE_SUP = 1 << 3, + IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4, + IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5, +}; + #define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26) struct opa_class_port_info { diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 9c0c2132a2d6..a3ceed3a040a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -41,14 +41,11 @@ #include <linux/types.h> #include <linux/device.h> -#include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/rwsem.h> -#include <linux/scatterlist.h> #include <linux/workqueue.h> -#include <linux/socket.h> #include <linux/irq_poll.h> #include <uapi/linux/if_ether.h> #include <net/ipv6.h> @@ -56,7 +53,7 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/netdevice.h> - +#include <linux/refcount.h> #include <linux/if_link.h> #include <linux/atomic.h> #include <linux/mmu_notifier.h> @@ -437,6 +434,7 @@ enum ib_port_state { enum ib_port_width { IB_WIDTH_1X = 1, + IB_WIDTH_2X = 16, IB_WIDTH_4X = 2, IB_WIDTH_8X = 4, IB_WIDTH_12X = 8 @@ -446,6 +444,7 @@ static inline int ib_width_enum_to_int(enum ib_port_width width) { switch (width) { case IB_WIDTH_1X: return 1; + case IB_WIDTH_2X: return 2; case IB_WIDTH_4X: return 4; case IB_WIDTH_8X: return 8; case IB_WIDTH_12X: return 12; @@ -595,6 +594,7 @@ struct ib_port_attr { u8 active_width; u8 active_speed; u8 phys_state; + u16 port_cap_flags2; }; enum ib_device_modify_flags { @@ -732,7 +732,11 @@ enum ib_rate { IB_RATE_25_GBPS = 15, IB_RATE_100_GBPS = 16, IB_RATE_200_GBPS = 17, - IB_RATE_300_GBPS = 18 + IB_RATE_300_GBPS = 18, + IB_RATE_28_GBPS = 19, + IB_RATE_50_GBPS = 20, + IB_RATE_400_GBPS = 21, + IB_RATE_600_GBPS = 22, }; /** @@ -1508,6 +1512,10 @@ struct ib_ucontext { #endif struct ib_rdmacg_object cg_obj; + /* + * Implementation details of the RDMA core, don't use in drivers: + */ + struct rdma_restrack_entry res; }; struct ib_uobject { @@ -2256,82 +2264,86 @@ struct ib_counters_read_attr { struct uverbs_attr_bundle; -struct ib_device { - /* Do not access @dma_device directly from ULP nor from HW drivers. 
*/ - struct device *dma_device; - - char name[IB_DEVICE_NAME_MAX]; - - struct list_head event_handler_list; - spinlock_t event_handler_lock; - - rwlock_t client_data_lock; - struct list_head core_list; - /* Access to the client_data_list is protected by the client_data_lock - * rwlock and the lists_rwsem read-write semaphore - */ - struct list_head client_data_list; - - struct ib_cache cache; - /** - * port_immutable is indexed by port number - */ - struct ib_port_immutable *port_immutable; - - int num_comp_vectors; - - struct ib_port_pkey_list *port_pkey_list; - - struct iw_cm_verbs *iwcm; - +/** + * struct ib_device_ops - InfiniBand device operations + * This structure defines all the InfiniBand device operations, providers will + * need to define the supported operations, otherwise they will be set to null. + */ +struct ib_device_ops { + int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr, + const struct ib_send_wr **bad_send_wr); + int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, + const struct ib_recv_wr **bad_recv_wr); + void (*drain_rq)(struct ib_qp *qp); + void (*drain_sq)(struct ib_qp *qp); + int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc); + int (*peek_cq)(struct ib_cq *cq, int wc_cnt); + int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags); + int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt); + int (*post_srq_recv)(struct ib_srq *srq, + const struct ib_recv_wr *recv_wr, + const struct ib_recv_wr **bad_recv_wr); + int (*process_mad)(struct ib_device *device, int process_mad_flags, + u8 port_num, const struct ib_wc *in_wc, + const struct ib_grh *in_grh, + const struct ib_mad_hdr *in_mad, size_t in_mad_size, + struct ib_mad_hdr *out_mad, size_t *out_mad_size, + u16 *out_mad_pkey_index); + int (*query_device)(struct ib_device *device, + struct ib_device_attr *device_attr, + struct ib_udata *udata); + int (*modify_device)(struct ib_device *device, int device_modify_mask, + struct ib_device_modify *device_modify); + void (*get_dev_fw_str)(struct ib_device *device, char *str); + const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev, + int comp_vector); + int (*query_port)(struct ib_device *device, u8 port_num, + struct ib_port_attr *port_attr); + int (*modify_port)(struct ib_device *device, u8 port_num, + int port_modify_mask, + struct ib_port_modify *port_modify); /** - * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the - * driver initialized data. The struct is kfree()'ed by the sysfs - * core when the device is removed. A lifespan of -1 in the return - * struct tells the core to set a default lifespan. + * The following mandatory functions are used only at device + * registration. Keep functions such as these at the end of this + * structure to avoid cache line misses when accessing struct ib_device + * in fast paths. */ - struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, - u8 port_num); + int (*get_port_immutable)(struct ib_device *device, u8 port_num, + struct ib_port_immutable *immutable); + enum rdma_link_layer (*get_link_layer)(struct ib_device *device, + u8 port_num); /** - * get_hw_stats - Fill in the counter value(s) in the stats struct. 
- * @index - The index in the value array we wish to have updated, or - * num_counters if we want all stats updated - * Return codes - - * < 0 - Error, no counters updated - * index - Updated the single counter pointed to by index - * num_counters - Updated all counters (will reset the timestamp - * and prevent further calls for lifespan milliseconds) - * Drivers are allowed to update all counters in leiu of just the - * one given in index at their option - */ - int (*get_hw_stats)(struct ib_device *device, - struct rdma_hw_stats *stats, - u8 port, int index); - int (*query_device)(struct ib_device *device, - struct ib_device_attr *device_attr, - struct ib_udata *udata); - int (*query_port)(struct ib_device *device, - u8 port_num, - struct ib_port_attr *port_attr); - enum rdma_link_layer (*get_link_layer)(struct ib_device *device, - u8 port_num); - /* When calling get_netdev, the HW vendor's driver should return the + * When calling get_netdev, the HW vendor's driver should return the * net device of device @device at port @port_num or NULL if such * a net device doesn't exist. The vendor driver should call dev_hold * on this net device. The HW vendor's device driver must guarantee * that this function returns NULL before the net device has finished * NETDEV_UNREGISTER state. */ - struct net_device *(*get_netdev)(struct ib_device *device, - u8 port_num); - /* query_gid should be return GID value for @device, when @port_num + struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num); + /** + * rdma netdev operation + * + * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params + * must return -EOPNOTSUPP if it doesn't support the specified type. + */ + struct net_device *(*alloc_rdma_netdev)( + struct ib_device *device, u8 port_num, enum rdma_netdev_t type, + const char *name, unsigned char name_assign_type, + void (*setup)(struct net_device *)); + + int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num, + enum rdma_netdev_t type, + struct rdma_netdev_alloc_params *params); + /** + * query_gid should be return GID value for @device, when @port_num * link layer is either IB or iWarp. It is no-op if @port_num port * is RoCE link layer. */ - int (*query_gid)(struct ib_device *device, - u8 port_num, int index, - union ib_gid *gid); - /* When calling add_gid, the HW vendor's driver should add the gid + int (*query_gid)(struct ib_device *device, u8 port_num, int index, + union ib_gid *gid); + /** + * When calling add_gid, the HW vendor's driver should add the gid * of device of port at gid index available at @attr. Meta-info of * that gid (for example, the network device related to this gid) is * available at @attr. @context allows the HW vendor driver to store @@ -2343,213 +2355,186 @@ struct ib_device { * concurrently for different ports. This function is only called when * roce_gid_table is used. */ - int (*add_gid)(const struct ib_gid_attr *attr, - void **context); - /* When calling del_gid, the HW vendor's driver should delete the + int (*add_gid)(const struct ib_gid_attr *attr, void **context); + /** + * When calling del_gid, the HW vendor's driver should delete the * gid of device @device at gid index gid_index of port port_num * available in @attr. * Upon the deletion of a GID entry, the HW vendor must free any * allocated memory. The caller will clear @context afterwards. * This function is only called when roce_gid_table is used. 
*/ - int (*del_gid)(const struct ib_gid_attr *attr, - void **context); - int (*query_pkey)(struct ib_device *device, - u8 port_num, u16 index, u16 *pkey); - int (*modify_device)(struct ib_device *device, - int device_modify_mask, - struct ib_device_modify *device_modify); - int (*modify_port)(struct ib_device *device, - u8 port_num, int port_modify_mask, - struct ib_port_modify *port_modify); - struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, - struct ib_udata *udata); - int (*dealloc_ucontext)(struct ib_ucontext *context); - int (*mmap)(struct ib_ucontext *context, - struct vm_area_struct *vma); - struct ib_pd * (*alloc_pd)(struct ib_device *device, - struct ib_ucontext *context, - struct ib_udata *udata); - int (*dealloc_pd)(struct ib_pd *pd); - struct ib_ah * (*create_ah)(struct ib_pd *pd, - struct rdma_ah_attr *ah_attr, - struct ib_udata *udata); - int (*modify_ah)(struct ib_ah *ah, - struct rdma_ah_attr *ah_attr); - int (*query_ah)(struct ib_ah *ah, - struct rdma_ah_attr *ah_attr); - int (*destroy_ah)(struct ib_ah *ah); - struct ib_srq * (*create_srq)(struct ib_pd *pd, - struct ib_srq_init_attr *srq_init_attr, - struct ib_udata *udata); - int (*modify_srq)(struct ib_srq *srq, - struct ib_srq_attr *srq_attr, - enum ib_srq_attr_mask srq_attr_mask, - struct ib_udata *udata); - int (*query_srq)(struct ib_srq *srq, - struct ib_srq_attr *srq_attr); - int (*destroy_srq)(struct ib_srq *srq); - int (*post_srq_recv)(struct ib_srq *srq, - const struct ib_recv_wr *recv_wr, - const struct ib_recv_wr **bad_recv_wr); - struct ib_qp * (*create_qp)(struct ib_pd *pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_udata *udata); - int (*modify_qp)(struct ib_qp *qp, - struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_udata *udata); - int (*query_qp)(struct ib_qp *qp, - struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr); - int (*destroy_qp)(struct ib_qp *qp); - int (*post_send)(struct ib_qp *qp, - const struct ib_send_wr *send_wr, - const struct ib_send_wr **bad_send_wr); - int (*post_recv)(struct ib_qp *qp, - const struct ib_recv_wr *recv_wr, - const struct ib_recv_wr **bad_recv_wr); - struct ib_cq * (*create_cq)(struct ib_device *device, - const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, - struct ib_udata *udata); - int (*modify_cq)(struct ib_cq *cq, u16 cq_count, - u16 cq_period); - int (*destroy_cq)(struct ib_cq *cq); - int (*resize_cq)(struct ib_cq *cq, int cqe, - struct ib_udata *udata); - int (*poll_cq)(struct ib_cq *cq, int num_entries, - struct ib_wc *wc); - int (*peek_cq)(struct ib_cq *cq, int wc_cnt); - int (*req_notify_cq)(struct ib_cq *cq, - enum ib_cq_notify_flags flags); - int (*req_ncomp_notif)(struct ib_cq *cq, - int wc_cnt); - struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, - int mr_access_flags); - struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, - u64 start, u64 length, - u64 virt_addr, - int mr_access_flags, - struct ib_udata *udata); - int (*rereg_user_mr)(struct ib_mr *mr, - int flags, - u64 start, u64 length, - u64 virt_addr, - int mr_access_flags, - struct ib_pd *pd, - struct ib_udata *udata); - int (*dereg_mr)(struct ib_mr *mr); - struct ib_mr * (*alloc_mr)(struct ib_pd *pd, - enum ib_mr_type mr_type, - u32 max_num_sg); - int (*map_mr_sg)(struct ib_mr *mr, - struct scatterlist *sg, - int sg_nents, - unsigned int *sg_offset); - struct ib_mw * (*alloc_mw)(struct ib_pd *pd, - enum ib_mw_type type, - struct ib_udata *udata); - int (*dealloc_mw)(struct ib_mw *mw); - struct ib_fmr * (*alloc_fmr)(struct 
ib_pd *pd, - int mr_access_flags, - struct ib_fmr_attr *fmr_attr); - int (*map_phys_fmr)(struct ib_fmr *fmr, - u64 *page_list, int list_len, - u64 iova); - int (*unmap_fmr)(struct list_head *fmr_list); - int (*dealloc_fmr)(struct ib_fmr *fmr); - int (*attach_mcast)(struct ib_qp *qp, - union ib_gid *gid, - u16 lid); - int (*detach_mcast)(struct ib_qp *qp, - union ib_gid *gid, - u16 lid); - int (*process_mad)(struct ib_device *device, - int process_mad_flags, - u8 port_num, - const struct ib_wc *in_wc, - const struct ib_grh *in_grh, - const struct ib_mad_hdr *in_mad, - size_t in_mad_size, - struct ib_mad_hdr *out_mad, - size_t *out_mad_size, - u16 *out_mad_pkey_index); - struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, - struct ib_ucontext *ucontext, - struct ib_udata *udata); - int (*dealloc_xrcd)(struct ib_xrcd *xrcd); - struct ib_flow * (*create_flow)(struct ib_qp *qp, - struct ib_flow_attr - *flow_attr, - int domain, - struct ib_udata *udata); - int (*destroy_flow)(struct ib_flow *flow_id); - int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, - struct ib_mr_status *mr_status); - void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); - void (*drain_rq)(struct ib_qp *qp); - void (*drain_sq)(struct ib_qp *qp); - int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, - int state); - int (*get_vf_config)(struct ib_device *device, int vf, u8 port, - struct ifla_vf_info *ivf); - int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, - struct ifla_vf_stats *stats); - int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, - int type); - struct ib_wq * (*create_wq)(struct ib_pd *pd, - struct ib_wq_init_attr *init_attr, - struct ib_udata *udata); - int (*destroy_wq)(struct ib_wq *wq); - int (*modify_wq)(struct ib_wq *wq, - struct ib_wq_attr *attr, - u32 wq_attr_mask, - struct ib_udata *udata); - struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device, - struct ib_rwq_ind_table_init_attr *init_attr, - struct ib_udata *udata); - int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); - struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device, - const struct ib_flow_action_attrs_esp *attr, - struct uverbs_attr_bundle *attrs); - int (*destroy_flow_action)(struct ib_flow_action *action); - int (*modify_flow_action_esp)(struct ib_flow_action *action, - const struct ib_flow_action_attrs_esp *attr, - struct uverbs_attr_bundle *attrs); - struct ib_dm * (*alloc_dm)(struct ib_device *device, - struct ib_ucontext *context, - struct ib_dm_alloc_attr *attr, - struct uverbs_attr_bundle *attrs); - int (*dealloc_dm)(struct ib_dm *dm); - struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, - struct ib_dm_mr_attr *attr, - struct uverbs_attr_bundle *attrs); - struct ib_counters * (*create_counters)(struct ib_device *device, - struct uverbs_attr_bundle *attrs); - int (*destroy_counters)(struct ib_counters *counters); - int (*read_counters)(struct ib_counters *counters, - struct ib_counters_read_attr *counters_read_attr, - struct uverbs_attr_bundle *attrs); + int (*del_gid)(const struct ib_gid_attr *attr, void **context); + int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index, + u16 *pkey); + struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device, + struct ib_udata *udata); + int (*dealloc_ucontext)(struct ib_ucontext *context); + int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma); + void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); + struct ib_pd 
*(*alloc_pd)(struct ib_device *device, + struct ib_ucontext *context, + struct ib_udata *udata); + int (*dealloc_pd)(struct ib_pd *pd); + struct ib_ah *(*create_ah)(struct ib_pd *pd, + struct rdma_ah_attr *ah_attr, u32 flags, + struct ib_udata *udata); + int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); + int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); + int (*destroy_ah)(struct ib_ah *ah, u32 flags); + struct ib_srq *(*create_srq)(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_udata *udata); + int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr, + enum ib_srq_attr_mask srq_attr_mask, + struct ib_udata *udata); + int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); + int (*destroy_srq)(struct ib_srq *srq); + struct ib_qp *(*create_qp)(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr, + struct ib_udata *udata); + int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_udata *udata); + int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); + int (*destroy_qp)(struct ib_qp *qp); + struct ib_cq *(*create_cq)(struct ib_device *device, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, + struct ib_udata *udata); + int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); + int (*destroy_cq)(struct ib_cq *cq); + int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); + struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); + struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int mr_access_flags, + struct ib_udata *udata); + int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length, + u64 virt_addr, int mr_access_flags, + struct ib_pd *pd, struct ib_udata *udata); + int (*dereg_mr)(struct ib_mr *mr); + struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg); + int (*advise_mr)(struct ib_pd *pd, + enum ib_uverbs_advise_mr_advice advice, u32 flags, + struct ib_sge *sg_list, u32 num_sge, + struct uverbs_attr_bundle *attrs); + int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, + unsigned int *sg_offset); + int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, + struct ib_mr_status *mr_status); + struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type, + struct ib_udata *udata); + int (*dealloc_mw)(struct ib_mw *mw); + struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags, + struct ib_fmr_attr *fmr_attr); + int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len, + u64 iova); + int (*unmap_fmr)(struct list_head *fmr_list); + int (*dealloc_fmr)(struct ib_fmr *fmr); + int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); + int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); + struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device, + struct ib_ucontext *ucontext, + struct ib_udata *udata); + int (*dealloc_xrcd)(struct ib_xrcd *xrcd); + struct ib_flow *(*create_flow)(struct ib_qp *qp, + struct ib_flow_attr *flow_attr, + int domain, struct ib_udata *udata); + int (*destroy_flow)(struct ib_flow *flow_id); + struct ib_flow_action *(*create_flow_action_esp)( + struct ib_device *device, + const struct ib_flow_action_attrs_esp *attr, + struct uverbs_attr_bundle *attrs); + int (*destroy_flow_action)(struct ib_flow_action *action); + int (*modify_flow_action_esp)( + struct ib_flow_action *action, + const 
struct ib_flow_action_attrs_esp *attr, + struct uverbs_attr_bundle *attrs); + int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, + int state); + int (*get_vf_config)(struct ib_device *device, int vf, u8 port, + struct ifla_vf_info *ivf); + int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, + struct ifla_vf_stats *stats); + int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, + int type); + struct ib_wq *(*create_wq)(struct ib_pd *pd, + struct ib_wq_init_attr *init_attr, + struct ib_udata *udata); + int (*destroy_wq)(struct ib_wq *wq); + int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr, + u32 wq_attr_mask, struct ib_udata *udata); + struct ib_rwq_ind_table *(*create_rwq_ind_table)( + struct ib_device *device, + struct ib_rwq_ind_table_init_attr *init_attr, + struct ib_udata *udata); + int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); + struct ib_dm *(*alloc_dm)(struct ib_device *device, + struct ib_ucontext *context, + struct ib_dm_alloc_attr *attr, + struct uverbs_attr_bundle *attrs); + int (*dealloc_dm)(struct ib_dm *dm); + struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, + struct ib_dm_mr_attr *attr, + struct uverbs_attr_bundle *attrs); + struct ib_counters *(*create_counters)( + struct ib_device *device, struct uverbs_attr_bundle *attrs); + int (*destroy_counters)(struct ib_counters *counters); + int (*read_counters)(struct ib_counters *counters, + struct ib_counters_read_attr *counters_read_attr, + struct uverbs_attr_bundle *attrs); + /** + * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the + * driver initialized data. The struct is kfree()'ed by the sysfs + * core when the device is removed. A lifespan of -1 in the return + * struct tells the core to set a default lifespan. + */ + struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, + u8 port_num); + /** + * get_hw_stats - Fill in the counter value(s) in the stats struct. + * @index - The index in the value array we wish to have updated, or + * num_counters if we want all stats updated + * Return codes - + * < 0 - Error, no counters updated + * index - Updated the single counter pointed to by index + * num_counters - Updated all counters (will reset the timestamp + * and prevent further calls for lifespan milliseconds) + * Drivers are allowed to update all counters in leiu of just the + * one given in index at their option + */ + int (*get_hw_stats)(struct ib_device *device, + struct rdma_hw_stats *stats, u8 port, int index); +}; + +struct ib_device { + /* Do not access @dma_device directly from ULP nor from HW drivers. */ + struct device *dma_device; + struct ib_device_ops ops; + char name[IB_DEVICE_NAME_MAX]; + + struct list_head event_handler_list; + spinlock_t event_handler_lock; + + rwlock_t client_data_lock; + struct list_head core_list; + /* Access to the client_data_list is protected by the client_data_lock + * rwlock and the lists_rwsem read-write semaphore + */ + struct list_head client_data_list; + struct ib_cache cache; /** - * rdma netdev operation - * - * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params - * must return -EOPNOTSUPP if it doesn't support the specified type. 
+ * port_immutable is indexed by port number */ - struct net_device *(*alloc_rdma_netdev)( - struct ib_device *device, - u8 port_num, - enum rdma_netdev_t type, - const char *name, - unsigned char name_assign_type, - void (*setup)(struct net_device *)); + struct ib_port_immutable *port_immutable; - int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num, - enum rdma_netdev_t type, - struct rdma_netdev_alloc_params *params); + int num_comp_vectors; + + struct ib_port_pkey_list *port_pkey_list; + + struct iw_cm_verbs *iwcm; struct module *owner; struct device dev; @@ -2592,19 +2577,14 @@ struct ib_device { */ struct rdma_restrack_root res; - /** - * The following mandatory functions are used only at device - * registration. Keep functions such as these at the end of this - * structure to avoid cache line misses when accessing struct ib_device - * in fast paths. - */ - int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); - void (*get_dev_fw_str)(struct ib_device *, char *str); - const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev, - int comp_vector); - - const struct uverbs_object_tree_def *const *driver_specs; + const struct uapi_definition *driver_def; enum rdma_driver_id driver_id; + /* + * Provides synchronization between device unregistration and netlink + * commands on a device. To be used only by core. + */ + refcount_t refcount; + struct completion unreg_completion; }; struct ib_client { @@ -2653,6 +2633,8 @@ void ib_unregister_client(struct ib_client *client); void *ib_get_client_data(struct ib_device *device, struct ib_client *client); void ib_set_client_data(struct ib_device *device, struct ib_client *client, void *data); +void ib_set_device_ops(struct ib_device *device, + const struct ib_device_ops *ops); #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, @@ -3109,7 +3091,7 @@ static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, u8 port_num) { return rdma_protocol_roce(device, port_num) && - device->add_gid && device->del_gid; + device->ops.add_gid && device->ops.del_gid; } /* @@ -3169,15 +3151,22 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, __ib_alloc_pd((device), (flags), KBUILD_MODNAME) void ib_dealloc_pd(struct ib_pd *pd); +enum rdma_create_ah_flags { + /* In a sleepable context */ + RDMA_CREATE_AH_SLEEPABLE = BIT(0), +}; + /** * rdma_create_ah - Creates an address handle for the given address vector. * @pd: The protection domain associated with the address handle. * @ah_attr: The attributes of the address vector. + * @flags: Create address handle flags (see enum rdma_create_ah_flags). * * The address handle is used to reference a local or global destination * in all UD QP post sends. */ -struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr); +struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, + u32 flags); /** * rdma_create_user_ah - Creates an address handle for the given address vector. @@ -3267,11 +3256,17 @@ int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); */ int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); +enum rdma_destroy_ah_flags { + /* In a sleepable context */ + RDMA_DESTROY_AH_SLEEPABLE = BIT(0), +}; + /** * rdma_destroy_ah - Destroys an address handle. * @ah: The address handle to destroy. + * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags). 
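/*
 * Illustrative provider-side sketch (placeholder driver names): with the
 * callbacks consolidated in struct ib_device_ops, a HW driver fills one
 * const ops table and registers it via ib_set_device_ops() instead of
 * assigning individual members of struct ib_device.
 */
static int example_query_port(struct ib_device *ibdev, u8 port_num,
			      struct ib_port_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->state	 = IB_PORT_ACTIVE;	/* placeholder values */
	attr->max_mtu	 = IB_MTU_4096;
	attr->active_mtu = IB_MTU_4096;
	return 0;
}

static const struct ib_device_ops example_dev_ops = {
	.query_port = example_query_port,
	/* .query_device, .post_send, ... are filled in the same way */
};

static void example_register_ops(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &example_dev_ops);
}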
*/ -int rdma_destroy_ah(struct ib_ah *ah); +int rdma_destroy_ah(struct ib_ah *ah, u32 flags); /** * ib_create_srq - Creates a SRQ associated with the specified protection @@ -3333,7 +3328,8 @@ static inline int ib_post_srq_recv(struct ib_srq *srq, { const struct ib_recv_wr *dummy; - return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr ? : &dummy); + return srq->device->ops.post_srq_recv(srq, recv_wr, + bad_recv_wr ? : &dummy); } /** @@ -3436,7 +3432,7 @@ static inline int ib_post_send(struct ib_qp *qp, { const struct ib_send_wr *dummy; - return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy); + return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy); } /** @@ -3453,7 +3449,7 @@ static inline int ib_post_recv(struct ib_qp *qp, { const struct ib_recv_wr *dummy; - return qp->device->post_recv(qp, recv_wr, bad_recv_wr ? : &dummy); + return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy); } struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, @@ -3526,7 +3522,7 @@ int ib_destroy_cq(struct ib_cq *cq); static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) { - return cq->device->poll_cq(cq, num_entries, wc); + return cq->device->ops.poll_cq(cq, num_entries, wc); } /** @@ -3559,7 +3555,7 @@ static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, static inline int ib_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags) { - return cq->device->req_notify_cq(cq, flags); + return cq->device->ops.req_notify_cq(cq, flags); } /** @@ -3571,8 +3567,8 @@ static inline int ib_req_notify_cq(struct ib_cq *cq, */ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) { - return cq->device->req_ncomp_notif ? - cq->device->req_ncomp_notif(cq, wc_cnt) : + return cq->device->ops.req_ncomp_notif ? + cq->device->ops.req_ncomp_notif(cq, wc_cnt) : -ENOSYS; } @@ -3836,7 +3832,7 @@ static inline int ib_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, u64 iova) { - return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); + return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova); } /** @@ -4189,10 +4185,10 @@ static inline const struct cpumask * ib_get_vector_affinity(struct ib_device *device, int comp_vector) { if (comp_vector < 0 || comp_vector >= device->num_comp_vectors || - !device->get_vector_affinity) + !device->ops.get_vector_affinity) return NULL; - return device->get_vector_affinity(device, comp_vector); + return device->ops.get_vector_affinity(device, comp_vector); } @@ -4204,10 +4200,10 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector) */ void rdma_roce_rescan_device(struct ib_device *ibdev); -struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile); +struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile); + -int uverbs_destroy_def_handler(struct ib_uverbs_file *file, - struct uverbs_attr_bundle *attrs); +int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs); struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num, enum rdma_netdev_t type, const char *name, diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 3584d0816fcd..dd0ed8048bb4 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -269,6 +269,13 @@ struct rvt_driver_provided { void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp); /* + * Init a struture allocated with qp_priv_alloc(). This should be + * called after all qp fields have been initialized in rdmavt. 
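/*
 * Illustrative caller sketch for the new AH flags (assumed process-context
 * caller): rdma_create_ah()/rdma_destroy_ah() now take a flags word so the
 * provider knows whether it may sleep; atomic-context callers pass 0.
 */
static int example_ah_roundtrip(struct ib_pd *pd, struct rdma_ah_attr *attr)
{
	struct ib_ah *ah;

	ah = rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	/* ... reference @ah in UD work requests ... */

	return rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
}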
+ */ + int (*qp_priv_init)(struct rvt_dev_info *rdi, struct rvt_qp *qp, + struct ib_qp_init_attr *init_attr); + + /* * Free the driver's private qp structure. */ void (*qp_priv_free)(struct rvt_dev_info *rdi, struct rvt_qp *qp); diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index 2638fa7cd702..8f179be9d9a9 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h @@ -39,6 +39,10 @@ enum rdma_restrack_type { */ RDMA_RESTRACK_MR, /** + * @RDMA_RESTRACK_CTX: Verbs contexts (CTX) + */ + RDMA_RESTRACK_CTX, + /** * @RDMA_RESTRACK_MAX: Last entry, used for array dclarations */ RDMA_RESTRACK_MAX @@ -112,6 +116,10 @@ struct rdma_restrack_entry { * @type: various objects in restrack database */ enum rdma_restrack_type type; + /** + * @user: user resource + */ + bool user; }; /** @@ -136,11 +144,8 @@ int rdma_restrack_count(struct rdma_restrack_root *res, enum rdma_restrack_type type, struct pid_namespace *ns); -/** - * rdma_restrack_add() - add object to the reource tracking database - * @res: resource entry - */ -void rdma_restrack_add(struct rdma_restrack_entry *res); +void rdma_restrack_kadd(struct rdma_restrack_entry *res); +void rdma_restrack_uadd(struct rdma_restrack_entry *res); /** * rdma_restrack_del() - delete object from the reource tracking database @@ -155,7 +160,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res); */ static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res) { - return !res->task; + return !res->user; } /** diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 84d3d15f1f38..27da906beea7 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -79,6 +79,8 @@ struct uverbs_attr_spec { */ u8 alloc_and_copy:1; u8 mandatory:1; + /* True if this is from UVERBS_ATTR_UHW */ + u8 is_udata:1; union { struct { @@ -140,6 +142,13 @@ struct uverbs_attr_spec { * * The tree encodes multiple types, and uses a scheme where OBJ_ID,0,0 returns * the object slot, and OBJ_ID,METH_ID,0 and returns the method slot. + * + * This also encodes the tables for the write() and write() extended commands + * using the coding + * OBJ_ID,UVERBS_API_METHOD_IS_WRITE,command # + * OBJ_ID,UVERBS_API_METHOD_IS_WRITE_EX,command_ex # + * ie the WRITE path is treated as a special method type in the ioctl + * framework. 
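/*
 * Illustrative sketch of the kadd/uadd split above (the real call sites
 * are in the ib_core object-creation paths): the former rdma_restrack_add()
 * inferred ownership from res->task, whereas the new helpers record it
 * explicitly and rdma_is_kernel_res() now keys off res->user.
 */
static void example_track_qp(struct ib_qp *qp, bool created_by_user)
{
	if (created_by_user)
		rdma_restrack_uadd(&qp->res);	/* marks res->user */
	else
		rdma_restrack_kadd(&qp->res);	/* kernel-owned resource */
}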
*/ enum uapi_radix_data { UVERBS_API_NS_FLAG = 1U << UVERBS_ID_NS_SHIFT, @@ -147,12 +156,16 @@ enum uapi_radix_data { UVERBS_API_ATTR_KEY_BITS = 6, UVERBS_API_ATTR_KEY_MASK = GENMASK(UVERBS_API_ATTR_KEY_BITS - 1, 0), UVERBS_API_ATTR_BKEY_LEN = (1 << UVERBS_API_ATTR_KEY_BITS) - 1, + UVERBS_API_WRITE_KEY_NUM = 1 << UVERBS_API_ATTR_KEY_BITS, UVERBS_API_METHOD_KEY_BITS = 5, UVERBS_API_METHOD_KEY_SHIFT = UVERBS_API_ATTR_KEY_BITS, - UVERBS_API_METHOD_KEY_NUM_CORE = 24, - UVERBS_API_METHOD_KEY_NUM_DRIVER = (1 << UVERBS_API_METHOD_KEY_BITS) - - UVERBS_API_METHOD_KEY_NUM_CORE, + UVERBS_API_METHOD_KEY_NUM_CORE = 22, + UVERBS_API_METHOD_IS_WRITE = 30 << UVERBS_API_METHOD_KEY_SHIFT, + UVERBS_API_METHOD_IS_WRITE_EX = 31 << UVERBS_API_METHOD_KEY_SHIFT, + UVERBS_API_METHOD_KEY_NUM_DRIVER = + (UVERBS_API_METHOD_IS_WRITE >> UVERBS_API_METHOD_KEY_SHIFT) - + UVERBS_API_METHOD_KEY_NUM_CORE, UVERBS_API_METHOD_KEY_MASK = GENMASK( UVERBS_API_METHOD_KEY_BITS + UVERBS_API_METHOD_KEY_SHIFT - 1, UVERBS_API_METHOD_KEY_SHIFT), @@ -205,7 +218,22 @@ static inline __attribute_const__ u32 uapi_key_ioctl_method(u32 id) return id << UVERBS_API_METHOD_KEY_SHIFT; } -static inline __attribute_const__ u32 uapi_key_attr_to_method(u32 attr_key) +static inline __attribute_const__ u32 uapi_key_write_method(u32 id) +{ + if (id >= UVERBS_API_WRITE_KEY_NUM) + return UVERBS_API_KEY_ERR; + return UVERBS_API_METHOD_IS_WRITE | id; +} + +static inline __attribute_const__ u32 uapi_key_write_ex_method(u32 id) +{ + if (id >= UVERBS_API_WRITE_KEY_NUM) + return UVERBS_API_KEY_ERR; + return UVERBS_API_METHOD_IS_WRITE_EX | id; +} + +static inline __attribute_const__ u32 +uapi_key_attr_to_ioctl_method(u32 attr_key) { return attr_key & (UVERBS_API_OBJ_KEY_MASK | UVERBS_API_METHOD_KEY_MASK); @@ -213,10 +241,23 @@ static inline __attribute_const__ u32 uapi_key_attr_to_method(u32 attr_key) static inline __attribute_const__ bool uapi_key_is_ioctl_method(u32 key) { - return (key & UVERBS_API_METHOD_KEY_MASK) != 0 && + unsigned int method = key & UVERBS_API_METHOD_KEY_MASK; + + return method != 0 && method < UVERBS_API_METHOD_IS_WRITE && (key & UVERBS_API_ATTR_KEY_MASK) == 0; } +static inline __attribute_const__ bool uapi_key_is_write_method(u32 key) +{ + return (key & UVERBS_API_METHOD_KEY_MASK) == UVERBS_API_METHOD_IS_WRITE; +} + +static inline __attribute_const__ bool uapi_key_is_write_ex_method(u32 key) +{ + return (key & UVERBS_API_METHOD_KEY_MASK) == + UVERBS_API_METHOD_IS_WRITE_EX; +} + static inline __attribute_const__ u32 uapi_key_attrs_start(u32 ioctl_method_key) { /* 0 is the method slot itself */ @@ -246,9 +287,12 @@ static inline __attribute_const__ u32 uapi_key_attr(u32 id) return id; } +/* Only true for ioctl methods */ static inline __attribute_const__ bool uapi_key_is_attr(u32 key) { - return (key & UVERBS_API_METHOD_KEY_MASK) != 0 && + unsigned int method = key & UVERBS_API_METHOD_KEY_MASK; + + return method != 0 && method < UVERBS_API_METHOD_IS_WRITE && (key & UVERBS_API_ATTR_KEY_MASK) != 0; } @@ -285,8 +329,7 @@ struct uverbs_method_def { u32 flags; size_t num_attrs; const struct uverbs_attr_def * const (*attrs)[]; - int (*handler)(struct ib_uverbs_file *ufile, - struct uverbs_attr_bundle *ctx); + int (*handler)(struct uverbs_attr_bundle *attrs); }; struct uverbs_object_def { @@ -296,11 +339,132 @@ struct uverbs_object_def { const struct uverbs_method_def * const (*methods)[]; }; -struct uverbs_object_tree_def { - size_t num_objects; - const struct uverbs_object_def * const (*objects)[]; +enum uapi_definition_kind { + UAPI_DEF_END = 
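/*
 * Worked example of the radix key layout defined above: attribute keys
 * occupy bits 0-5, the method key bits 6-10, and the object id the bits
 * above that.  Method slots 30 and 31 are reserved so the legacy write()
 * and extended-write command tables can share the same radix tree:
 *
 *   uapi_key_write_method(7)
 *       == UVERBS_API_METHOD_IS_WRITE | 7
 *       == (30 << 6) | 7 == 0x787       (command number 7 is arbitrary)
 *
 *   uapi_key_is_write_method(0x787) -> true
 *   uapi_key_is_ioctl_method(0x787) -> false
 */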
0, + UAPI_DEF_OBJECT_START, + UAPI_DEF_WRITE, + UAPI_DEF_CHAIN_OBJ_TREE, + UAPI_DEF_CHAIN, + UAPI_DEF_IS_SUPPORTED_FUNC, + UAPI_DEF_IS_SUPPORTED_DEV_FN, +}; + +enum uapi_definition_scope { + UAPI_SCOPE_OBJECT = 1, + UAPI_SCOPE_METHOD = 2, }; +struct uapi_definition { + u8 kind; + u8 scope; + union { + struct { + u16 object_id; + } object_start; + struct { + u16 command_num; + u8 is_ex:1; + u8 has_udata:1; + u8 has_resp:1; + u8 req_size; + u8 resp_size; + } write; + }; + + union { + bool (*func_is_supported)(struct ib_device *device); + int (*func_write)(struct uverbs_attr_bundle *attrs); + const struct uapi_definition *chain; + const struct uverbs_object_def *chain_obj_tree; + size_t needs_fn_offset; + }; +}; + +/* Define things connected to object_id */ +#define DECLARE_UVERBS_OBJECT(_object_id, ...) \ + { \ + .kind = UAPI_DEF_OBJECT_START, \ + .object_start = { .object_id = _object_id }, \ + }, \ + ##__VA_ARGS__ + +/* Use in a var_args of DECLARE_UVERBS_OBJECT */ +#define DECLARE_UVERBS_WRITE(_command_num, _func, _cmd_desc, ...) \ + { \ + .kind = UAPI_DEF_WRITE, \ + .scope = UAPI_SCOPE_OBJECT, \ + .write = { .is_ex = 0, .command_num = _command_num }, \ + .func_write = _func, \ + _cmd_desc, \ + }, \ + ##__VA_ARGS__ + +/* Use in a var_args of DECLARE_UVERBS_OBJECT */ +#define DECLARE_UVERBS_WRITE_EX(_command_num, _func, _cmd_desc, ...) \ + { \ + .kind = UAPI_DEF_WRITE, \ + .scope = UAPI_SCOPE_OBJECT, \ + .write = { .is_ex = 1, .command_num = _command_num }, \ + .func_write = _func, \ + _cmd_desc, \ + }, \ + ##__VA_ARGS__ + +/* + * Object is only supported if the function pointer named ibdev_fn in struct + * ib_device is not NULL. + */ +#define UAPI_DEF_OBJ_NEEDS_FN(ibdev_fn) \ + { \ + .kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \ + .scope = UAPI_SCOPE_OBJECT, \ + .needs_fn_offset = \ + offsetof(struct ib_device_ops, ibdev_fn) + \ + BUILD_BUG_ON_ZERO( \ + sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \ + sizeof(void *)), \ + } + +/* + * Method is only supported if the function pointer named ibdev_fn in struct + * ib_device is not NULL. + */ +#define UAPI_DEF_METHOD_NEEDS_FN(ibdev_fn) \ + { \ + .kind = UAPI_DEF_IS_SUPPORTED_DEV_FN, \ + .scope = UAPI_SCOPE_METHOD, \ + .needs_fn_offset = \ + offsetof(struct ib_device_ops, ibdev_fn) + \ + BUILD_BUG_ON_ZERO( \ + sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \ + sizeof(void *)), \ + } + +/* Call a function to determine if the entire object is supported or not */ +#define UAPI_DEF_IS_OBJ_SUPPORTED(_func) \ + { \ + .kind = UAPI_DEF_IS_SUPPORTED_FUNC, \ + .scope = UAPI_SCOPE_OBJECT, .func_is_supported = _func, \ + } + +/* Include another struct uapi_definition in this one */ +#define UAPI_DEF_CHAIN(_def_var) \ + { \ + .kind = UAPI_DEF_CHAIN, .chain = _def_var, \ + } + +/* Temporary until the tree base description is replaced */ +#define UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, _object_ptr, ...) \ + { \ + .kind = UAPI_DEF_CHAIN_OBJ_TREE, \ + .object_start = { .object_id = _object_enum }, \ + .chain_obj_tree = _object_ptr, \ + }, \ + ##__VA_ARGS__ +#define UAPI_DEF_CHAIN_OBJ_TREE_NAMED(_object_enum, ...) \ + UAPI_DEF_CHAIN_OBJ_TREE(_object_enum, &UVERBS_OBJECT(_object_enum), \ + ##__VA_ARGS__) + /* * ======================================= * Attribute Specifications @@ -361,6 +525,12 @@ struct uverbs_object_tree_def { .u2.objs_arr.max_len = _max_len, \ __VA_ARGS__ } }) +/* + * Only for use with UVERBS_ATTR_IDR, allows any uobject type to be accepted, + * the user must validate the type of the uobject instead. 
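/*
 * Illustrative sketch of a definition table built from the macros above
 * (the object choice and the chained table are placeholders; the real
 * tables live under drivers/infiniband/core/).  A zeroed entry terminates
 * the array, matching UAPI_DEF_END == 0.
 */
static const struct uapi_definition example_chained_defs[] = {
	{},
};

static const struct uapi_definition example_defs[] = {
	DECLARE_UVERBS_OBJECT(UVERBS_OBJECT_DM,
			      UAPI_DEF_OBJ_NEEDS_FN(alloc_dm),
			      UAPI_DEF_OBJ_NEEDS_FN(dealloc_dm)),
	UAPI_DEF_CHAIN(example_chained_defs),
	{},
};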
+ */ +#define UVERBS_IDR_ANY_OBJECT 0xFFFF + #define UVERBS_ATTR_IDR(_attr_id, _idr_type, _access, ...) \ (&(const struct uverbs_attr_def){ \ .id = _attr_id, \ @@ -433,25 +603,12 @@ struct uverbs_object_tree_def { #define UVERBS_ATTR_UHW() \ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_UHW_IN, \ UVERBS_ATTR_MIN_SIZE(0), \ - UA_OPTIONAL), \ + UA_OPTIONAL, \ + .is_udata = 1), \ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_UHW_OUT, \ UVERBS_ATTR_MIN_SIZE(0), \ - UA_OPTIONAL) - -/* - * ======================================= - * Declaration helpers - * ======================================= - */ - -#define DECLARE_UVERBS_OBJECT_TREE(_name, ...) \ - static const struct uverbs_object_def *const _name##_ptr[] = { \ - __VA_ARGS__, \ - }; \ - static const struct uverbs_object_tree_def _name = { \ - .num_objects = ARRAY_SIZE(_name##_ptr), \ - .objects = &_name##_ptr, \ - } + UA_OPTIONAL, \ + .is_udata = 1) /* ================================================= * Parsing infrastructure @@ -492,6 +649,8 @@ struct uverbs_attr { }; struct uverbs_attr_bundle { + struct ib_udata driver_udata; + struct ib_udata ucore; struct ib_uverbs_file *ufile; DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN); struct uverbs_attr attrs[]; @@ -560,6 +719,28 @@ uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx) return attr->ptr_attr.len; } +/* + * uverbs_attr_ptr_get_array_size() - Get array size pointer by a ptr + * attribute. + * @attrs: The attribute bundle + * @idx: The ID of the attribute + * @elem_size: The size of the element in the array + */ +static inline int +uverbs_attr_ptr_get_array_size(struct uverbs_attr_bundle *attrs, u16 idx, + size_t elem_size) +{ + int size = uverbs_attr_get_len(attrs, idx); + + if (size < 0) + return size; + + if (size % elem_size) + return -EINVAL; + + return size / elem_size; +} + /** * uverbs_attr_get_uobjs_arr() - Provides array's properties for attribute for * UVERBS_ATTR_TYPE_IDRS_ARRAY. 
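/*
 * Illustrative handler sketch for uverbs_attr_ptr_get_array_size() above
 * (EXAMPLE_ATTR_VALS is a placeholder attribute id; the attribute is
 * assumed to carry an array of u64 values):
 */
enum { EXAMPLE_ATTR_VALS };	/* placeholder id for the sketch */

static int example_array_handler(struct uverbs_attr_bundle *attrs)
{
	int nelems;

	nelems = uverbs_attr_ptr_get_array_size(attrs, EXAMPLE_ATTR_VALS,
						sizeof(u64));
	if (nelems < 0)
		return nelems;	/* -EINVAL when len is not a multiple of u64 */

	/* ... copy out and consume nelems elements via the bundle helpers ... */
	return nelems;
}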
@@ -660,6 +841,12 @@ static inline int _uverbs_copy_from_or_zero(void *to, #define uverbs_copy_from_or_zero(to, attrs_bundle, idx) \ _uverbs_copy_from_or_zero(to, attrs_bundle, idx, sizeof(*to)) +static inline struct ib_ucontext * +ib_uverbs_get_ucontext(const struct uverbs_attr_bundle *attrs) +{ + return ib_uverbs_get_ucontext_file(attrs->ufile); +} + #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, u64 allowed_bits); @@ -684,6 +871,8 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle, int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, size_t idx, s64 lower_bound, u64 upper_bound, s64 *def_val); +int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, + size_t idx, const void *from, size_t size); #else static inline int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle, @@ -719,6 +908,12 @@ _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, { return -EINVAL; } +static inline int +uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, + size_t idx, const void *from, size_t size) +{ + return -EINVAL; +} #endif #define uverbs_get_const(_to, _attrs_bundle, _idx) \ diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h index b3b21733cc55..3447bfe356d6 100644 --- a/include/rdma/uverbs_named_ioctl.h +++ b/include/rdma/uverbs_named_ioctl.h @@ -43,7 +43,7 @@ #define _UVERBS_NAME(x, y) _UVERBS_PASTE(x, y) #define UVERBS_METHOD(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _method_##id) #define UVERBS_HANDLER(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _handler_##id) -#define UVERBS_OBJECT(id) _UVERBS_NAME(UVERBS_MOUDLE_NAME, _object_##id) +#define UVERBS_OBJECT(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _object_##id) /* These are static so they do not need to be qualified */ #define UVERBS_METHOD_ATTRS(method_id) _method_attrs_##method_id @@ -102,18 +102,11 @@ #define ADD_UVERBS_METHODS(_name, _object_id, ...) \ static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \ _object_id)[] = { __VA_ARGS__ }; \ - static const struct uverbs_object_def _name##_struct = { \ + static const struct uverbs_object_def _name = { \ .id = _object_id, \ .num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \ .methods = &UVERBS_OBJECT_METHODS(_object_id) \ - }; \ - static const struct uverbs_object_def *const _name##_ptrs[] = { \ - &_name##_struct, \ - }; \ - static const struct uverbs_object_tree_def _name = { \ - .num_objects = 1, \ - .objects = &_name##_ptrs, \ - } + }; /* Used by drivers to declare a complete parsing tree for a single method that * differs only in having additional driver specific attributes. diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h index 3db2802fbc68..883abcf6d36e 100644 --- a/include/rdma/uverbs_std_types.h +++ b/include/rdma/uverbs_std_types.h @@ -37,15 +37,6 @@ #include <rdma/uverbs_ioctl.h> #include <rdma/ib_user_ioctl_verbs.h> -#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) -const struct uverbs_object_tree_def *uverbs_default_get_objects(void); -#else -static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void) -{ - return NULL; -} -#endif - /* Returns _id, or causes a compile error if _id is not a u32. 
* * The uobj APIs should only be used with the write based uAPI to access @@ -54,15 +45,15 @@ static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(vo */ #define _uobj_check_id(_id) ((_id) * typecheck(u32, _id)) -#define uobj_get_type(_ufile, _object) \ - uapi_get_object((_ufile)->device->uapi, _object) +#define uobj_get_type(_attrs, _object) \ + uapi_get_object((_attrs)->ufile->device->uapi, _object) -#define uobj_get_read(_type, _id, _ufile) \ - rdma_lookup_get_uobject(uobj_get_type(_ufile, _type), _ufile, \ +#define uobj_get_read(_type, _id, _attrs) \ + rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \ _uobj_check_id(_id), UVERBS_LOOKUP_READ) -#define ufd_get_read(_type, _fdnum, _ufile) \ - rdma_lookup_get_uobject(uobj_get_type(_ufile, _type), _ufile, \ +#define ufd_get_read(_type, _fdnum, _attrs) \ + rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \ (_fdnum)*typecheck(s32, _fdnum), \ UVERBS_LOOKUP_READ) @@ -72,26 +63,27 @@ static inline void *_uobj_get_obj_read(struct ib_uobject *uobj) return NULL; return uobj->object; } -#define uobj_get_obj_read(_object, _type, _id, _ufile) \ +#define uobj_get_obj_read(_object, _type, _id, _attrs) \ ((struct ib_##_object *)_uobj_get_obj_read( \ - uobj_get_read(_type, _id, _ufile))) + uobj_get_read(_type, _id, _attrs))) -#define uobj_get_write(_type, _id, _ufile) \ - rdma_lookup_get_uobject(uobj_get_type(_ufile, _type), _ufile, \ +#define uobj_get_write(_type, _id, _attrs) \ + rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \ _uobj_check_id(_id), UVERBS_LOOKUP_WRITE) int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id, - struct ib_uverbs_file *ufile, int success_res); -#define uobj_perform_destroy(_type, _id, _ufile, _success_res) \ - __uobj_perform_destroy(uobj_get_type(_ufile, _type), \ - _uobj_check_id(_id), _ufile, _success_res) + const struct uverbs_attr_bundle *attrs); +#define uobj_perform_destroy(_type, _id, _attrs) \ + __uobj_perform_destroy(uobj_get_type(_attrs, _type), \ + _uobj_check_id(_id), _attrs) struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, - u32 id, struct ib_uverbs_file *ufile); + u32 id, + const struct uverbs_attr_bundle *attrs); -#define uobj_get_destroy(_type, _id, _ufile) \ - __uobj_get_destroy(uobj_get_type(_ufile, _type), _uobj_check_id(_id), \ - _ufile) +#define uobj_get_destroy(_type, _id, _attrs) \ + __uobj_get_destroy(uobj_get_type(_attrs, _type), _uobj_check_id(_id), \ + _attrs) static inline void uobj_put_destroy(struct ib_uobject *uobj) { @@ -111,14 +103,13 @@ static inline void uobj_put_write(struct ib_uobject *uobj) rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); } -static inline int __must_check uobj_alloc_commit(struct ib_uobject *uobj, - int success_res) +static inline int __must_check uobj_alloc_commit(struct ib_uobject *uobj) { int ret = rdma_alloc_commit_uobject(uobj); if (ret) return ret; - return success_res; + return 0; } static inline void uobj_alloc_abort(struct ib_uobject *uobj) @@ -127,18 +118,18 @@ static inline void uobj_alloc_abort(struct ib_uobject *uobj) } static inline struct ib_uobject * -__uobj_alloc(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile, - struct ib_device **ib_dev) +__uobj_alloc(const struct uverbs_api_object *obj, + struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev) { - struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, ufile); + struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, attrs->ufile); if 
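/*
 * Illustrative handler sketch for the attrs-based uobject helpers above
 * (object type and handle source are placeholders): handlers now receive
 * only the attr bundle, and the uobj macros derive the ufile from it.
 */
static int example_destroy_cq(struct uverbs_attr_bundle *attrs, u32 cq_handle)
{
	struct ib_uobject *uobj;

	uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cq_handle, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	/* ... copy final event counts into the response attributes ... */

	uobj_put_destroy(uobj);
	return 0;
}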
(!IS_ERR(uobj)) *ib_dev = uobj->context->device; return uobj; } -#define uobj_alloc(_type, _ufile, _ib_dev) \ - __uobj_alloc(uobj_get_type(_ufile, _type), _ufile, _ib_dev) +#define uobj_alloc(_type, _attrs, _ib_dev) \ + __uobj_alloc(uobj_get_type(_attrs, _type), _attrs, _ib_dev) static inline void uverbs_flow_action_fill_action(struct ib_flow_action *action, struct ib_uobject *uobj, @@ -191,5 +182,17 @@ static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow, uflow->resources = uflow_res; } +struct uverbs_api_object { + const struct uverbs_obj_type *type_attrs; + const struct uverbs_obj_type_class *type_class; + u8 disabled:1; + u32 id; +}; + +static inline u32 uobj_get_object_id(struct ib_uobject *uobj) +{ + return uobj->uapi_object->id; +} + #endif diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index c891ada3c5c2..d85e6befa26b 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -61,6 +61,9 @@ struct scsi_pointer { /* flags preserved across unprep / reprep */ #define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED) +/* for scmd->state */ +#define SCMD_STATE_COMPLETE 0 + struct scsi_cmnd { struct scsi_request req; struct scsi_device *device; @@ -145,6 +148,7 @@ struct scsi_cmnd { int result; /* Status code from lower level driver */ int flags; /* Command flags */ + unsigned long state; /* Command completion state */ unsigned char tag; /* SCSI-II queued command tag */ }; @@ -171,7 +175,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, size_t *offset, size_t *len); extern void scsi_kunmap_atomic_sg(void *virt); -extern int scsi_init_io(struct scsi_cmnd *cmd); +extern blk_status_t scsi_init_io(struct scsi_cmnd *cmd); #ifdef CONFIG_SCSI_DMA extern int scsi_dma_map(struct scsi_cmnd *cmd); diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h index c7bba2b24849..a862dc23c68d 100644 --- a/include/scsi/scsi_dh.h +++ b/include/scsi/scsi_dh.h @@ -69,7 +69,7 @@ struct scsi_device_handler { int (*attach)(struct scsi_device *); void (*detach)(struct scsi_device *); int (*activate)(struct scsi_device *, activate_complete, void *); - int (*prep_fn)(struct scsi_device *, struct request *); + blk_status_t (*prep_fn)(struct scsi_device *, struct request *); int (*set_params)(struct scsi_device *, const char *); void (*rescan)(struct scsi_device *); }; diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h index fae8b465233e..6dffa8555a39 100644 --- a/include/scsi/scsi_driver.h +++ b/include/scsi/scsi_driver.h @@ -2,6 +2,7 @@ #ifndef _SCSI_SCSI_DRIVER_H #define _SCSI_SCSI_DRIVER_H +#include <linux/blk_types.h> #include <linux/device.h> struct module; @@ -13,7 +14,7 @@ struct scsi_driver { struct device_driver gendrv; void (*rescan)(struct device *); - int (*init_command)(struct scsi_cmnd *); + blk_status_t (*init_command)(struct scsi_cmnd *); void (*uninit_command)(struct scsi_cmnd *); int (*done)(struct scsi_cmnd *); int (*eh_action)(struct scsi_cmnd *, int); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 5ea06d310a25..6ca954e9f752 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -11,7 +11,6 @@ #include <linux/blk-mq.h> #include <scsi/scsi.h> -struct request_queue; struct block_device; struct completion; struct module; @@ -22,7 +21,6 @@ struct scsi_target; struct Scsi_Host; struct scsi_host_cmd_pool; struct scsi_transport_template; -struct blk_queue_tags; /* @@ -44,9 +42,6 @@ struct blk_queue_tags; #define MODE_INITIATOR 0x01 #define 
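/*
 * Illustrative ULD-side sketch of the blk_status_t conversion above
 * (READ command build-up shortened): .init_command implementations now
 * return block-layer status codes such as BLK_STS_OK / BLK_STS_IOERR
 * instead of the old BLKPREP_* ints, and scsi_init_io() follows suit.
 */
static blk_status_t example_init_command(struct scsi_cmnd *cmd)
{
	blk_status_t ret;

	ret = scsi_init_io(cmd);	/* now also returns blk_status_t */
	if (ret != BLK_STS_OK)
		return ret;

	cmd->cmd_len  = 10;		/* build the CDB as before */
	cmd->cmnd[0]  = READ_10;
	return BLK_STS_OK;
}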
MODE_TARGET 0x02 -#define DISABLE_CLUSTERING 0 -#define ENABLE_CLUSTERING 1 - struct scsi_host_template { struct module *module; const char *name; @@ -366,6 +361,11 @@ struct scsi_host_template { unsigned int max_sectors; /* + * Maximum size in bytes of a single segment. + */ + unsigned int max_segment_size; + + /* * DMA scatter gather segment boundary limit. A segment crossing this * boundary will be split in two. */ @@ -415,16 +415,6 @@ struct scsi_host_template { unsigned unchecked_isa_dma:1; /* - * True if this host adapter can make good use of clustering. - * I originally thought that if the tablesize was large that it - * was a waste of CPU cycles to prepare a cluster list, but - * it works out that the Buslogic is faster if you use a smaller - * number of segments (i.e. use clustering). I guess it is - * inefficient. - */ - unsigned use_clustering:1; - - /* * True for emulated SCSI host adapters (e.g. ATAPI). */ unsigned emulated:1; @@ -547,14 +537,8 @@ struct Scsi_Host { struct scsi_host_template *hostt; struct scsi_transport_template *transportt; - /* - * Area to keep a shared tag map (if needed, will be - * NULL if not). - */ - union { - struct blk_queue_tag *bqt; - struct blk_mq_tag_set tag_set; - }; + /* Area to keep a shared tag map */ + struct blk_mq_tag_set tag_set; atomic_t host_busy; /* commands actually active on low-level */ atomic_t host_blocked; @@ -604,6 +588,7 @@ struct Scsi_Host { short unsigned int sg_tablesize; short unsigned int sg_prot_tablesize; unsigned int max_sectors; + unsigned int max_segment_size; unsigned long dma_boundary; /* * In scsi-mq mode, the number of hardware queues supported by the LLD. @@ -621,7 +606,6 @@ struct Scsi_Host { unsigned active_mode:2; unsigned unchecked_isa_dma:1; - unsigned use_clustering:1; /* * Host has requested that no further requests come through for the @@ -648,7 +632,6 @@ struct Scsi_Host { /* The controller does not support WRITE SAME */ unsigned no_write_same:1; - unsigned use_blk_mq:1; unsigned use_cmd_list:1; /* Host responded with short (<36 bytes) INQUIRY result */ @@ -742,11 +725,6 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost) shost->tmf_in_progress; } -static inline bool shost_use_blk_mq(struct Scsi_Host *shost) -{ - return shost->use_blk_mq; -} - extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); extern void scsi_flush_work(struct Scsi_Host *); diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index e192a0caa850..6053d46e794e 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h @@ -23,19 +23,15 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost, int tag) { struct request *req = NULL; + u16 hwq; if (tag == SCSI_NO_TAG) return NULL; - if (shost_use_blk_mq(shost)) { - u16 hwq = blk_mq_unique_tag_to_hwq(tag); - - if (hwq < shost->tag_set.nr_hw_queues) { - req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq], - blk_mq_unique_tag_to_tag(tag)); - } - } else { - req = blk_map_queue_find_tag(shost->bqt, tag); + hwq = blk_mq_unique_tag_to_hwq(tag); + if (hwq < shost->tag_set.nr_hw_queues) { + req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq], + blk_mq_unique_tag_to_tag(tag)); } if (!req) diff --git a/include/scsi/srp.h b/include/scsi/srp.h index c16a3c9a4d9b..9220758d5087 100644 --- a/include/scsi/srp.h +++ b/include/scsi/srp.h @@ -67,7 +67,8 @@ enum { enum { SRP_NO_DATA_DESC = 0, SRP_DATA_DESC_DIRECT = 1, - SRP_DATA_DESC_INDIRECT = 2 + SRP_DATA_DESC_INDIRECT = 2, + SRP_DATA_DESC_IMM = 3, /* new in SRP2 */ }; enum { @@ -111,9 +112,16 @@ 
struct srp_indirect_buf { struct srp_direct_buf desc_list[0]; } __attribute__((packed)); +/* Immediate data buffer descriptor as defined in SRP2. */ +struct srp_imm_buf { + __be32 len; +}; + +/* srp_login_req.flags */ enum { SRP_MULTICHAN_SINGLE = 0, - SRP_MULTICHAN_MULTI = 1 + SRP_MULTICHAN_MULTI = 1, + SRP_IMMED_REQUESTED = 0x80, /* new in SRP2 */ }; struct srp_login_req { @@ -124,7 +132,9 @@ struct srp_login_req { u8 reserved2[4]; __be16 req_buf_fmt; u8 req_flags; - u8 reserved3[5]; + u8 reserved3[1]; + __be16 imm_data_offset; /* new in SRP2 */ + u8 reserved4[2]; u8 initiator_port_id[16]; u8 target_port_id[16]; }; @@ -144,6 +154,16 @@ struct srp_login_req_rdma { __be32 req_it_iu_len; u8 initiator_port_id[16]; u8 target_port_id[16]; + __be16 imm_data_offset; + u8 reserved[6]; +}; + +/* srp_login_rsp.rsp_flags */ +enum { + SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0, + SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1, + SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2, + SRP_LOGIN_RSP_IMMED_SUPP = 0x80, /* new in SRP2 */ }; /* diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h index c4a5c9e9fb47..4be1aa4435ae 100644 --- a/include/soc/bcm2835/raspberrypi-firmware.h +++ b/include/soc/bcm2835/raspberrypi-firmware.h @@ -1,9 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright © 2015 Broadcom - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __SOC_RASPBERRY_FIRMWARE_H__ diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h index 70997ab2146c..3fbd71c27ba3 100644 --- a/include/soc/fsl/dpaa2-io.h +++ b/include/soc/fsl/dpaa2-io.h @@ -116,4 +116,8 @@ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, void dpaa2_io_store_destroy(struct dpaa2_io_store *s); struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last); +int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid, + u32 *fcnt, u32 *bcnt); +int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, + u32 *num); #endif /* __FSL_DPAA2_IO_H */ diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index 56877660d5ba..5cc7af06c1ba 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -1205,8 +1205,10 @@ void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh); * qman_dqrr_set_ithresh - Set coalesce interrupt threshold * @portal: portal to set the new value on * @ithresh: new threshold value + * + * Returns 0 on success, or a negative error code. */ -void qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh); +int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh); /** * qman_dqrr_get_iperiod - Get coalesce interrupt period @@ -1219,7 +1221,9 @@ void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod); * qman_dqrr_set_iperiod - Set coalesce interrupt period * @portal: portal to set the new value on * @ithresh: new period value + * + * Returns 0 on success, or a negative error code. 
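A rough caller sketch (not taken from this patch) for the qman.h change above: qman_dqrr_set_ithresh() and qman_portal_set_iperiod() now return an int, so callers can propagate failures instead of ignoring them. The function name and the threshold/period values below are placeholders chosen for illustration.

#include <soc/fsl/qman.h>

static int example_configure_coalescing(struct qman_portal *portal)
{
	int ret;

	/* e.g. wait for 4 DQRR entries before raising an interrupt */
	ret = qman_dqrr_set_ithresh(portal, 4);
	if (ret)
		return ret;

	/* example period value; units follow the portal's coalesce timer */
	return qman_portal_set_iperiod(portal, 100);
}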
*/ -void qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod); +int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod); #endif /* __FSL_QMAN_H */ diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h index 578180cbc134..af9722223925 100644 --- a/include/soc/qcom/cmd-db.h +++ b/include/soc/qcom/cmd-db.h @@ -18,9 +18,7 @@ enum cmd_db_hw_type { #if IS_ENABLED(CONFIG_QCOM_COMMAND_DB) u32 cmd_db_read_addr(const char *resource_id); -int cmd_db_read_aux_data(const char *resource_id, u8 *data, size_t len); - -size_t cmd_db_read_aux_data_len(const char *resource_id); +const void *cmd_db_read_aux_data(const char *resource_id, size_t *len); enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id); @@ -29,12 +27,8 @@ int cmd_db_ready(void); static inline u32 cmd_db_read_addr(const char *resource_id) { return 0; } -static inline int cmd_db_read_aux_data(const char *resource_id, u8 *data, - size_t len) -{ return -ENODEV; } - -static inline size_t cmd_db_read_aux_data_len(const char *resource_id) -{ return -ENODEV; } +static inline const void *cmd_db_read_aux_data(const char *resource_id, size_t *len) +{ return ERR_PTR(-ENODEV); } static inline enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id) { return -ENODEV; } diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h index 98d8d38b99a1..ab7f8796a260 100644 --- a/include/soc/tegra/bpmp-abi.h +++ b/include/soc/tegra/bpmp-abi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -40,7 +40,6 @@ * @file */ - /** * @defgroup MRQ MRQ Messages * @brief Messages sent to/from BPMP via IPC @@ -53,7 +52,7 @@ */ /** - * @addtogroup MRQ_Format Message Format + * @addtogroup MRQ_Format * @{ * The CPU requests the BPMP to perform a particular service by * sending it an IVC frame containing a single MRQ message. An MRQ @@ -76,7 +75,7 @@ /** * @ingroup MRQ_Format - * @brief header for an MRQ message + * @brief Header for an MRQ message * * Provides the MRQ number for the MRQ message: #mrq. The remainder of * the MRQ message is a payload (immediately following the @@ -86,7 +85,7 @@ struct mrq_request { /** @brief MRQ number of the request */ uint32_t mrq; /** - * @brief flags providing follow up directions to the receiver + * @brief Flags providing follow up directions to the receiver * * | Bit | Description | * |-----|--------------------------------------------| @@ -98,7 +97,7 @@ struct mrq_request { /** * @ingroup MRQ_Format - * @brief header for an MRQ response + * @brief Header for an MRQ response * * Provides an error code for the associated MRQ message. 
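A rough caller sketch (not taken from this patch) for the reworked cmd-db helper above: cmd_db_read_aux_data() now returns a pointer to the aux data and reports its length through *len instead of copying into a caller-supplied buffer. The resource name is a placeholder, and the ERR_PTR-style error handling mirrors the !CONFIG_QCOM_COMMAND_DB stub shown in the hunk.

#include <linux/err.h>
#include <linux/kernel.h>
#include <soc/qcom/cmd-db.h>

static int example_read_aux(void)
{
	const void *data;
	size_t len;

	data = cmd_db_read_aux_data("example", &len);	/* placeholder resource id */
	if (IS_ERR(data))
		return PTR_ERR(data);

	pr_info("aux data: %zu bytes\n", len);
	return 0;
}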
The * remainder of the MRQ response is a payload (immediately following @@ -106,9 +105,9 @@ struct mrq_request { * mrq_request::mrq */ struct mrq_response { - /** @brief error code for the MRQ request itself */ + /** @brief Error code for the MRQ request itself */ int32_t err; - /** @brief reserved for future use */ + /** @brief Reserved for future use */ uint32_t flags; } __ABI_PACKED; @@ -152,6 +151,14 @@ struct mrq_response { #define MRQ_TRACE_ITER 64 #define MRQ_RINGBUF_CONSOLE 65 #define MRQ_PG 66 +#define MRQ_CPU_NDIV_LIMITS 67 +#define MRQ_STRAP 68 +#define MRQ_UPHY 69 +#define MRQ_CPU_AUTO_CC3 70 +#define MRQ_QUERY_FW_TAG 71 +#define MRQ_FMON 72 +#define MRQ_EC 73 +#define MRQ_FBVOLT_STATUS 74 /** @} */ @@ -160,31 +167,35 @@ struct mrq_response { * @brief Maximum MRQ code to be sent by CPU software to * BPMP. Subject to change in future */ -#define MAX_CPU_MRQ_ID 66 +#define MAX_CPU_MRQ_ID 74 /** - * @addtogroup MRQ_Payloads Message Payloads + * @addtogroup MRQ_Payloads * @{ - * @defgroup Ping + * @defgroup Ping Ping * @defgroup Query_Tag Query Tag * @defgroup Module Loadable Modules - * @defgroup Trace - * @defgroup Debugfs - * @defgroup Reset - * @defgroup I2C - * @defgroup Clocks + * @defgroup Trace Trace + * @defgroup Debugfs Debug File System + * @defgroup Reset Reset + * @defgroup I2C I2C + * @defgroup Clocks Clocks * @defgroup ABI_info ABI Info - * @defgroup MC_Flush MC Flush - * @defgroup Powergating - * @defgroup Thermal + * @defgroup Powergating Power Gating + * @defgroup Thermal Thermal * @defgroup Vhint CPU Voltage hint - * @defgroup MRQ_Deprecated Deprecated MRQ messages - * @defgroup EMC - * @defgroup RingbufConsole + * @defgroup EMC EMC + * @defgroup CPU NDIV Limits + * @defgroup RingbufConsole Ring Buffer Console + * @defgroup Strap Straps + * @defgroup UPHY UPHY + * @defgroup CC3 Auto-CC3 + * @defgroup FMON FMON + * @defgroup EC EC + * @defgroup Fbvolt_status Fuse Burn Voltage Status * @} */ - /** * @ingroup MRQ_Codes * @def MRQ_PING @@ -214,20 +225,20 @@ struct mrq_response { /** * @ingroup Ping - * @brief request with #MRQ_PING + * @brief Request with #MRQ_PING * * Used by the sender of an #MRQ_PING message to request a pong from * recipient. The response from the recipient is computed based on * #challenge. */ struct mrq_ping_request { -/** @brief arbitrarily chosen value */ +/** @brief Arbitrarily chosen value */ uint32_t challenge; } __ABI_PACKED; /** * @ingroup Ping - * @brief response to #MRQ_PING + * @brief Response to #MRQ_PING * * Sent in response to an #MRQ_PING message. #reply should be the * mrq_ping_request challenge left shifted by 1 with the carry-bit @@ -235,14 +246,16 @@ struct mrq_ping_request { * */ struct mrq_ping_response { - /** @brief response to the MRQ_PING challege */ + /** @brief Response to the MRQ_PING challege */ uint32_t reply; } __ABI_PACKED; /** * @ingroup MRQ_Codes * @def MRQ_QUERY_TAG - * @brief Query BPMP firmware's tag (i.e. version information) + * @brief Query BPMP firmware's tag (i.e. unique identifer) + * + * @deprecated Use #MRQ_QUERY_FW_TAG instead. * * * Platforms: All * * Initiators: CCPLEX @@ -254,25 +267,50 @@ struct mrq_ping_response { /** * @ingroup Query_Tag - * @brief request with #MRQ_QUERY_TAG - * - * Used by #MRQ_QUERY_TAG call to ask BPMP to fill in the memory - * pointed by #addr with BPMP firmware header. + * @brief Request with #MRQ_QUERY_TAG * - * The sender is reponsible for ensuring that #addr is mapped in to - * the recipient's address map. 
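A small sketch (not taken from this patch) of the MRQ_PING reply rule documented above: the expected reply is the challenge shifted left by one with the carry bit dropped, which for a 32-bit value is simply the shift.

#include <linux/types.h>

static u32 example_expected_ping_reply(u32 challenge)
{
	/* the carry bit falls off the 32-bit value */
	return challenge << 1;
}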
+ * @deprecated This structure will be removed in future version. + * Use MRQ_QUERY_FW_TAG instead. */ struct mrq_query_tag_request { - /** @brief base address to store the firmware header */ + /** @brief Base address to store the firmware tag */ uint32_t addr; } __ABI_PACKED; + /** * @ingroup MRQ_Codes - * @def MRQ_MODULE_LOAD - * @brief dynamically load a BPMP code module + * @def MRQ_QUERY_FW_TAG + * @brief Query BPMP firmware's tag (i.e. unique identifier) * * * Platforms: All + * * Initiators: Any + * * Targets: BPMP + * * Request Payload: N/A + * * Response Payload: @ref mrq_query_fw_tag_response + * + */ + +/** + * @ingroup Query_Tag + * @brief Response to #MRQ_QUERY_FW_TAG + * + * Sent in response to #MRQ_QUERY_FW_TAG message. #tag contains the unique + * identifier for the version of firmware issuing the reply. + * + */ +struct mrq_query_fw_tag_response { + /** @brief Array to store tag information */ + uint8_t tag[32]; +} __ABI_PACKED; + +/** + * @ingroup MRQ_Codes + * @def MRQ_MODULE_LOAD + * @brief Dynamically load a BPMP code module + * + * * Platforms: T210, T214, T186 + * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186) * * Initiators: CCPLEX * * Targets: BPMP * * Request Payload: @ref mrq_module_load_request @@ -284,7 +322,7 @@ struct mrq_query_tag_request { /** * @ingroup Module - * @brief request with #MRQ_MODULE_LOAD + * @brief Request with #MRQ_MODULE_LOAD * * Used by #MRQ_MODULE_LOAD calls to ask the recipient to dynamically * load the code located at #phys_addr and having size #size @@ -300,29 +338,31 @@ struct mrq_query_tag_request { * */ struct mrq_module_load_request { - /** @brief base address of the code to load. Treated as (void *) */ + /** @brief Base address of the code to load. Treated as (void *) */ uint32_t phys_addr; /* (void *) */ - /** @brief size in bytes of code to load */ + /** @brief Size in bytes of code to load */ uint32_t size; } __ABI_PACKED; /** * @ingroup Module - * @brief response to #MRQ_MODULE_LOAD + * @brief Response to #MRQ_MODULE_LOAD * * @todo document mrq_response::err */ struct mrq_module_load_response { - /** @brief handle to the loaded module */ + /** @brief Handle to the loaded module */ uint32_t base; } __ABI_PACKED; +/** @endcond*/ /** * @ingroup MRQ_Codes * @def MRQ_MODULE_UNLOAD - * @brief unload a previously loaded code module + * @brief Unload a previously loaded code module * - * * Platforms: All + * * Platforms: T210, T214, T186 + * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186) * * Initiators: CCPLEX * * Targets: BPMP * * Request Payload: @ref mrq_module_unload_request @@ -333,20 +373,21 @@ struct mrq_module_load_response { /** * @ingroup Module - * @brief request with #MRQ_MODULE_UNLOAD + * @brief Request with #MRQ_MODULE_UNLOAD * * Used by #MRQ_MODULE_UNLOAD calls to request that a previously loaded * module be unloaded. */ struct mrq_module_unload_request { - /** @brief handle of the module to unload */ + /** @brief Handle of the module to unload */ uint32_t base; } __ABI_PACKED; +/** @endcond*/ /** * @ingroup MRQ_Codes * @def MRQ_TRACE_MODIFY - * @brief modify the set of enabled trace events + * @brief Modify the set of enabled trace events * * * Platforms: All * * Initiators: CCPLEX @@ -359,22 +400,22 @@ struct mrq_module_unload_request { /** * @ingroup Trace - * @brief request with #MRQ_TRACE_MODIFY + * @brief Request with #MRQ_TRACE_MODIFY * * Used by %MRQ_TRACE_MODIFY calls to enable or disable specify trace * events. #set takes precedence for any bit set in both #set and * #clr. 
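A rough sketch (not taken from this patch) of how a kernel client might issue the new MRQ_QUERY_FW_TAG message. It assumes the tegra_bpmp_transfer() helper and struct tegra_bpmp_message from <soc/tegra/bpmp.h>; the example_* name and the minimal error handling are illustrative only.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

static int example_query_fw_tag(struct tegra_bpmp *bpmp, char *buf, size_t size)
{
	struct mrq_query_fw_tag_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_QUERY_FW_TAG,
		.rx = {
			.data = &resp,
			.size = sizeof(resp),
		},
	};
	int err;

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err)
		return err;
	if (msg.rx.ret)
		return -EINVAL;	/* MRQ-level error in mrq_response::err */

	/* resp.tag is not guaranteed to be NUL-terminated */
	memcpy(buf, resp.tag, min(size, sizeof(resp.tag)));
	return 0;
}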
*/ struct mrq_trace_modify_request { - /** @brief bit mask of trace events to disable */ + /** @brief Bit mask of trace events to disable */ uint32_t clr; - /** @brief bit mask of trace events to enable */ + /** @brief Bit mask of trace events to enable */ uint32_t set; } __ABI_PACKED; /** * @ingroup Trace - * @brief response to #MRQ_TRACE_MODIFY + * @brief Response to #MRQ_TRACE_MODIFY * * Sent in repsonse to an #MRQ_TRACE_MODIFY message. #mask reflects the * state of which events are enabled after the recipient acted on the @@ -382,7 +423,7 @@ struct mrq_trace_modify_request { * */ struct mrq_trace_modify_response { - /** @brief bit mask of trace event enable states */ + /** @brief Bit mask of trace event enable states */ uint32_t mask; } __ABI_PACKED; @@ -407,7 +448,7 @@ struct mrq_trace_modify_response { /** * @ingroup Trace - * @brief request with #MRQ_WRITE_TRACE + * @brief Request with #MRQ_WRITE_TRACE * * Used by MRQ_WRITE_TRACE calls to ask the recipient to copy trace * data from the recipient's local buffer to the output buffer. #area @@ -420,22 +461,22 @@ struct mrq_trace_modify_response { * overwrites. */ struct mrq_write_trace_request { - /** @brief base address of output buffer */ + /** @brief Base address of output buffer */ uint32_t area; - /** @brief size in bytes of the output buffer */ + /** @brief Size in bytes of the output buffer */ uint32_t size; } __ABI_PACKED; /** * @ingroup Trace - * @brief response to #MRQ_WRITE_TRACE + * @brief Response to #MRQ_WRITE_TRACE * * Once this response is sent, the respondent will not access the * output buffer further. */ struct mrq_write_trace_response { /** - * @brief flag whether more data remains in local buffer + * @brief Flag whether more data remains in local buffer * * Value is 1 if the entire local trace buffer has been * drained to the outputbuffer. Value is 0 otherwise. @@ -456,9 +497,10 @@ struct mrq_threaded_ping_response { /** * @ingroup MRQ_Codes * @def MRQ_MODULE_MAIL - * @brief send a message to a loadable module + * @brief Send a message to a loadable module * - * * Platforms: All + * * Platforms: T210, T214, T186 + * @cond (bpmp_t210 || bpmp_t214 || bpmp_t186) * * Initiators: Any * * Targets: BPMP * * Request Payload: @ref mrq_module_mail_request @@ -469,12 +511,12 @@ struct mrq_threaded_ping_response { /** * @ingroup Module - * @brief request with #MRQ_MODULE_MAIL + * @brief Request with #MRQ_MODULE_MAIL */ struct mrq_module_mail_request { - /** @brief handle to the previously loaded module */ + /** @brief Handle to the previously loaded module */ uint32_t base; - /** @brief module-specific mail payload + /** @brief Module-specific mail payload * * The length of data[ ] is unknown to the BPMP core firmware * but it is limited to the size of an IPC message. @@ -484,23 +526,24 @@ struct mrq_module_mail_request { /** * @ingroup Module - * @brief response to #MRQ_MODULE_MAIL + * @brief Response to #MRQ_MODULE_MAIL */ struct mrq_module_mail_response { - /** @brief module-specific mail payload + /** @brief Module-specific mail payload * * The length of data[ ] is unknown to the BPMP core firmware * but it is limited to the size of an IPC message. 
*/ uint8_t data[EMPTY_ARRAY]; } __ABI_PACKED; +/** @endcond */ /** * @ingroup MRQ_Codes * @def MRQ_DEBUGFS * @brief Interact with BPMP's debugfs file nodes * - * * Platforms: T186 + * * Platforms: T186, T194 * * Initiators: Any * * Targets: BPMP * * Request Payload: @ref mrq_debugfs_request @@ -529,65 +572,70 @@ struct mrq_module_mail_response { * * @} */ + /** @ingroup Debugfs */ enum mrq_debugfs_commands { + /** @brief Perform read */ CMD_DEBUGFS_READ = 1, + /** @brief Perform write */ CMD_DEBUGFS_WRITE = 2, + /** @brief Perform dumping directory */ CMD_DEBUGFS_DUMPDIR = 3, + /** @brief Not a command */ CMD_DEBUGFS_MAX }; /** * @ingroup Debugfs - * @brief parameters for CMD_DEBUGFS_READ/WRITE command + * @brief Parameters for CMD_DEBUGFS_READ/WRITE command */ struct cmd_debugfs_fileop_request { - /** @brief physical address pointing at filename */ + /** @brief Physical address pointing at filename */ uint32_t fnameaddr; - /** @brief length in bytes of filename buffer */ + /** @brief Length in bytes of filename buffer */ uint32_t fnamelen; - /** @brief physical address pointing to data buffer */ + /** @brief Physical address pointing to data buffer */ uint32_t dataaddr; - /** @brief length in bytes of data buffer */ + /** @brief Length in bytes of data buffer */ uint32_t datalen; } __ABI_PACKED; /** * @ingroup Debugfs - * @brief parameters for CMD_DEBUGFS_READ/WRITE command + * @brief Parameters for CMD_DEBUGFS_READ/WRITE command */ struct cmd_debugfs_dumpdir_request { - /** @brief physical address pointing to data buffer */ + /** @brief Physical address pointing to data buffer */ uint32_t dataaddr; - /** @brief length in bytes of data buffer */ + /** @brief Length in bytes of data buffer */ uint32_t datalen; } __ABI_PACKED; /** * @ingroup Debugfs - * @brief response data for CMD_DEBUGFS_READ/WRITE command + * @brief Response data for CMD_DEBUGFS_READ/WRITE command */ struct cmd_debugfs_fileop_response { - /** @brief always 0 */ + /** @brief Always 0 */ uint32_t reserved; - /** @brief number of bytes read from or written to data buffer */ + /** @brief Number of bytes read from or written to data buffer */ uint32_t nbytes; } __ABI_PACKED; /** * @ingroup Debugfs - * @brief response data for CMD_DEBUGFS_DUMPDIR command + * @brief Response data for CMD_DEBUGFS_DUMPDIR command */ struct cmd_debugfs_dumpdir_response { - /** @brief always 0 */ + /** @brief Always 0 */ uint32_t reserved; - /** @brief number of bytes read from or written to data buffer */ + /** @brief Number of bytes read from or written to data buffer */ uint32_t nbytes; } __ABI_PACKED; /** * @ingroup Debugfs - * @brief request with #MRQ_DEBUGFS. + * @brief Request with #MRQ_DEBUGFS. * * The sender of an MRQ_DEBUGFS message uses #cmd to specify a debugfs * command to execute. 
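A rough sketch (not taken from this patch) of filling a CMD_DEBUGFS_READ request as described above. The fnameaddr/dataaddr fields must hold addresses reachable by BPMP (typically obtained through the DMA API); the helper name and parameters here are placeholders.

#include <soc/tegra/bpmp-abi.h>

static void example_fill_debugfs_read(struct mrq_debugfs_request *req,
				      uint32_t name_iova, uint32_t name_len,
				      uint32_t data_iova, uint32_t data_len)
{
	req->cmd = CMD_DEBUGFS_READ;
	req->fop.fnameaddr = name_iova;
	req->fop.fnamelen = name_len;
	req->fop.dataaddr = data_iova;
	req->fop.datalen = data_len;
}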
Legal commands are the values of @ref @@ -601,6 +649,7 @@ struct cmd_debugfs_dumpdir_response { * |CMD_DEBUGFS_DUMPDIR|dumpdir| */ struct mrq_debugfs_request { + /** @brief Sub-command (@ref mrq_debugfs_commands) */ uint32_t cmd; union { struct cmd_debugfs_fileop_request fop; @@ -612,14 +661,14 @@ struct mrq_debugfs_request { * @ingroup Debugfs */ struct mrq_debugfs_response { - /** @brief always 0 */ + /** @brief Always 0 */ int32_t reserved; union { - /** @brief response data for CMD_DEBUGFS_READ OR + /** @brief Response data for CMD_DEBUGFS_READ OR * CMD_DEBUGFS_WRITE command */ struct cmd_debugfs_fileop_response fop; - /** @brief response data for CMD_DEBUGFS_DUMPDIR command */ + /** @brief Response data for CMD_DEBUGFS_DUMPDIR command */ struct cmd_debugfs_dumpdir_response dumpdir; } __UNION_ANON; } __ABI_PACKED; @@ -633,57 +682,58 @@ struct mrq_debugfs_response { #define DEBUGFS_S_IWUSR (1 << 7) /** @} */ - /** * @ingroup MRQ_Codes * @def MRQ_RESET - * @brief reset an IP block + * @brief Reset an IP block * - * * Platforms: T186 + * * Platforms: T186, T194 * * Initiators: Any * * Targets: BPMP * * Request Payload: @ref mrq_reset_request * * Response Payload: @ref mrq_reset_response + * + * @addtogroup Reset + * @{ */ -/** - * @ingroup Reset - */ enum mrq_reset_commands { + /** @brief Assert module reset */ CMD_RESET_ASSERT = 1, + /** @brief Deassert module reset */ CMD_RESET_DEASSERT = 2, + /** @brief Assert and deassert the module reset */ CMD_RESET_MODULE = 3, + /** @brief Get the highest reset ID */ CMD_RESET_GET_MAX_ID = 4, - CMD_RESET_MAX, /* not part of ABI and subject to change */ + /** @brief Not part of ABI and subject to change */ + CMD_RESET_MAX, }; /** - * @ingroup Reset - * @brief request with MRQ_RESET + * @brief Request with MRQ_RESET * * Used by the sender of an #MRQ_RESET message to request BPMP to * assert or or deassert a given reset line. */ struct mrq_reset_request { - /** @brief reset action to perform (@enum mrq_reset_commands) */ + /** @brief Reset action to perform (@ref mrq_reset_commands) */ uint32_t cmd; - /** @brief id of the reset to affected */ + /** @brief Id of the reset to affected */ uint32_t reset_id; } __ABI_PACKED; /** - * @ingroup Reset * @brief Response for MRQ_RESET sub-command CMD_RESET_GET_MAX_ID. When * this sub-command is not supported, firmware will return -BPMP_EBADCMD * in mrq_response::err. 
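A rough sketch (not taken from this patch) of an MRQ_RESET request that pulses one reset line with CMD_RESET_MODULE. As in the earlier sketches, the tegra_bpmp_transfer() helper is assumed and the reset_id value is a placeholder.

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

static int example_pulse_reset(struct tegra_bpmp *bpmp, uint32_t reset_id)
{
	struct mrq_reset_request req = {
		.cmd = CMD_RESET_MODULE,	/* assert, then deassert */
		.reset_id = reset_id,
	};
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_RESET,
		.tx = {
			.data = &req,
			.size = sizeof(req),
		},
	};

	return tegra_bpmp_transfer(bpmp, &msg);
}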
*/ struct cmd_reset_get_max_id_response { - /** @brief max reset id */ + /** @brief Max reset id */ uint32_t max_id; } __ABI_PACKED; /** - * @ingroup Reset * @brief Response with MRQ_RESET * * Each sub-command supported by @ref mrq_reset_request may return @@ -703,32 +753,25 @@ struct mrq_reset_response { } __UNION_ANON; } __ABI_PACKED; +/** @} */ + /** * @ingroup MRQ_Codes * @def MRQ_I2C - * @brief issue an i2c transaction + * @brief Issue an i2c transaction * - * * Platforms: T186 + * * Platforms: T186, T194 * * Initiators: Any * * Targets: BPMP * * Request Payload: @ref mrq_i2c_request * * Response Payload: @ref mrq_i2c_response - */ - -/** + * * @addtogroup I2C * @{ */ #define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12) #define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4) -/** @} */ -/** - * @ingroup I2C - * @name Serial I2C flags - * Use these flags with serial_i2c_request::flags - * @{ - */ #define SERIALI2C_TEN 0x0010 #define SERIALI2C_RD 0x0001 #define SERIALI2C_STOP 0x8000 @@ -737,15 +780,13 @@ struct mrq_reset_response { #define SERIALI2C_IGNORE_NAK 0x1000 #define SERIALI2C_NO_RD_ACK 0x0800 #define SERIALI2C_RECV_LEN 0x0400 -/** @} */ -/** @ingroup I2C */ + enum { CMD_I2C_XFER = 1 }; /** - * @ingroup I2C - * @brief serializable i2c request + * @brief Serializable i2c request * * Instances of this structure are packed (little-endian) into * cmd_i2c_xfer_request::data_buf. Each instance represents a single @@ -762,80 +803,75 @@ enum { struct serial_i2c_request { /** @brief I2C slave address */ uint16_t addr; - /** @brief bitmask of SERIALI2C_ flags */ + /** @brief Bitmask of SERIALI2C_ flags */ uint16_t flags; - /** @brief length of I2C transaction in bytes */ + /** @brief Length of I2C transaction in bytes */ uint16_t len; - /** @brief for write transactions only, #len bytes of data */ + /** @brief For write transactions only, #len bytes of data */ uint8_t data[]; } __ABI_PACKED; /** - * @ingroup I2C - * @brief trigger one or more i2c transactions + * @brief Trigger one or more i2c transactions */ struct cmd_i2c_xfer_request { - /** @brief valid bus number from mach-t186/i2c-t186.h*/ + /** @brief Valid bus number from @ref bpmp_i2c_ids*/ uint32_t bus_id; - /** @brief count of valid bytes in #data_buf*/ + /** @brief Count of valid bytes in #data_buf*/ uint32_t data_size; - /** @brief serialized packed instances of @ref serial_i2c_request*/ + /** @brief Serialized packed instances of @ref serial_i2c_request*/ uint8_t data_buf[TEGRA_I2C_IPC_MAX_IN_BUF_SIZE]; } __ABI_PACKED; /** - * @ingroup I2C - * @brief container for data read from the i2c bus + * @brief Container for data read from the i2c bus * * Processing an cmd_i2c_xfer_request::data_buf causes BPMP to execute * zero or more I2C reads. The data read from the bus is serialized * into #data_buf. */ struct cmd_i2c_xfer_response { - /** @brief count of valid bytes in #data_buf*/ + /** @brief Count of valid bytes in #data_buf*/ uint32_t data_size; - /** @brief i2c read data */ + /** @brief I2c read data */ uint8_t data_buf[TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE]; } __ABI_PACKED; /** - * @ingroup I2C - * @brief request with #MRQ_I2C + * @brief Request with #MRQ_I2C */ struct mrq_i2c_request { - /** @brief always CMD_I2C_XFER (i.e. 1) */ + /** @brief Always CMD_I2C_XFER (i.e. 
1) */ uint32_t cmd; - /** @brief parameters of the transfer request */ + /** @brief Parameters of the transfer request */ struct cmd_i2c_xfer_request xfer; } __ABI_PACKED; /** - * @ingroup I2C - * @brief response to #MRQ_I2C + * @brief Response to #MRQ_I2C */ struct mrq_i2c_response { struct cmd_i2c_xfer_response xfer; } __ABI_PACKED; +/** @} */ + /** * @ingroup MRQ_Codes * @def MRQ_CLK + * @brief Perform a clock operation * - * * Platforms: T186 + * * Platforms: T186, T194 * * Initiators: Any * * Targets: BPMP * * Request Payload: @ref mrq_clk_request * * Response Payload: @ref mrq_clk_response + * * @addtogroup Clocks * @{ */ - -/** - * @name MRQ_CLK sub-commands - * @{ - */ enum { CMD_CLK_GET_RATE = 1, CMD_CLK_SET_RATE = 2, @@ -847,20 +883,13 @@ enum { CMD_CLK_DISABLE = 8, CMD_CLK_GET_ALL_INFO = 14, CMD_CLK_GET_MAX_CLK_ID = 15, + CMD_CLK_GET_FMAX_AT_VMIN = 16, CMD_CLK_MAX, }; -/** @} */ -/** - * @name MRQ_CLK properties - * Flag bits for cmd_clk_properties_response::flags and - * cmd_clk_get_all_info_response::flags - * @{ - */ #define BPMP_CLK_HAS_MUX (1 << 0) #define BPMP_CLK_HAS_SET_RATE (1 << 1) #define BPMP_CLK_IS_ROOT (1 << 2) -/** @} */ #define MRQ_CLK_NAME_MAXLEN 40 #define MRQ_CLK_MAX_PARENTS 16 @@ -959,11 +988,19 @@ struct cmd_clk_get_max_clk_id_request { struct cmd_clk_get_max_clk_id_response { uint32_t max_id; } __ABI_PACKED; -/** @} */ + +/** @private */ +struct cmd_clk_get_fmax_at_vmin_request { + EMPTY +} __ABI_PACKED; + +struct cmd_clk_get_fmax_at_vmin_response { + int64_t rate; +} __ABI_PACKED; /** * @ingroup Clocks - * @brief request with #MRQ_CLK + * @brief Request with #MRQ_CLK * * Used by the sender of an #MRQ_CLK message to control clocks. The * clk_request is split into several sub-commands. Some sub-commands @@ -982,11 +1019,13 @@ struct cmd_clk_get_max_clk_id_response { * |CMD_CLK_DISABLE |- | * |CMD_CLK_GET_ALL_INFO |- | * |CMD_CLK_GET_MAX_CLK_ID |- | + * |CMD_CLK_GET_FMAX_AT_VMIN |- + * | * */ struct mrq_clk_request { - /** @brief sub-command and clock id concatenated to 32-bit word. + /** @brief Sub-command and clock id concatenated to 32-bit word. * - bits[31..24] is the sub-cmd. * - bits[23..0] is the clock id */ @@ -1010,12 +1049,14 @@ struct mrq_clk_request { struct cmd_clk_get_all_info_request clk_get_all_info; /** @private */ struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id; + /** @private */ + struct cmd_clk_get_fmax_at_vmin_request clk_get_fmax_at_vmin; } __UNION_ANON; } __ABI_PACKED; /** * @ingroup Clocks - * @brief response to MRQ_CLK + * @brief Response to MRQ_CLK * * Each sub-command supported by @ref mrq_clk_request may return * sub-command-specific data. 
Some do and some do not as indicated in @@ -1033,6 +1074,7 @@ struct mrq_clk_request { * |CMD_CLK_DISABLE |- | * |CMD_CLK_GET_ALL_INFO |clk_get_all_info | * |CMD_CLK_GET_MAX_CLK_ID |clk_get_max_id | + * |CMD_CLK_GET_FMAX_AT_VMIN |clk_get_fmax_at_vmin | * */ @@ -1050,13 +1092,16 @@ struct mrq_clk_response { struct cmd_clk_is_enabled_response clk_is_enabled; struct cmd_clk_get_all_info_response clk_get_all_info; struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id; + struct cmd_clk_get_fmax_at_vmin_response clk_get_fmax_at_vmin; } __UNION_ANON; } __ABI_PACKED; +/** @} */ + /** * @ingroup MRQ_Codes * @def MRQ_QUERY_ABI - * @brief check if an MRQ is implemented + * @brief Check if an MRQ is implemented * * * Platforms: All * * Initiators: Any @@ -1067,7 +1112,7 @@ struct mrq_clk_response { /** * @ingroup ABI_info - * @brief request with MRQ_QUERY_ABI + * @brief Request with MRQ_QUERY_ABI * * Used by #MRQ_QUERY_ABI call to check if MRQ code #mrq is supported * by the recipient. @@ -1079,7 +1124,7 @@ struct mrq_query_abi_request { /** * @ingroup ABI_info - * @brief response to MRQ_QUERY_ABI + * @brief Response to MRQ_QUERY_ABI * * @note mrq_response::err of 0 indicates that the query was * successful, not that the MRQ itself is supported! @@ -1092,19 +1137,19 @@ struct mrq_query_abi_response { /** * @ingroup MRQ_Codes * @def MRQ_PG_READ_STATE - * @brief read the power-gating state of a partition + * @brief Read the power-gating state of a partition * * * Platforms: T186 + * @cond bpmp_t186 * * Initiators: Any * * Targets: BPMP * * Request Payload: @ref mrq_pg_read_state_request * * Response Payload: @ref mrq_pg_read_state_response - * @addtogroup Powergating - * @{ */ /** - * @brief request with #MRQ_PG_READ_STATE + * @ingroup Powergating + * @brief Request with #MRQ_PG_READ_STATE * * Used by MRQ_PG_READ_STATE call to read the current state of a * partition. @@ -1115,39 +1160,40 @@ struct mrq_pg_read_state_request { } __ABI_PACKED; /** - * @brief response to MRQ_PG_READ_STATE + * @ingroup Powergating + * @brief Response to MRQ_PG_READ_STATE * @todo define possible errors. */ struct mrq_pg_read_state_response { - /** @brief read as don't care */ + /** @brief Read as don't care */ uint32_t sram_state; - /** @brief state of power partition + /** @brief State of power partition * * 0 : off * * 1 : on */ uint32_t logic_state; } __ABI_PACKED; - +/** @endcond*/ /** @} */ /** * @ingroup MRQ_Codes * @def MRQ_PG_UPDATE_STATE - * @brief modify the power-gating state of a partition. In contrast to + * @brief Modify the power-gating state of a partition. In contrast to * MRQ_PG calls, the operations that change state (on/off) of power * partition are reference counted. * * * Platforms: T186 + * @cond bpmp_t186 * * Initiators: Any * * Targets: BPMP * * Request Payload: @ref mrq_pg_update_state_request * * Response Payload: N/A - * @addtogroup Powergating - * @{ */ /** - * @brief request with mrq_pg_update_state_request + * @ingroup Powergating + * @brief Request with mrq_pg_update_state_request * * Used by #MRQ_PG_UPDATE_STATE call to request BPMP to change the * state of a power partition #partition_id. @@ -1155,20 +1201,20 @@ struct mrq_pg_read_state_response { struct mrq_pg_update_state_request { /** @brief ID of partition */ uint32_t partition_id; - /** @brief secondary control of power partition + /** @brief Secondary control of power partition * @details Ignored by many versions of the BPMP * firmware. 
For maximum compatibility, set the value - * according to @logic_state + * according to @ref logic_state * * 0x1: power ON partition (@ref logic_state == 0x3) * * 0x3: power OFF partition (@ref logic_state == 0x1) */ uint32_t sram_state; - /** @brief controls state of power partition, legal values are + /** @brief Controls state of power partition, legal values are * * 0x1 : power OFF partition * * 0x3 : power ON partition */ uint32_t logic_state; - /** @brief change state of clocks of the power partition, legal values + /** @brief Change state of clocks of the power partition, legal values * * 0x0 : do not change clock state * * 0x1 : disable partition clocks (only applicable when * @ref logic_state == 0x1) @@ -1177,7 +1223,7 @@ struct mrq_pg_update_state_request { */ uint32_t clock_state; } __ABI_PACKED; -/** @} */ +/** @endcond*/ /** * @ingroup MRQ_Codes @@ -1186,19 +1232,20 @@ struct mrq_pg_update_state_request { * MRQ_PG_UPDATE_STATE, operations that change the power partition * state are NOT reference counted * - * * Platforms: T186 + * @note BPMP-FW forcefully turns off some partitions as part of SC7 entry + * because their state cannot be adequately restored on exit. Therefore, + * it is recommended to power off all domains via MRQ_PG prior to SC7 entry. + * See @ref bpmp_pdomain_ids for further detail. + * + * * Platforms: T186, T194 * * Initiators: Any * * Targets: BPMP * * Request Payload: @ref mrq_pg_request * * Response Payload: @ref mrq_pg_response + * * @addtogroup Powergating * @{ */ - -/** - * @name MRQ_PG sub-commands - * @{ - */ enum mrq_pg_cmd { /** * @brief Check whether the BPMP driver supports the specified @@ -1232,7 +1279,7 @@ enum mrq_pg_cmd { CMD_PG_GET_STATE = 2, /** - * @brief get the name string of specified power domain id. + * @brief Get the name string of specified power domain id. * * mrq_response:err is * 0: Success @@ -1242,7 +1289,7 @@ enum mrq_pg_cmd { /** - * @brief get the highest power domain id in the system. Not + * @brief Get the highest power domain id in the system. Not * all IDs between 0 and max_id are valid IDs. * * mrq_response:err is @@ -1251,35 +1298,36 @@ enum mrq_pg_cmd { */ CMD_PG_GET_MAX_ID = 4, }; -/** @} */ #define MRQ_PG_NAME_MAXLEN 40 -/** - * @brief possible power domain states in - * cmd_pg_set_state_request:state and cmd_pg_get_state_response:state. - * PG_STATE_OFF: power domain is OFF - * PG_STATE_ON: power domain is ON - * PG_STATE_RUNNING: power domain is ON and made into directly usable - * state by turning on the clocks associated with - * the domain - */ enum pg_states { + /** @brief Power domain is OFF */ PG_STATE_OFF = 0, + /** @brief Power domain is ON */ PG_STATE_ON = 1, + /** + * @brief a legacy state where power domain and the clock + * associated to the domain are ON. + * This state is only supported in T186, and the use of it is + * deprecated. 
+ */ PG_STATE_RUNNING = 2, }; struct cmd_pg_query_abi_request { - uint32_t type; /* enum mrq_pg_cmd */ + /** @ref mrq_pg_cmd */ + uint32_t type; } __ABI_PACKED; struct cmd_pg_set_state_request { - uint32_t state; /* enum pg_states */ + /** @ref pg_states */ + uint32_t state; } __ABI_PACKED; struct cmd_pg_get_state_response { - uint32_t state; /* enum pg_states */ + /** @ref pg_states */ + uint32_t state; } __ABI_PACKED; struct cmd_pg_get_name_response { @@ -1291,8 +1339,7 @@ struct cmd_pg_get_max_id_response { } __ABI_PACKED; /** - * @ingroup Powergating - * @brief request with #MRQ_PG + * @brief Request with #MRQ_PG * * Used by the sender of an #MRQ_PG message to control power * partitions. The pg_request is split into several sub-commands. Some @@ -1308,7 +1355,6 @@ struct cmd_pg_get_max_id_response { * |CMD_PG_GET_MAX_ID | - | * */ - struct mrq_pg_request { uint32_t cmd; uint32_t id; @@ -1319,8 +1365,7 @@ struct mrq_pg_request { } __ABI_PACKED; /** - * @ingroup Powergating - * @brief response to MRQ_PG + * @brief Response to MRQ_PG * * Each sub-command supported by @ref mrq_pg_request may return * sub-command-specific data. Some do and some do not as indicated in @@ -1333,9 +1378,7 @@ struct mrq_pg_request { * |CMD_PG_GET_STATE | get_state | * |CMD_PG_GET_NAME | get_name | * |CMD_PG_GET_MAX_ID | get_max_id | - * */ - struct mrq_pg_response { union { struct cmd_pg_get_state_response get_state; @@ -1344,12 +1387,14 @@ struct mrq_pg_response { } __UNION_ANON; } __ABI_PACKED; +/** @} */ + /** * @ingroup MRQ_Codes * @def MRQ_THERMAL - * @brief interact with BPMP thermal framework + * @brief Interact with BPMP thermal framework * - * * Platforms: T186 + * * Platforms: T186, T194 * * Initiators: Any * * Targets: Any * * Request Payload: TODO @@ -1562,17 +1607,18 @@ union mrq_thermal_bpmp_to_host_response { * @brief Query CPU voltage hint data * * * Platforms: T186 + * @cond bpmp_t186 * * Initiators: CCPLEX * * Targets: BPMP * * Request Payload: @ref mrq_cpu_vhint_request * * Response Payload: N/A * - * @addtogroup Vhint CPU Voltage hint + * @addtogroup Vhint * @{ */ /** - * @brief request with #MRQ_CPU_VHINT + * @brief Request with #MRQ_CPU_VHINT * * Used by #MRQ_CPU_VHINT call by CCPLEX to retrieve voltage hint data * from BPMP to memory space pointed by #addr. 
CCPLEX is responsible @@ -1581,16 +1627,16 @@ union mrq_thermal_bpmp_to_host_response { */ struct mrq_cpu_vhint_request { /** @brief IOVA address for the #cpu_vhint_data */ - uint32_t addr; /* struct cpu_vhint_data * */ + uint32_t addr; /** @brief ID of the cluster whose data is requested */ - uint32_t cluster_id; /* enum cluster_id */ + uint32_t cluster_id; } __ABI_PACKED; /** - * @brief description of the CPU v/f relation + * @brief Description of the CPU v/f relation * - * Used by #MRQ_CPU_VHINT call to carry data pointed by #addr of - * struct mrq_cpu_vhint_request + * Used by #MRQ_CPU_VHINT call to carry data pointed by + * #mrq_cpu_vhint_request::addr */ struct cpu_vhint_data { uint32_t ref_clk_hz; /**< reference frequency in Hz */ @@ -1612,7 +1658,7 @@ struct cpu_vhint_data { /** reserved for future use */ uint16_t reserved[328]; } __ABI_PACKED; - +/** @endcond */ /** @} */ /** @@ -1620,7 +1666,7 @@ struct cpu_vhint_data { * @def MRQ_ABI_RATCHET * @brief ABI ratchet value query * - * * Platforms: T186 + * * Platforms: T186, T194 * * Initiators: Any * * Targets: BPMP * * Request Payload: @ref mrq_abi_ratchet_request @@ -1630,7 +1676,7 @@ struct cpu_vhint_data { */ /** - * @brief an ABI compatibility mechanism + * @brief An ABI compatibility mechanism * * BPMP_ABI_RATCHET_VALUE may increase for various reasons in a future * revision of this header file. @@ -1644,7 +1690,7 @@ struct cpu_vhint_data { #define BPMP_ABI_RATCHET_VALUE 3 /** - * @brief request with #MRQ_ABI_RATCHET. + * @brief Request with #MRQ_ABI_RATCHET. * * #ratchet should be #BPMP_ABI_RATCHET_VALUE from the ABI header * against which the requester was compiled. @@ -1657,12 +1703,12 @@ struct cpu_vhint_data { * Otherwise, err shall be 0. */ struct mrq_abi_ratchet_request { - /** @brief requester's ratchet value */ + /** @brief Requester's ratchet value */ uint16_t ratchet; }; /** - * @brief response to #MRQ_ABI_RATCHET + * @brief Response to #MRQ_ABI_RATCHET * * #ratchet shall be #BPMP_ABI_RATCHET_VALUE from the ABI header * against which BPMP firwmare was compiled. @@ -1685,9 +1731,9 @@ struct mrq_abi_ratchet_response { /** * @ingroup MRQ_Codes * @def MRQ_EMC_DVFS_LATENCY - * @brief query frequency dependent EMC DVFS latency + * @brief Query frequency dependent EMC DVFS latency * - * * Platforms: T186 + * * Platforms: T186, T194 * * Initiators: CCPLEX * * Targets: BPMP * * Request Payload: N/A @@ -1697,7 +1743,7 @@ struct mrq_abi_ratchet_response { */ /** - * @brief used by @ref mrq_emc_dvfs_latency_response + * @brief Used by @ref mrq_emc_dvfs_latency_response */ struct emc_dvfs_latency { /** @brief EMC frequency in kHz */ @@ -1708,10 +1754,10 @@ struct emc_dvfs_latency { #define EMC_DVFS_LATENCY_MAX_SIZE 14 /** - * @brief response to #MRQ_EMC_DVFS_LATENCY + * @brief Response to #MRQ_EMC_DVFS_LATENCY */ struct mrq_emc_dvfs_latency_response { - /** @brief the number valid entries in #pairs */ + /** @brief The number valid entries in #pairs */ uint32_t num_pairs; /** @brief EMC <frequency, latency> information */ struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE]; @@ -1721,8 +1767,96 @@ struct mrq_emc_dvfs_latency_response { /** * @ingroup MRQ_Codes + * @def MRQ_CPU_NDIV_LIMITS + * @brief CPU freq. 
limits in ndiv + * + * * Platforms: T194 onwards + * @cond bpmp_t194 + * * Initiators: CCPLEX + * * Targets: BPMP + * * Request Payload: @ref mrq_cpu_ndiv_limits_request + * * Response Payload: @ref mrq_cpu_ndiv_limits_response + * @addtogroup CPU + * @{ + */ + +/** + * @brief Request for ndiv limits of a cluster + */ +struct mrq_cpu_ndiv_limits_request { + /** @brief Enum cluster_id */ + uint32_t cluster_id; +} __ABI_PACKED; + +/** + * @brief Response to #MRQ_CPU_NDIV_LIMITS + */ +struct mrq_cpu_ndiv_limits_response { + /** @brief Reference frequency in Hz */ + uint32_t ref_clk_hz; + /** @brief Post divider value */ + uint16_t pdiv; + /** @brief Input divider value */ + uint16_t mdiv; + /** @brief FMAX expressed with max NDIV value */ + uint16_t ndiv_max; + /** @brief Minimum allowed NDIV value */ + uint16_t ndiv_min; +} __ABI_PACKED; + +/** @} */ +/** @endcond */ + +/** + * @ingroup MRQ_Codes + * @def MRQ_CPU_AUTO_CC3 + * @brief Query CPU cluster auto-CC3 configuration + * + * * Platforms: T194 onwards + * @cond bpmp_t194 + * * Initiators: CCPLEX + * * Targets: BPMP + * * Request Payload: @ref mrq_cpu_auto_cc3_request + * * Response Payload: @ref mrq_cpu_auto_cc3_response + * @addtogroup CC3 + * + * Queries from BPMP auto-CC3 configuration (allowed/not allowed) for a + * specified cluster. CCPLEX s/w uses this information to override its own + * device tree auto-CC3 settings, so that BPMP device tree is a single source of + * auto-CC3 platform configuration. + * + * @{ + */ + +/** + * @brief Request for auto-CC3 configuration of a cluster + */ +struct mrq_cpu_auto_cc3_request { + /** @brief Enum cluster_id (logical cluster id, known to CCPLEX s/w) */ + uint32_t cluster_id; +} __ABI_PACKED; + +/** + * @brief Response to #MRQ_CPU_AUTO_CC3 + */ +struct mrq_cpu_auto_cc3_response { + /** + * @brief auto-CC3 configuration + * + * - bits[31..10] reserved. + * - bits[9..1] cc3 ndiv + * - bit [0] if "1" auto-CC3 is allowed, if "0" auto-CC3 is not allowed + */ + uint32_t auto_cc3_config; +} __ABI_PACKED; + +/** @} */ +/** @endcond */ + +/** + * @ingroup MRQ_Codes * @def MRQ_TRACE_ITER - * @brief manage the trace iterator + * @brief Manage the trace iterator * * * Platforms: All * * Initiators: CCPLEX @@ -1735,12 +1869,12 @@ struct mrq_emc_dvfs_latency_response { enum { /** @brief (re)start the tracing now. Ignore older events */ TRACE_ITER_INIT = 0, - /** @brief clobber all events in the trace buffer */ + /** @brief Clobber all events in the trace buffer */ TRACE_ITER_CLEAN = 1 }; /** - * @brief request with #MRQ_TRACE_ITER + * @brief Request with #MRQ_TRACE_ITER */ struct mrq_trace_iter_request { /** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */ @@ -1900,7 +2034,7 @@ struct cmd_ringbuf_console_get_fifo_resp { */ struct mrq_ringbuf_console_host_to_bpmp_request { /** - * @brief type of request. Values listed in enum + * @brief Type of request. Values listed in enum * #mrq_ringbuf_console_host_to_bpmp_cmd. */ uint32_t type; @@ -1927,49 +2061,616 @@ union mrq_ringbuf_console_bpmp_to_host_response { } __ABI_PACKED; /** @} */ -/* - * 4. Enumerations +/** + * @ingroup MRQ_Codes + * @def MRQ_STRAP + * @brief Set a strap value controlled by BPMP + * + * * Platforms: T194 onwards + * @cond bpmp_t194 + * * Initiators: CCPLEX + * * Targets: BPMP + * * Request Payload: @ref mrq_strap_request + * * Response Payload: N/A + * @addtogroup Strap + * + * A strap is an input that is sampled by a hardware unit during the + * unit's startup process. 
The sampled value of a strap affects the + * behavior of the unit until the unit is restarted. Many hardware + * units sample their straps at the instant that their resets are + * deasserted. + * + * BPMP owns registers which act as straps to various units. It + * exposes limited control of those straps via #MRQ_STRAP. + * + * @{ */ +enum mrq_strap_cmd { + /** @private */ + STRAP_RESERVED = 0, + /** @brief Set a strap value */ + STRAP_SET = 1 +}; -/* - * 4.1 CPU enumerations +/** + * @brief Request with #MRQ_STRAP + */ +struct mrq_strap_request { + /** @brief @ref mrq_strap_cmd */ + uint32_t cmd; + /** @brief Strap ID from @ref Strap_Ids */ + uint32_t id; + /** @brief Desired value for strap (if cmd is #STRAP_SET) */ + uint32_t value; +} __ABI_PACKED; + +/** + * @defgroup Strap_Ids Strap Identifiers + * @} + */ +/** @endcond */ + +/** + * @ingroup MRQ_Codes + * @def MRQ_UPHY + * @brief Perform a UPHY operation * - * See <mach-t186/system-t186.h> + * * Platforms: T194 onwards + * @cond bpmp_t194 + * * Initiators: CCPLEX + * * Targets: BPMP + * * Request Payload: @ref mrq_uphy_request + * * Response Payload: @ref mrq_uphy_response * - * 4.2 CPU Cluster enumerations + * @addtogroup UPHY + * @{ + */ +enum { + CMD_UPHY_PCIE_LANE_MARGIN_CONTROL = 1, + CMD_UPHY_PCIE_LANE_MARGIN_STATUS = 2, + CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT = 3, + CMD_UPHY_PCIE_CONTROLLER_STATE = 4, + CMD_UPHY_MAX, +}; + +struct cmd_uphy_margin_control_request { + /** @brief Enable margin */ + int32_t en; + /** @brief Clear the number of error and sections */ + int32_t clr; + /** @brief Set x offset (1's complement) for left/right margin type (y should be 0) */ + uint32_t x; + /** @brief Set y offset (1's complement) for left/right margin type (x should be 0) */ + uint32_t y; + /** @brief Set number of bit blocks for each margin section */ + uint32_t nblks; +} __ABI_PACKED; + +struct cmd_uphy_margin_status_response { + /** @brief Number of errors observed */ + uint32_t status; +} __ABI_PACKED; + +struct cmd_uphy_ep_controller_pll_init_request { + /** @brief EP controller number, valid: 0, 4, 5 */ + uint8_t ep_controller; +} __ABI_PACKED; + +struct cmd_uphy_pcie_controller_state_request { + /** @brief PCIE controller number, valid: 0, 1, 2, 3, 4 */ + uint8_t pcie_controller; + uint8_t enable; +} __ABI_PACKED; + +/** + * @ingroup UPHY + * @brief Request with #MRQ_UPHY * - * See <mach-t186/system-t186.h> + * Used by the sender of an #MRQ_UPHY message to control UPHY Lane RX margining. + * The uphy_request is split into several sub-commands. Some sub-commands + * require no additional data. Others have a sub-command specific payload * - * 4.3 System low power state enumerations + * |sub-command |payload | + * |------------------------------------ |----------------------------------------| + * |CMD_UPHY_PCIE_LANE_MARGIN_CONTROL |uphy_set_margin_control | + * |CMD_UPHY_PCIE_LANE_MARGIN_STATUS | | + * |CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT |cmd_uphy_ep_controller_pll_init_request | + * |CMD_UPHY_PCIE_CONTROLLER_STATE |cmd_uphy_pcie_controller_state_request | * - * See <mach-t186/system-t186.h> */ -/* - * 4.4 Clock enumerations +struct mrq_uphy_request { + /** @brief Lane number. */ + uint16_t lane; + /** @brief Sub-command id. 
*/ + uint16_t cmd; + + union { + struct cmd_uphy_margin_control_request uphy_set_margin_control; + struct cmd_uphy_ep_controller_pll_init_request ep_ctrlr_pll_init; + struct cmd_uphy_pcie_controller_state_request controller_state; + } __UNION_ANON; +} __ABI_PACKED; + +/** + * @ingroup UPHY + * @brief Response to MRQ_UPHY + * + * Each sub-command supported by @ref mrq_uphy_request may return + * sub-command-specific data. Some do and some do not as indicated in + * the following table + * + * |sub-command |payload | + * |---------------------------- |------------------------| + * |CMD_UPHY_PCIE_LANE_MARGIN_CONTROL | | + * |CMD_UPHY_PCIE_LANE_MARGIN_STATUS |uphy_get_margin_status | * - * For clock enumerations, see <mach-t186/clk-t186.h> */ -/* - * 4.5 Reset enumerations +struct mrq_uphy_response { + union { + struct cmd_uphy_margin_status_response uphy_get_margin_status; + } __UNION_ANON; +} __ABI_PACKED; + +/** @} */ +/** @endcond */ + +/** + * @ingroup MRQ_Codes + * @def MRQ_FMON + * @brief Perform a frequency monitor configuration operations + * + * * Platforms: T194 onwards + * @cond bpmp_t194 + * * Initiators: CCPLEX + * * Targets: BPMP + * * Request Payload: @ref mrq_fmon_request + * * Response Payload: @ref mrq_fmon_response * - * For reset enumerations, see <mach-t186/reset-t186.h> + * @addtogroup FMON + * @{ */ +enum { + /** + * @brief Clamp FMON configuration to specified rate. + * + * The monitored clock must be running for clamp to succeed. If + * clamped, FMON configuration is preserved when clock rate + * and/or state is changed. + */ + CMD_FMON_GEAR_CLAMP = 1, + /** + * @brief Release clamped FMON configuration. + * + * Allow FMON configuration to follow monitored clock rate + * and/or state changes. + */ + CMD_FMON_GEAR_FREE = 2, + /** + * @brief Return rate FMON is clamped at, or 0 if FMON is not + * clamped. + * + * Inherently racy, since clamp state can be changed + * concurrently. Useful for testing. + */ + CMD_FMON_GEAR_GET = 3, + CMD_FMON_NUM, +}; -/* - * 4.6 Thermal sensor enumerations +struct cmd_fmon_gear_clamp_request { + int32_t unused; + int64_t rate; +} __ABI_PACKED; + +/** @private */ +struct cmd_fmon_gear_clamp_response { + EMPTY +} __ABI_PACKED; + +/** @private */ +struct cmd_fmon_gear_free_request { + EMPTY +} __ABI_PACKED; + +/** @private */ +struct cmd_fmon_gear_free_response { + EMPTY +} __ABI_PACKED; + +/** @private */ +struct cmd_fmon_gear_get_request { + EMPTY +} __ABI_PACKED; + +struct cmd_fmon_gear_get_response { + int64_t rate; +} __ABI_PACKED; + +/** + * @ingroup FMON + * @brief Request with #MRQ_FMON + * + * Used by the sender of an #MRQ_FMON message to configure clock + * frequency monitors. The FMON request is split into several + * sub-commands. Some sub-commands require no additional data. + * Others have a sub-command specific payload + * + * |sub-command |payload | + * |----------------------------|-----------------------| + * |CMD_FMON_GEAR_CLAMP |fmon_gear_clamp | + * |CMD_FMON_GEAR_FREE |- | + * |CMD_FMON_GEAR_GET |- | + * + */ + +struct mrq_fmon_request { + /** @brief Sub-command and clock id concatenated to 32-bit word. + * - bits[31..24] is the sub-cmd. 
+ * - bits[23..0] is monitored clock id used to select target + * FMON + */ + uint32_t cmd_and_id; + + union { + struct cmd_fmon_gear_clamp_request fmon_gear_clamp; + /** @private */ + struct cmd_fmon_gear_free_request fmon_gear_free; + /** @private */ + struct cmd_fmon_gear_get_request fmon_gear_get; + } __UNION_ANON; +} __ABI_PACKED; + +/** + * @ingroup FMON + * @brief Response to MRQ_FMON + * + * Each sub-command supported by @ref mrq_fmon_request may + * return sub-command-specific data as indicated below. + * + * |sub-command |payload | + * |----------------------------|------------------------| + * |CMD_FMON_GEAR_CLAMP |- | + * |CMD_FMON_GEAR_FREE |- | + * |CMD_FMON_GEAR_GET |fmon_gear_get | + * + */ + +struct mrq_fmon_response { + union { + /** @private */ + struct cmd_fmon_gear_clamp_response fmon_gear_clamp; + /** @private */ + struct cmd_fmon_gear_free_response fmon_gear_free; + struct cmd_fmon_gear_get_response fmon_gear_get; + } __UNION_ANON; +} __ABI_PACKED; + +/** @} */ +/** @endcond */ + +/** + * @ingroup MRQ_Codes + * @def MRQ_EC + * @brief Provide status information on faults reported by Error + * Collator (EC) to HSM. + * + * * Platforms: T194 onwards + * @cond bpmp_t194 + * * Initiators: CCPLEX + * * Targets: BPMP + * * Request Payload: @ref mrq_ec_request + * * Response Payload: @ref mrq_ec_response + * + * @note This MRQ ABI is under construction, and subject to change + * + * @addtogroup EC + * @{ + */ +enum { + /** + * @brief Retrieve specified EC status. + * + * mrq_response::err is 0 if the operation was successful, or @n + * -#BPMP_ENODEV if target EC is not owned by BPMP @n + * -#BPMP_EACCES if target EC power domain is turned off + */ + CMD_EC_STATUS_GET = 1, + CMD_EC_NUM, +}; + +/** @brief BPMP ECs error types */ +enum bpmp_ec_err_type { + /** @brief Parity error on internal data path + * + * Error descriptor @ref ec_err_simple_desc. + */ + EC_ERR_TYPE_PARITY_INTERNAL = 1, + + /** @brief ECC SEC error on internal data path + * + * Error descriptor @ref ec_err_simple_desc. + */ + EC_ERR_TYPE_ECC_SEC_INTERNAL = 2, + + /** @brief ECC DED error on internal data path + * + * Error descriptor @ref ec_err_simple_desc. + */ + EC_ERR_TYPE_ECC_DED_INTERNAL = 3, + + /** @brief Comparator error + * + * Error descriptor @ref ec_err_simple_desc. + */ + EC_ERR_TYPE_COMPARATOR = 4, + + /** @brief Register parity error + * + * Error descriptor @ref ec_err_reg_parity_desc. + */ + EC_ERR_TYPE_REGISTER_PARITY = 5, + + /** @brief Parity error from on-chip SRAM/FIFO + * + * Error descriptor @ref ec_err_simple_desc. + */ + EC_ERR_TYPE_PARITY_SRAM = 6, + + /** @brief Clock Monitor error + * + * Error descriptor @ref ec_err_fmon_desc. + */ + EC_ERR_TYPE_CLOCK_MONITOR = 9, + + /** @brief Voltage Monitor error + * + * Error descriptor @ref ec_err_vmon_desc. + */ + EC_ERR_TYPE_VOLTAGE_MONITOR = 10, + + /** @brief SW Correctable error + * + * Error descriptor @ref ec_err_simple_desc. + */ + EC_ERR_TYPE_SW_CORRECTABLE = 16, + + /** @brief SW Uncorrectable error + * + * Error descriptor @ref ec_err_simple_desc. + */ + EC_ERR_TYPE_SW_UNCORRECTABLE = 17, + + /** @brief Other HW Correctable error + * + * Error descriptor @ref ec_err_simple_desc. + */ + EC_ERR_TYPE_OTHER_HW_CORRECTABLE = 32, + + /** @brief Other HW Uncorrectable error + * + * Error descriptor @ref ec_err_simple_desc. + */ + EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE = 33, +}; + +/** @brief Group of registers with parity error. 
*/ +enum ec_registers_group { + /** @brief Functional registers group */ + EC_ERR_GROUP_FUNC_REG = 0, + /** @brief SCR registers group */ + EC_ERR_GROUP_SCR_REG = 1, +}; + +/** + * @defgroup bpmp_ec_status_flags EC Status Flags + * @addtogroup bpmp_ec_status_flags + * @{ + */ +/** @brief No EC error found flag */ +#define EC_STATUS_FLAG_NO_ERROR 0x0001 +/** @brief Last EC error found flag */ +#define EC_STATUS_FLAG_LAST_ERROR 0x0002 +/** @brief EC latent error flag */ +#define EC_STATUS_FLAG_LATENT_ERROR 0x0004 +/** @} */ + +/** + * @defgroup bpmp_ec_desc_flags EC Descriptor Flags + * @addtogroup bpmp_ec_desc_flags + * @{ + */ +/** @brief EC descriptor error resolved flag */ +#define EC_DESC_FLAG_RESOLVED 0x0001 +/** @brief EC descriptor failed to retrieve id flag */ +#define EC_DESC_FLAG_NO_ID 0x0002 +/** @} */ + +/** + * |error type | fmon_clk_id values | + * |---------------------------------|---------------------------| + * |@ref EC_ERR_TYPE_CLOCK_MONITOR |@ref bpmp_clock_ids | + */ +struct ec_err_fmon_desc { + /** @brief Bitmask of @ref bpmp_ec_desc_flags */ + uint16_t desc_flags; + /** @brief FMON monitored clock id */ + uint16_t fmon_clk_id; + /** @brief Bitmask of @ref bpmp_fmon_faults_flags */ + uint32_t fmon_faults; + /** @brief FMON faults access error */ + int32_t fmon_access_error; +} __ABI_PACKED; + +/** + * |error type | vmon_adc_id values | + * |---------------------------------|---------------------------| + * |@ref EC_ERR_TYPE_VOLTAGE_MONITOR |@ref bpmp_adc_ids | + */ +struct ec_err_vmon_desc { + /** @brief Bitmask of @ref bpmp_ec_desc_flags */ + uint16_t desc_flags; + /** @brief VMON rail adc id */ + uint16_t vmon_adc_id; + /** @brief Bitmask of @ref bpmp_vmon_faults_flags */ + uint32_t vmon_faults; + /** @brief VMON faults access error */ + int32_t vmon_access_error; +} __ABI_PACKED; + +/** + * |error type | reg_id values | + * |---------------------------------|---------------------------| + * |@ref EC_ERR_TYPE_REGISTER_PARITY |@ref bpmp_ec_registers_ids | + */ +struct ec_err_reg_parity_desc { + /** @brief Bitmask of @ref bpmp_ec_desc_flags */ + uint16_t desc_flags; + /** @brief Register id */ + uint16_t reg_id; + /** @brief Register group @ref ec_registers_group */ + uint16_t reg_group; +} __ABI_PACKED; + +/** + * |error type | err_source_id values | + * |----------------------------------------|---------------------------| + * |@ref EC_ERR_TYPE_PARITY_INTERNAL |@ref bpmp_ec_ipath_ids | + * |@ref EC_ERR_TYPE_ECC_SEC_INTERNAL |@ref bpmp_ec_ipath_ids | + * |@ref EC_ERR_TYPE_ECC_DED_INTERNAL |@ref bpmp_ec_ipath_ids | + * |@ref EC_ERR_TYPE_COMPARATOR |@ref bpmp_ec_comparator_ids| + * |@ref EC_ERR_TYPE_PARITY_SRAM |@ref bpmp_clock_ids | + * |@ref EC_ERR_TYPE_SW_CORRECTABLE |@ref bpmp_ec_misc_ids | + * |@ref EC_ERR_TYPE_SW_UNCORRECTABLE |@ref bpmp_ec_misc_ids | + * |@ref EC_ERR_TYPE_OTHER_HW_CORRECTABLE |@ref bpmp_ec_misc_ids | + * |@ref EC_ERR_TYPE_OTHER_HW_UNCORRECTABLE |@ref bpmp_ec_misc_ids | + */ +struct ec_err_simple_desc { + /** @brief Bitmask of @ref bpmp_ec_desc_flags */ + uint16_t desc_flags; + /** @brief Error source id. Id space depends on error type. */ + uint16_t err_source_id; +} __ABI_PACKED; + +/** @brief Union of EC error descriptors */ +union ec_err_desc { + struct ec_err_fmon_desc fmon_desc; + struct ec_err_vmon_desc vmon_desc; + struct ec_err_reg_parity_desc reg_parity_desc; + struct ec_err_simple_desc simple_desc; +} __ABI_PACKED; + +struct cmd_ec_status_get_request { + /** @brief HSM error line number that identifies target EC. 
*/ + uint32_t ec_hsm_id; +} __ABI_PACKED; + +/** EC status maximum number of descriptors */ +#define EC_ERR_STATUS_DESC_MAX_NUM 4 + +struct cmd_ec_status_get_response { + /** @brief Target EC id (the same id received with request). */ + uint32_t ec_hsm_id; + /** + * @brief Bitmask of @ref bpmp_ec_status_flags + * + * If NO_ERROR flag is set, error_ fields should be ignored + */ + uint32_t ec_status_flags; + /** @brief Found EC error index. */ + uint32_t error_idx; + /** @brief Found EC error type @ref bpmp_ec_err_type. */ + uint32_t error_type; + /** @brief Number of returned EC error descriptors */ + uint32_t error_desc_num; + /** @brief EC error descriptors */ + union ec_err_desc error_descs[EC_ERR_STATUS_DESC_MAX_NUM]; +} __ABI_PACKED; + +/** + * @ingroup EC + * @brief Request with #MRQ_EC + * + * Used by the sender of an #MRQ_EC message to access ECs owned + * by BPMP. + * + * |sub-command |payload | + * |----------------------------|-----------------------| + * |@ref CMD_EC_STATUS_GET |ec_status_get | * - * For thermal sensor enumerations, see <mach-t186/thermal-t186.h> */ +struct mrq_ec_request { + /** @brief Sub-command id. */ + uint32_t cmd_id; + + union { + struct cmd_ec_status_get_request ec_status_get; + } __UNION_ANON; +} __ABI_PACKED; + /** - * @defgroup Error_Codes + * @ingroup EC + * @brief Response to MRQ_EC + * + * Each sub-command supported by @ref mrq_ec_request may return + * sub-command-specific data as indicated below. + * + * |sub-command |payload | + * |----------------------------|------------------------| + * |@ref CMD_EC_STATUS_GET |ec_status_get | + * + */ + +struct mrq_ec_response { + union { + struct cmd_ec_status_get_response ec_status_get; + } __UNION_ANON; +} __ABI_PACKED; + +/** @} */ +/** @endcond */ + +/** + * @ingroup MRQ_Codes + * @def MRQ_FBVOLT_STATUS + * @brief Provides status information about voltage state for fuse burning + * + * * Platforms: T194 onwards + * @cond bpmp_t194 + * * Initiators: CCPLEX + * * Target: BPMP + * * Request Payload: None + * * Response Payload: @ref mrq_fbvolt_status_response + * @{ + */ + +/** + * @ingroup Fbvolt_status + * @brief Response to #MRQ_FBVOLT_STATUS + * + * Value of #ready reflects if core voltages are in a suitable state for burning + * fuses. A value of 0x1 indicates that core voltages are ready for burning + * fuses. A value of 0x0 indicates that core voltages are not ready. + */ +struct mrq_fbvolt_status_response { + /** @brief Bit [0:0] - ready status, bits [31:1] - reserved */ + uint32_t ready; + /** @brief Reserved */ + uint32_t unused; +} __ABI_PACKED; + +/** @} */ +/** @endcond */ + +/** + * @addtogroup Error_Codes * Negative values for mrq_response::err generally indicate some * error. The ABI defines the following error codes. Negating these * defines is an exercise left to the user. 
* @{ */ + /** @brief No such file or directory */ #define BPMP_ENOENT 2 /** @brief No MRQ handler */ @@ -1994,6 +2695,11 @@ union mrq_ringbuf_console_bpmp_to_host_response { #define BPMP_ETIMEDOUT 23 /** @brief Out of range */ #define BPMP_ERANGE 34 +/** @brief Function not implemented */ +#define BPMP_ENOSYS 38 +/** @brief Invalid slot */ +#define BPMP_EBADSLT 57 + /** @} */ -/** @} */ + #endif diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h index e69e4c4d80ae..b02f926a0216 100644 --- a/include/soc/tegra/bpmp.h +++ b/include/soc/tegra/bpmp.h @@ -129,6 +129,7 @@ int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, tegra_bpmp_mrq_handler_t handler, void *data); void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data); +bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq); #else static inline struct tegra_bpmp *tegra_bpmp_get(struct device *dev) { @@ -164,6 +165,12 @@ static inline void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data) { } + +static inline bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, + unsigned int mrq) +{ + return false; +} #endif #if IS_ENABLED(CONFIG_CLK_TEGRA_BPMP) diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h index 9b6ea0c72117..8fb2f8a87339 100644 --- a/include/soc/tegra/fuse.h +++ b/include/soc/tegra/fuse.h @@ -60,7 +60,6 @@ struct tegra_sku_info { u32 tegra_read_straps(void); u32 tegra_read_ram_code(void); -u32 tegra_read_chipid(void); int tegra_fuse_readl(unsigned long offset, u32 *value); extern struct tegra_sku_info tegra_sku_info; diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h index 562426812ab2..a9db1b501de1 100644 --- a/include/soc/tegra/pmc.h +++ b/include/soc/tegra/pmc.h @@ -26,11 +26,9 @@ struct clk; struct reset_control; -#ifdef CONFIG_SMP bool tegra_pmc_cpu_is_powered(unsigned int cpuid); int tegra_pmc_cpu_power_on(unsigned int cpuid); int tegra_pmc_cpu_remove_clamping(unsigned int cpuid); -#endif /* CONFIG_SMP */ /* * powergate and I/O rail APIs @@ -90,6 +88,10 @@ enum tegra_io_pad { TEGRA_IO_PAD_CSID, TEGRA_IO_PAD_CSIE, TEGRA_IO_PAD_CSIF, + TEGRA_IO_PAD_CSIG, + TEGRA_IO_PAD_CSIH, + TEGRA_IO_PAD_DAP3, + TEGRA_IO_PAD_DAP5, TEGRA_IO_PAD_DBG, TEGRA_IO_PAD_DEBUG_NONAO, TEGRA_IO_PAD_DMIC, @@ -102,10 +104,15 @@ enum tegra_io_pad { TEGRA_IO_PAD_EDP, TEGRA_IO_PAD_EMMC, TEGRA_IO_PAD_EMMC2, + TEGRA_IO_PAD_EQOS, TEGRA_IO_PAD_GPIO, + TEGRA_IO_PAD_GP_PWM2, + TEGRA_IO_PAD_GP_PWM3, TEGRA_IO_PAD_HDMI, TEGRA_IO_PAD_HDMI_DP0, TEGRA_IO_PAD_HDMI_DP1, + TEGRA_IO_PAD_HDMI_DP2, + TEGRA_IO_PAD_HDMI_DP3, TEGRA_IO_PAD_HSIC, TEGRA_IO_PAD_HV, TEGRA_IO_PAD_LVDS, @@ -115,8 +122,14 @@ enum tegra_io_pad { TEGRA_IO_PAD_PEX_CLK_BIAS, TEGRA_IO_PAD_PEX_CLK1, TEGRA_IO_PAD_PEX_CLK2, + TEGRA_IO_PAD_PEX_CLK2_BIAS, TEGRA_IO_PAD_PEX_CLK3, TEGRA_IO_PAD_PEX_CNTRL, + TEGRA_IO_PAD_PEX_CTL2, + TEGRA_IO_PAD_PEX_L0_RST_N, + TEGRA_IO_PAD_PEX_L1_RST_N, + TEGRA_IO_PAD_PEX_L5_RST_N, + TEGRA_IO_PAD_PWR_CTL, TEGRA_IO_PAD_SDMMC1, TEGRA_IO_PAD_SDMMC1_HV, TEGRA_IO_PAD_SDMMC2, @@ -124,10 +137,16 @@ enum tegra_io_pad { TEGRA_IO_PAD_SDMMC3, TEGRA_IO_PAD_SDMMC3_HV, TEGRA_IO_PAD_SDMMC4, + TEGRA_IO_PAD_SOC_GPIO10, + TEGRA_IO_PAD_SOC_GPIO12, + TEGRA_IO_PAD_SOC_GPIO13, + TEGRA_IO_PAD_SOC_GPIO53, TEGRA_IO_PAD_SPI, TEGRA_IO_PAD_SPI_HV, TEGRA_IO_PAD_SYS_DDC, TEGRA_IO_PAD_UART, + TEGRA_IO_PAD_UART4, + TEGRA_IO_PAD_UART5, TEGRA_IO_PAD_UFS, TEGRA_IO_PAD_USB0, TEGRA_IO_PAD_USB1, diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index 
ea8c93bbb0e0..0cdc3999ecfa 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -23,6 +23,7 @@ struct snd_compr_ops; * struct snd_compr_runtime: runtime stream description * @state: stream state * @ops: pointer to DSP callbacks + * @dma_buffer_p: runtime dma buffer pointer * @buffer: pointer to kernel buffer, valid only when not in mmap mode or * DSP doesn't implement copy * @buffer_size: size of the above buffer @@ -37,6 +38,7 @@ struct snd_compr_ops; struct snd_compr_runtime { snd_pcm_state_t state; struct snd_compr_ops *ops; + struct snd_dma_buffer *dma_buffer_p; void *buffer; u64 buffer_size; u32 fragment_size; @@ -175,6 +177,23 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) wake_up(&stream->runtime->sleep); } +/** + * snd_compr_set_runtime_buffer - Set the Compress runtime buffer + * @substream: compress substream to set + * @bufp: the buffer information, NULL to clear + * + * Copy the buffer information to runtime buffer when @bufp is non-NULL. + * Otherwise it clears the current buffer information. + */ +static inline void snd_compr_set_runtime_buffer( + struct snd_compr_stream *substream, + struct snd_dma_buffer *bufp) +{ + struct snd_compr_runtime *runtime = substream->runtime; + + runtime->dma_buffer_p = bufp; +} + int snd_compr_stop_error(struct snd_compr_stream *stream, snd_pcm_state_t state); diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 0d98bb9068b1..7fa48b100936 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -236,6 +236,7 @@ struct hda_codec { /* misc flags */ unsigned int in_freeing:1; /* being released */ unsigned int registered:1; /* codec was registered */ + unsigned int display_power_control:1; /* needs display power */ unsigned int spdif_status_reset :1; /* needs to toggle SPDIF for each * status change * (e.g. 
Realtek codecs) diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h index 78626cde7081..2ec31b358950 100644 --- a/include/sound/hda_component.h +++ b/include/sound/hda_component.h @@ -5,10 +5,15 @@ #define __SOUND_HDA_COMPONENT_H #include <drm/drm_audio_component.h> +#include <sound/hdaudio.h> + +/* virtual idx for controller */ +#define HDA_CODEC_IDX_CONTROLLER HDA_MAX_CODECS #ifdef CONFIG_SND_HDA_COMPONENT int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); -int snd_hdac_display_power(struct hdac_bus *bus, bool enable); +void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, + bool enable); int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int dev_id, int rate); int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id, @@ -25,9 +30,9 @@ static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable) { return 0; } -static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable) +static inline void snd_hdac_display_power(struct hdac_bus *bus, + unsigned int idx, bool enable) { - return 0; } static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int dev_id, int rate) diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index cd1773d0e08f..b4fa1c775251 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -79,7 +79,6 @@ struct hdac_device { /* misc flags */ atomic_t in_pm; /* suspend/resume being performed */ - bool link_power_control:1; /* sysfs */ struct hdac_widget_tree *widgets; @@ -99,6 +98,12 @@ enum { HDA_DEV_ASOC, }; +enum { + SND_SKL_PCI_BIND_AUTO, /* automatic selection based on pci class */ + SND_SKL_PCI_BIND_LEGACY,/* bind only with legacy driver */ + SND_SKL_PCI_BIND_ASOC /* bind only with ASoC driver */ +}; + /* direction */ enum { HDA_INPUT, HDA_OUTPUT @@ -237,8 +242,6 @@ struct hdac_bus_ops { /* get a response from the last command */ int (*get_response)(struct hdac_bus *bus, unsigned int addr, unsigned int *res); - /* control the link power */ - int (*link_power)(struct hdac_bus *bus, bool enable); }; /* @@ -363,7 +366,8 @@ struct hdac_bus { /* DRM component interface */ struct drm_audio_component *audio_component; - int drm_power_refcount; + long display_power_status; + bool display_power_active; /* parameters required for enhanced capabilities */ int num_streams; @@ -389,6 +393,7 @@ void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex); int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec); void snd_hdac_bus_remove_device(struct hdac_bus *bus, struct hdac_device *codec); +void snd_hdac_bus_process_unsol_events(struct work_struct *work); static inline void snd_hdac_codec_link_up(struct hdac_device *codec) { @@ -404,7 +409,6 @@ int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val); int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr, unsigned int *res); int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus); -int snd_hdac_link_power(struct hdac_device *codec, bool enable); bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset); void snd_hdac_bus_stop_chip(struct hdac_bus *bus); diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h index 2dd37cada7c0..888a833d3b00 100644 --- a/include/sound/pcm_params.h +++ b/include/sound/pcm_params.h @@ -254,11 +254,13 @@ static inline int snd_interval_empty(const struct snd_interval *i) static inline int snd_interval_single(const struct snd_interval *i) { return 
(i->min == i->max || - (i->min + 1 == i->max && i->openmax)); + (i->min + 1 == i->max && (i->openmin || i->openmax))); } static inline int snd_interval_value(const struct snd_interval *i) { + if (i->openmin && !i->openmax) + return i->max; return i->min; } diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index fb0318f9b10f..6d69ed2bd7b1 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -116,12 +116,12 @@ int asoc_simple_card_clean_reference(struct snd_soc_card *card); void asoc_simple_card_convert_fixup(struct asoc_simple_card_data *data, struct snd_pcm_hw_params *params); -void asoc_simple_card_parse_convert(struct device *dev, char *prefix, +void asoc_simple_card_parse_convert(struct device *dev, + struct device_node *np, char *prefix, struct asoc_simple_card_data *data); int asoc_simple_card_of_parse_routing(struct snd_soc_card *card, - char *prefix, - int optional); + char *prefix); int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card, char *prefix); diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h index f48f59e5b7b0..bb5e1e4ce8bf 100644 --- a/include/sound/soc-acpi-intel-match.h +++ b/include/sound/soc-acpi-intel-match.h @@ -24,6 +24,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[]; +extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[]; /* * generic table used for HDA codec-based platforms, possibly with diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index e45b2330d16a..266e64e3c24c 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -38,6 +38,20 @@ struct snd_soc_acpi_mach * snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines); /** + * snd_soc_acpi_mach_params: interface for machine driver configuration + * + * @acpi_ipc_irq_index: used for BYT-CR detection + * @platform: string used for HDaudio codec support + * @codec_mask: used for HDAudio support + */ +struct snd_soc_acpi_mach_params { + u32 acpi_ipc_irq_index; + const char *platform; + u32 codec_mask; + u32 dmic_num; +}; + +/** * snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are * related to the hardware, except for the firmware and topology file names. 
* A platform supported by legacy and Sound Open Firmware (SOF) would expose @@ -68,6 +82,7 @@ struct snd_soc_acpi_mach { struct snd_soc_acpi_mach * (*machine_quirk)(void *arg); const void *quirk_data; void *pdata; + struct snd_soc_acpi_mach_params mach_params; const char *sof_fw_filename; const char *sof_tplg_filename; const char *asoc_plat_name; diff --git a/include/sound/soc.h b/include/sound/soc.h index f1dab1f4b194..8ec1de856ee7 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -553,12 +553,12 @@ static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count, } #endif -#ifdef CONFIG_SND_SOC_AC97_BUS struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component); struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component, unsigned int id, unsigned int id_mask); void snd_soc_free_ac97_component(struct snd_ac97 *ac97); +#ifdef CONFIG_SND_SOC_AC97_BUS int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops); int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops, struct platform_device *pdev); @@ -1192,7 +1192,7 @@ struct snd_soc_pcm_runtime { ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \ (i)++) #define for_each_rtd_codec_dai_rollback(rtd, i, dai) \ - for (; ((i--) >= 0) && ((dai) = rtd->codec_dais[i]);) + for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);) /* mixer control */ @@ -1477,10 +1477,20 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np, unsigned int *rx_mask, unsigned int *slots, unsigned int *slot_width); -void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card, +void snd_soc_of_parse_node_prefix(struct device_node *np, struct snd_soc_codec_conf *codec_conf, struct device_node *of_node, const char *propname); +static inline +void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card, + struct snd_soc_codec_conf *codec_conf, + struct device_node *of_node, + const char *propname) +{ + snd_soc_of_parse_node_prefix(card->dev->of_node, + codec_conf, of_node, propname); +} + int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, const char *propname); unsigned int snd_soc_of_parse_daifmt(struct device_node *np, diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index e3bdb0550a59..69b7b955902c 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -46,6 +46,10 @@ /* Used by transport_get_inquiry_vpd_device_ident() */ #define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254 +#define INQUIRY_VENDOR_LEN 8 +#define INQUIRY_MODEL_LEN 16 +#define INQUIRY_REVISION_LEN 4 + /* Attempts before moving from SHORT to LONG */ #define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3 #define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */ @@ -87,6 +91,8 @@ #define DA_EMULATE_3PC 1 /* No Emulation for PSCSI by default */ #define DA_EMULATE_ALUA 0 +/* Emulate SCSI2 RESERVE/RELEASE and Persistent Reservations by default */ +#define DA_EMULATE_PR 1 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ #define DA_ENFORCE_PR_ISIDS 1 /* Force SPC-3 PR Activate Persistence across Target Power Loss */ @@ -134,7 +140,6 @@ enum se_cmd_flags_table { SCF_SENT_CHECK_CONDITION = 0x00000800, SCF_OVERFLOW_BIT = 0x00001000, SCF_UNDERFLOW_BIT = 0x00002000, - SCF_SEND_DELAYED_TAS = 0x00004000, SCF_ALUA_NON_OPTIMIZED = 0x00008000, SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000, SCF_COMPARE_AND_WRITE = 0x00080000, @@ -314,9 +319,13 @@ struct t10_vpd { }; struct t10_wwn { - char vendor[8]; - char model[16]; - char revision[4]; + /* + * SCSI 
left aligned strings may not be null terminated. +1 to ensure a + * null terminator is always present. + */ + char vendor[INQUIRY_VENDOR_LEN + 1]; + char model[INQUIRY_MODEL_LEN + 1]; + char revision[INQUIRY_REVISION_LEN + 1]; char unit_serial[INQUIRY_VPD_SERIAL_LEN]; spinlock_t t10_vpd_lock; struct se_device *t10_dev; @@ -474,7 +483,8 @@ struct se_cmd { struct se_session *se_sess; struct se_tmr_req *se_tmr_req; struct list_head se_cmd_list; - struct completion *compl; + struct completion *free_compl; + struct completion *abrt_compl; const struct target_core_fabric_ops *se_tfo; sense_reason_t (*execute_cmd)(struct se_cmd *); sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); @@ -601,6 +611,7 @@ struct se_session { struct se_node_acl *se_node_acl; struct se_portal_group *se_tpg; void *fabric_sess_ptr; + struct percpu_ref cmd_count; struct list_head sess_list; struct list_head sess_acl_list; struct list_head sess_cmd_list; @@ -664,7 +675,7 @@ struct se_dev_attrib { int emulate_tpws; int emulate_caw; int emulate_3pc; - int pi_prot_format; + int emulate_pr; enum target_prot_type pi_prot_type; enum target_prot_type hw_pi_prot_type; int pi_prot_verify; @@ -731,7 +742,6 @@ struct se_lun { struct scsi_port_stats lun_stats; struct config_group lun_group; struct se_port_stat_grps port_stat_grps; - struct completion lun_ref_comp; struct completion lun_shutdown_comp; struct percpu_ref lun_ref; struct list_head lun_dev_link; @@ -794,7 +804,6 @@ struct se_device { struct t10_pr_registration *dev_pr_res_holder; struct list_head dev_sep_list; struct list_head dev_tmr_list; - struct workqueue_struct *tmr_wq; struct work_struct qf_work_queue; struct list_head delayed_cmd_list; struct list_head state_list; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index f4147b398431..ee5ddd81cd8d 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -8,7 +8,18 @@ struct target_core_fabric_ops { struct module *module; - const char *name; + /* + * XXX: Special case for iscsi/iSCSI... + * If non-null, fabric_alias is used for matching target/$fabric + * ConfigFS paths. If null, fabric_name is used for this (see below). + */ + const char *fabric_alias; + /* + * fabric_name is used for matching target/$fabric ConfigFS paths + * without a fabric_alias (see above). It's also used for the ALUA state + * path and is stored on disk with PR state. + */ + const char *fabric_name; size_t node_acl_size; /* * Limits number of scatterlist entries per SCF_SCSI_DATA_CDB payload. @@ -23,7 +34,6 @@ struct target_core_fabric_ops { * XXX: Currently assumes single PAGE_SIZE per scatterlist entry */ u32 max_data_sg_nents; - char *(*get_fabric_name)(void); char *(*tpg_get_wwn)(struct se_portal_group *); u16 (*tpg_get_tag)(struct se_portal_group *); u32 (*tpg_get_default_depth)(struct se_portal_group *); @@ -101,6 +111,13 @@ struct target_core_fabric_ops { struct configfs_attribute **tfc_tpg_nacl_attrib_attrs; struct configfs_attribute **tfc_tpg_nacl_auth_attrs; struct configfs_attribute **tfc_tpg_nacl_param_attrs; + + /* + * Set this member variable to true if the SCSI transport protocol + * (e.g. iSCSI) requires that the Data-Out buffer is transferred in + * its entirety before a command is aborted. 
+ */ + bool write_pending_must_be_called; }; int target_register_template(const struct target_core_fabric_ops *fo); @@ -116,7 +133,7 @@ struct se_session *target_setup_session(struct se_portal_group *, struct se_session *, void *)); void target_remove_session(struct se_session *); -void transport_init_session(struct se_session *); +int transport_init_session(struct se_session *se_sess); struct se_session *transport_alloc_session(enum target_prot_op); int transport_alloc_session_tags(struct se_session *, unsigned int, unsigned int); @@ -149,12 +166,12 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, int transport_handle_cdb_direct(struct se_cmd *); sense_reason_t transport_generic_new_cmd(struct se_cmd *); +void target_put_cmd_and_wait(struct se_cmd *cmd); void target_execute_cmd(struct se_cmd *cmd); int transport_generic_free_cmd(struct se_cmd *, int); bool transport_wait_for_tasks(struct se_cmd *); -int transport_check_aborted_status(struct se_cmd *, int); int transport_send_check_condition_and_sense(struct se_cmd *, sense_reason_t, int); int target_get_sess_cmd(struct se_cmd *, bool); diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h index 2cbd6e42ad83..e4526f85c19d 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcache.h @@ -221,9 +221,30 @@ DEFINE_EVENT(cache_set, bcache_journal_entry_full, TP_ARGS(c) ); -DEFINE_EVENT(bcache_bio, bcache_journal_write, - TP_PROTO(struct bio *bio), - TP_ARGS(bio) +TRACE_EVENT(bcache_journal_write, + TP_PROTO(struct bio *bio, u32 keys), + TP_ARGS(bio, keys), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(sector_t, sector ) + __field(unsigned int, nr_sector ) + __array(char, rwbs, 6 ) + __field(u32, nr_keys ) + ), + + TP_fast_assign( + __entry->dev = bio_dev(bio); + __entry->sector = bio->bi_iter.bi_sector; + __entry->nr_sector = bio->bi_iter.bi_size >> 9; + __entry->nr_keys = keys; + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); + ), + + TP_printk("%d,%d %s %llu + %u keys %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, __entry->nr_sector, + __entry->nr_keys) ); /* Btree */ diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 8568946f491d..2887503e4d12 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -92,7 +92,7 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); #define TP_STRUCT__entry_fsid __array(u8, fsid, BTRFS_FSID_SIZE) #define TP_fast_assign_fsid(fs_info) \ - memcpy(__entry->fsid, fs_info->fsid, BTRFS_FSID_SIZE) + memcpy(__entry->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE) #define TP_STRUCT__entry_btrfs(args...) 
\ TP_STRUCT__entry( \ @@ -1048,6 +1048,8 @@ TRACE_EVENT(btrfs_trigger_flush, { FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS"}, \ { FLUSH_DELALLOC, "FLUSH_DELALLOC"}, \ { FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT"}, \ + { FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR"}, \ + { FLUSH_DELAYED_REFS, "FLUSH_DELAYED_REFS"}, \ { ALLOC_CHUNK, "ALLOC_CHUNK"}, \ { COMMIT_TRANS, "COMMIT_TRANS"}) diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 698e0d8a5ca4..d68e9e536814 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -226,6 +226,26 @@ TRACE_EVENT(ext4_drop_inode, (unsigned long) __entry->ino, __entry->drop) ); +TRACE_EVENT(ext4_nfs_commit_metadata, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + ), + + TP_printk("dev %d,%d ino %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino) +); + TRACE_EVENT(ext4_mark_inode_dirty, TP_PROTO(struct inode *inode, unsigned long IP), diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h index 68b17c116907..fad7befa612d 100644 --- a/include/trace/events/filelock.h +++ b/include/trace/events/filelock.h @@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(filelock_lock, __field(struct file_lock *, fl) __field(unsigned long, i_ino) __field(dev_t, s_dev) - __field(struct file_lock *, fl_next) + __field(struct file_lock *, fl_blocker) __field(fl_owner_t, fl_owner) __field(unsigned int, fl_pid) __field(unsigned int, fl_flags) @@ -82,7 +82,7 @@ DECLARE_EVENT_CLASS(filelock_lock, __entry->fl = fl ? fl : NULL; __entry->s_dev = inode->i_sb->s_dev; __entry->i_ino = inode->i_ino; - __entry->fl_next = fl ? fl->fl_next : NULL; + __entry->fl_blocker = fl ? fl->fl_blocker : NULL; __entry->fl_owner = fl ? fl->fl_owner : NULL; __entry->fl_pid = fl ? fl->fl_pid : 0; __entry->fl_flags = fl ? fl->fl_flags : 0; @@ -92,9 +92,9 @@ DECLARE_EVENT_CLASS(filelock_lock, __entry->ret = ret; ), - TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d", + TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d", __entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev), - __entry->i_ino, __entry->fl_next, __entry->fl_owner, + __entry->i_ino, __entry->fl_blocker, __entry->fl_owner, __entry->fl_pid, show_fl_flags(__entry->fl_flags), show_fl_type(__entry->fl_type), __entry->fl_start, __entry->fl_end, __entry->ret) @@ -125,7 +125,7 @@ DECLARE_EVENT_CLASS(filelock_lease, __field(struct file_lock *, fl) __field(unsigned long, i_ino) __field(dev_t, s_dev) - __field(struct file_lock *, fl_next) + __field(struct file_lock *, fl_blocker) __field(fl_owner_t, fl_owner) __field(unsigned int, fl_flags) __field(unsigned char, fl_type) @@ -137,7 +137,7 @@ DECLARE_EVENT_CLASS(filelock_lease, __entry->fl = fl ? fl : NULL; __entry->s_dev = inode->i_sb->s_dev; __entry->i_ino = inode->i_ino; - __entry->fl_next = fl ? fl->fl_next : NULL; + __entry->fl_blocker = fl ? fl->fl_blocker : NULL; __entry->fl_owner = fl ? fl->fl_owner : NULL; __entry->fl_flags = fl ? fl->fl_flags : 0; __entry->fl_type = fl ? fl->fl_type : 0; @@ -145,9 +145,9 @@ DECLARE_EVENT_CLASS(filelock_lease, __entry->fl_downgrade_time = fl ? 
fl->fl_downgrade_time : 0; ), - TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu", + TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu", __entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev), - __entry->i_ino, __entry->fl_next, __entry->fl_owner, + __entry->i_ino, __entry->fl_blocker, __entry->fl_owner, show_fl_flags(__entry->fl_flags), show_fl_type(__entry->fl_type), __entry->fl_break_time, __entry->fl_downgrade_time) diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h new file mode 100644 index 000000000000..87408faf6e4e --- /dev/null +++ b/include/trace/events/iscsi.h @@ -0,0 +1,107 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iscsi + +#if !defined(_TRACE_ISCSI_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_ISCSI_H + +#include <linux/tracepoint.h> + +/* max debug message length */ +#define ISCSI_MSG_MAX 256 + +/* + * Declare tracepoint helper function. + */ +void iscsi_dbg_trace(void (*trace)(struct device *dev, struct va_format *), + struct device *dev, const char *fmt, ...); + +/* + * Declare event class for iscsi debug messages. + */ +DECLARE_EVENT_CLASS(iscsi_log_msg, + + TP_PROTO(struct device *dev, struct va_format *vaf), + + TP_ARGS(dev, vaf), + + TP_STRUCT__entry( + __string(dname, dev_name(dev) ) + __dynamic_array(char, msg, ISCSI_MSG_MAX ) + ), + + TP_fast_assign( + __assign_str(dname, dev_name(dev)); + vsnprintf(__get_str(msg), ISCSI_MSG_MAX, vaf->fmt, *vaf->va); + ), + + TP_printk("%s: %s",__get_str(dname), __get_str(msg) + ) +); + +/* + * Define event to capture iscsi connection debug messages. + */ +DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_conn, + TP_PROTO(struct device *dev, struct va_format *vaf), + + TP_ARGS(dev, vaf) +); + +/* + * Define event to capture iscsi session debug messages. + */ +DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_session, + TP_PROTO(struct device *dev, struct va_format *vaf), + + TP_ARGS(dev, vaf) +); + +/* + * Define event to capture iscsi error handling debug messages. + */ +DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_eh, + TP_PROTO(struct device *dev, struct va_format *vaf), + + TP_ARGS(dev, vaf) +); + +/* + * Define event to capture iscsi tcp debug messages. + */ +DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_tcp, + TP_PROTO(struct device *dev, struct va_format *vaf), + + TP_ARGS(dev, vaf) +); + +/* + * Define event to capture iscsi sw tcp debug messages. + */ +DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_sw_tcp, + TP_PROTO(struct device *dev, struct va_format *vaf), + + TP_ARGS(dev, vaf) +); + +/* + * Define event to capture iscsi transport session debug messages. + */ +DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_trans_session, + TP_PROTO(struct device *dev, struct va_format *vaf), + + TP_ARGS(dev, vaf) +); + +/* + * Define event to capture iscsi transport connection debug messages. 
+ */ +DEFINE_EVENT(iscsi_log_msg, iscsi_dbg_trans_conn, + TP_PROTO(struct device *dev, struct va_format *vaf), + + TP_ARGS(dev, vaf) +); + +#endif /* _TRACE_ISCSI_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h index a9834c37ac40..c0e7d24ca256 100644 --- a/include/trace/events/kyber.h +++ b/include/trace/events/kyber.h @@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency, TP_fast_assign( __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); - strlcpy(__entry->domain, domain, DOMAIN_LEN); - strlcpy(__entry->type, type, DOMAIN_LEN); + strlcpy(__entry->domain, domain, sizeof(__entry->domain)); + strlcpy(__entry->type, type, sizeof(__entry->type)); __entry->percentile = percentile; __entry->numerator = numerator; __entry->denominator = denominator; @@ -60,7 +60,7 @@ TRACE_EVENT(kyber_adjust, TP_fast_assign( __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); - strlcpy(__entry->domain, domain, DOMAIN_LEN); + strlcpy(__entry->domain, domain, sizeof(__entry->domain)); __entry->depth = depth; ), @@ -82,7 +82,7 @@ TRACE_EVENT(kyber_throttled, TP_fast_assign( __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); - strlcpy(__entry->domain, domain, DOMAIN_LEN); + strlcpy(__entry->domain, domain, sizeof(__entry->domain)); ), TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), diff --git a/include/trace/events/net.h b/include/trace/events/net.h index 00aa72ce0e7c..1efd7d9b25fe 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -244,6 +244,65 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry, TP_ARGS(skb) ); +DECLARE_EVENT_CLASS(net_dev_rx_exit_template, + + TP_PROTO(int ret), + + TP_ARGS(ret), + + TP_STRUCT__entry( + __field(int, ret) + ), + + TP_fast_assign( + __entry->ret = ret; + ), + + TP_printk("ret=%d", __entry->ret) +); + +DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_frags_exit, + + TP_PROTO(int ret), + + TP_ARGS(ret) +); + +DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_receive_exit, + + TP_PROTO(int ret), + + TP_ARGS(ret) +); + +DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_exit, + + TP_PROTO(int ret), + + TP_ARGS(ret) +); + +DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit, + + TP_PROTO(int ret), + + TP_ARGS(ret) +); + +DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit, + + TP_PROTO(int ret), + + TP_ARGS(ret) +); + +DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit, + + TP_PROTO(int ret), + + TP_ARGS(ret) +); + #endif /* _TRACE_NET_H */ /* This part must be outside protection */ diff --git a/include/trace/events/objagg.h b/include/trace/events/objagg.h new file mode 100644 index 000000000000..fcec0fc9eb0c --- /dev/null +++ b/include/trace/events/objagg.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2018 Mellanox Technologies. 
All rights reserved */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM objagg + +#if !defined(__TRACE_OBJAGG_H) || defined(TRACE_HEADER_MULTI_READ) +#define __TRACE_OBJAGG_H + +#include <linux/tracepoint.h> + +struct objagg; +struct objagg_obj; + +TRACE_EVENT(objagg_create, + TP_PROTO(const struct objagg *objagg), + + TP_ARGS(objagg), + + TP_STRUCT__entry( + __field(const void *, objagg) + ), + + TP_fast_assign( + __entry->objagg = objagg; + ), + + TP_printk("objagg %p", __entry->objagg) +); + +TRACE_EVENT(objagg_destroy, + TP_PROTO(const struct objagg *objagg), + + TP_ARGS(objagg), + + TP_STRUCT__entry( + __field(const void *, objagg) + ), + + TP_fast_assign( + __entry->objagg = objagg; + ), + + TP_printk("objagg %p", __entry->objagg) +); + +TRACE_EVENT(objagg_obj_create, + TP_PROTO(const struct objagg *objagg, + const struct objagg_obj *obj), + + TP_ARGS(objagg, obj), + + TP_STRUCT__entry( + __field(const void *, objagg) + __field(const void *, obj) + ), + + TP_fast_assign( + __entry->objagg = objagg; + __entry->obj = obj; + ), + + TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj) +); + +TRACE_EVENT(objagg_obj_destroy, + TP_PROTO(const struct objagg *objagg, + const struct objagg_obj *obj), + + TP_ARGS(objagg, obj), + + TP_STRUCT__entry( + __field(const void *, objagg) + __field(const void *, obj) + ), + + TP_fast_assign( + __entry->objagg = objagg; + __entry->obj = obj; + ), + + TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj) +); + +TRACE_EVENT(objagg_obj_get, + TP_PROTO(const struct objagg *objagg, + const struct objagg_obj *obj, + unsigned int refcount), + + TP_ARGS(objagg, obj, refcount), + + TP_STRUCT__entry( + __field(const void *, objagg) + __field(const void *, obj) + __field(unsigned int, refcount) + ), + + TP_fast_assign( + __entry->objagg = objagg; + __entry->obj = obj; + __entry->refcount = refcount; + ), + + TP_printk("objagg %p, obj %p, refcount %u", + __entry->objagg, __entry->obj, __entry->refcount) +); + +TRACE_EVENT(objagg_obj_put, + TP_PROTO(const struct objagg *objagg, + const struct objagg_obj *obj, + unsigned int refcount), + + TP_ARGS(objagg, obj, refcount), + + TP_STRUCT__entry( + __field(const void *, objagg) + __field(const void *, obj) + __field(unsigned int, refcount) + ), + + TP_fast_assign( + __entry->objagg = objagg; + __entry->obj = obj; + __entry->refcount = refcount; + ), + + TP_printk("objagg %p, obj %p, refcount %u", + __entry->objagg, __entry->obj, __entry->refcount) +); + +TRACE_EVENT(objagg_obj_parent_assign, + TP_PROTO(const struct objagg *objagg, + const struct objagg_obj *obj, + const struct objagg_obj *parent, + unsigned int parent_refcount), + + TP_ARGS(objagg, obj, parent, parent_refcount), + + TP_STRUCT__entry( + __field(const void *, objagg) + __field(const void *, obj) + __field(const void *, parent) + __field(unsigned int, parent_refcount) + ), + + TP_fast_assign( + __entry->objagg = objagg; + __entry->obj = obj; + __entry->parent = parent; + __entry->parent_refcount = parent_refcount; + ), + + TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u", + __entry->objagg, __entry->obj, + __entry->parent, __entry->parent_refcount) +); + +TRACE_EVENT(objagg_obj_parent_unassign, + TP_PROTO(const struct objagg *objagg, + const struct objagg_obj *obj, + const struct objagg_obj *parent, + unsigned int parent_refcount), + + TP_ARGS(objagg, obj, parent, parent_refcount), + + TP_STRUCT__entry( + __field(const void *, objagg) + __field(const void *, obj) + __field(const void *, parent) + __field(unsigned int, 
parent_refcount) + ), + + TP_fast_assign( + __entry->objagg = objagg; + __entry->obj = obj; + __entry->parent = parent; + __entry->parent_refcount = parent_refcount; + ), + + TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u", + __entry->objagg, __entry->obj, + __entry->parent, __entry->parent_refcount) +); + +TRACE_EVENT(objagg_obj_root_create, + TP_PROTO(const struct objagg *objagg, + const struct objagg_obj *obj), + + TP_ARGS(objagg, obj), + + TP_STRUCT__entry( + __field(const void *, objagg) + __field(const void *, obj) + ), + + TP_fast_assign( + __entry->objagg = objagg; + __entry->obj = obj; + ), + + TP_printk("objagg %p, obj %p", + __entry->objagg, __entry->obj) +); + +TRACE_EVENT(objagg_obj_root_destroy, + TP_PROTO(const struct objagg *objagg, + const struct objagg_obj *obj), + + TP_ARGS(objagg, obj), + + TP_STRUCT__entry( + __field(const void *, objagg) + __field(const void *, obj) + ), + + TP_fast_assign( + __entry->objagg = objagg; + __entry->obj = obj; + ), + + TP_printk("objagg %p, obj %p", + __entry->objagg, __entry->obj) +); + +#endif /* __TRACE_OBJAGG_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index b093058f78aa..399b1aedc927 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -10,6 +10,7 @@ #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_RPCRDMA_H +#include <linux/scatterlist.h> #include <linux/tracepoint.h> #include <trace/events/rdma.h> @@ -97,7 +98,6 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event, TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(const void *, mr) __field(unsigned int, pos) __field(int, nents) __field(u32, handle) @@ -109,7 +109,6 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event, TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __entry->mr = mr; __entry->pos = pos; __entry->nents = mr->mr_nents; __entry->handle = mr->mr_handle; @@ -118,8 +117,8 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event, __entry->nsegs = nsegs; ), - TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)", - __entry->task_id, __entry->client_id, __entry->mr, + TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)", + __entry->task_id, __entry->client_id, __entry->pos, __entry->length, (unsigned long long)__entry->offset, __entry->handle, __entry->nents < __entry->nsegs ? 
"more" : "last" @@ -127,7 +126,7 @@ DECLARE_EVENT_CLASS(xprtrdma_rdch_event, ); #define DEFINE_RDCH_EVENT(name) \ - DEFINE_EVENT(xprtrdma_rdch_event, name, \ + DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\ TP_PROTO( \ const struct rpc_task *task, \ unsigned int pos, \ @@ -148,7 +147,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event, TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) - __field(const void *, mr) __field(int, nents) __field(u32, handle) __field(u32, length) @@ -159,7 +157,6 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event, TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __entry->mr = mr; __entry->nents = mr->mr_nents; __entry->handle = mr->mr_handle; __entry->length = mr->mr_length; @@ -167,8 +164,8 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event, __entry->nsegs = nsegs; ), - TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)", - __entry->task_id, __entry->client_id, __entry->mr, + TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)", + __entry->task_id, __entry->client_id, __entry->length, (unsigned long long)__entry->offset, __entry->handle, __entry->nents < __entry->nsegs ? "more" : "last" @@ -176,7 +173,7 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event, ); #define DEFINE_WRCH_EVENT(name) \ - DEFINE_EVENT(xprtrdma_wrch_event, name, \ + DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\ TP_PROTO( \ const struct rpc_task *task, \ struct rpcrdma_mr *mr, \ @@ -234,6 +231,18 @@ DECLARE_EVENT_CLASS(xprtrdma_frwr_done, ), \ TP_ARGS(wc, frwr)) +TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL); +TRACE_DEFINE_ENUM(DMA_TO_DEVICE); +TRACE_DEFINE_ENUM(DMA_FROM_DEVICE); +TRACE_DEFINE_ENUM(DMA_NONE); + +#define xprtrdma_show_direction(x) \ + __print_symbolic(x, \ + { DMA_BIDIRECTIONAL, "BIDIR" }, \ + { DMA_TO_DEVICE, "TO_DEVICE" }, \ + { DMA_FROM_DEVICE, "FROM_DEVICE" }, \ + { DMA_NONE, "NONE" }) + DECLARE_EVENT_CLASS(xprtrdma_mr, TP_PROTO( const struct rpcrdma_mr *mr @@ -246,6 +255,7 @@ DECLARE_EVENT_CLASS(xprtrdma_mr, __field(u32, handle) __field(u32, length) __field(u64, offset) + __field(u32, dir) ), TP_fast_assign( @@ -253,12 +263,13 @@ DECLARE_EVENT_CLASS(xprtrdma_mr, __entry->handle = mr->mr_handle; __entry->length = mr->mr_length; __entry->offset = mr->mr_offset; + __entry->dir = mr->mr_dir; ), - TP_printk("mr=%p %u@0x%016llx:0x%08x", + TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)", __entry->mr, __entry->length, - (unsigned long long)__entry->offset, - __entry->handle + (unsigned long long)__entry->offset, __entry->handle, + xprtrdma_show_direction(__entry->dir) ) ); @@ -371,11 +382,13 @@ TRACE_EVENT(xprtrdma_disconnect, DEFINE_RXPRT_EVENT(xprtrdma_conn_start); DEFINE_RXPRT_EVENT(xprtrdma_conn_tout); DEFINE_RXPRT_EVENT(xprtrdma_create); -DEFINE_RXPRT_EVENT(xprtrdma_destroy); +DEFINE_RXPRT_EVENT(xprtrdma_op_destroy); DEFINE_RXPRT_EVENT(xprtrdma_remove); DEFINE_RXPRT_EVENT(xprtrdma_reinsert); DEFINE_RXPRT_EVENT(xprtrdma_reconnect); -DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc); +DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); +DEFINE_RXPRT_EVENT(xprtrdma_op_close); +DEFINE_RXPRT_EVENT(xprtrdma_op_connect); TRACE_EVENT(xprtrdma_qp_event, TP_PROTO( @@ -437,9 +450,9 @@ TRACE_EVENT(xprtrdma_createmrs, DEFINE_RXPRT_EVENT(xprtrdma_nomrs); -DEFINE_RDCH_EVENT(xprtrdma_read_chunk); -DEFINE_WRCH_EVENT(xprtrdma_write_chunk); -DEFINE_WRCH_EVENT(xprtrdma_reply_chunk); +DEFINE_RDCH_EVENT(read); +DEFINE_WRCH_EVENT(write); +DEFINE_WRCH_EVENT(reply); TRACE_DEFINE_ENUM(rpcrdma_noch); TRACE_DEFINE_ENUM(rpcrdma_readch); @@ -570,7 +583,7 @@ 
TRACE_EVENT(xprtrdma_post_recvs, __entry->r_xprt = r_xprt; __entry->count = count; __entry->status = status; - __entry->posted = r_xprt->rx_buf.rb_posted_receives; + __entry->posted = r_xprt->rx_ep.rep_receive_count; __assign_str(addr, rpcrdma_addrstr(r_xprt)); __assign_str(port, rpcrdma_portstr(r_xprt)); ), @@ -651,12 +664,147 @@ DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li); DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake); +TRACE_EVENT(xprtrdma_frwr_alloc, + TP_PROTO( + const struct rpcrdma_mr *mr, + int rc + ), + + TP_ARGS(mr, rc), + + TP_STRUCT__entry( + __field(const void *, mr) + __field(int, rc) + ), + + TP_fast_assign( + __entry->mr = mr; + __entry->rc = rc; + ), + + TP_printk("mr=%p: rc=%d", + __entry->mr, __entry->rc + ) +); + +TRACE_EVENT(xprtrdma_frwr_dereg, + TP_PROTO( + const struct rpcrdma_mr *mr, + int rc + ), + + TP_ARGS(mr, rc), + + TP_STRUCT__entry( + __field(const void *, mr) + __field(u32, handle) + __field(u32, length) + __field(u64, offset) + __field(u32, dir) + __field(int, rc) + ), + + TP_fast_assign( + __entry->mr = mr; + __entry->handle = mr->mr_handle; + __entry->length = mr->mr_length; + __entry->offset = mr->mr_offset; + __entry->dir = mr->mr_dir; + __entry->rc = rc; + ), + + TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d", + __entry->mr, __entry->length, + (unsigned long long)__entry->offset, __entry->handle, + xprtrdma_show_direction(__entry->dir), + __entry->rc + ) +); + +TRACE_EVENT(xprtrdma_frwr_sgerr, + TP_PROTO( + const struct rpcrdma_mr *mr, + int sg_nents + ), + + TP_ARGS(mr, sg_nents), + + TP_STRUCT__entry( + __field(const void *, mr) + __field(u64, addr) + __field(u32, dir) + __field(int, nents) + ), + + TP_fast_assign( + __entry->mr = mr; + __entry->addr = mr->mr_sg->dma_address; + __entry->dir = mr->mr_dir; + __entry->nents = sg_nents; + ), + + TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d", + __entry->mr, __entry->addr, + xprtrdma_show_direction(__entry->dir), + __entry->nents + ) +); + +TRACE_EVENT(xprtrdma_frwr_maperr, + TP_PROTO( + const struct rpcrdma_mr *mr, + int num_mapped + ), + + TP_ARGS(mr, num_mapped), + + TP_STRUCT__entry( + __field(const void *, mr) + __field(u64, addr) + __field(u32, dir) + __field(int, num_mapped) + __field(int, nents) + ), + + TP_fast_assign( + __entry->mr = mr; + __entry->addr = mr->mr_sg->dma_address; + __entry->dir = mr->mr_dir; + __entry->num_mapped = num_mapped; + __entry->nents = mr->mr_nents; + ), + + TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d", + __entry->mr, __entry->addr, + xprtrdma_show_direction(__entry->dir), + __entry->num_mapped, __entry->nents + ) +); + DEFINE_MR_EVENT(localinv); DEFINE_MR_EVENT(map); DEFINE_MR_EVENT(unmap); DEFINE_MR_EVENT(remoteinv); DEFINE_MR_EVENT(recycle); +TRACE_EVENT(xprtrdma_dma_maperr, + TP_PROTO( + u64 addr + ), + + TP_ARGS(addr), + + TP_STRUCT__entry( + __field(u64, addr) + ), + + TP_fast_assign( + __entry->addr = addr; + ), + + TP_printk("dma addr=0x%llx\n", __entry->addr) +); + /** ** Reply events **/ @@ -824,7 +972,7 @@ TRACE_EVENT(xprtrdma_decode_seg, ** Allocation/release of rpcrdma_reqs and rpcrdma_reps **/ -TRACE_EVENT(xprtrdma_allocate, +TRACE_EVENT(xprtrdma_op_allocate, TP_PROTO( const struct rpc_task *task, const struct rpcrdma_req *req @@ -854,7 +1002,7 @@ TRACE_EVENT(xprtrdma_allocate, ) ); -TRACE_EVENT(xprtrdma_rpc_done, +TRACE_EVENT(xprtrdma_op_free, TP_PROTO( const struct rpc_task *task, const struct rpcrdma_req *req @@ -917,6 +1065,34 @@ TRACE_EVENT(xprtrdma_cb_setup, DEFINE_CB_EVENT(xprtrdma_cb_call); 
DEFINE_CB_EVENT(xprtrdma_cb_reply); +TRACE_EVENT(xprtrdma_leaked_rep, + TP_PROTO( + const struct rpc_rqst *rqst, + const struct rpcrdma_rep *rep + ), + + TP_ARGS(rqst, rep), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + __field(const void *, rep) + ), + + TP_fast_assign( + __entry->task_id = rqst->rq_task->tk_pid; + __entry->client_id = rqst->rq_task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->rep = rep; + ), + + TP_printk("task:%u@%u xid=0x%08x rep=%p", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->rep + ) +); + /** ** Server-side RPC/RDMA events **/ diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 573d5b901fb1..5b50fe4906d2 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -181,6 +181,7 @@ enum rxrpc_timer_trace { enum rxrpc_propose_ack_trace { rxrpc_propose_ack_client_tx_end, rxrpc_propose_ack_input_data, + rxrpc_propose_ack_ping_for_check_life, rxrpc_propose_ack_ping_for_keepalive, rxrpc_propose_ack_ping_for_lost_ack, rxrpc_propose_ack_ping_for_lost_reply, @@ -380,6 +381,7 @@ enum rxrpc_tx_point { #define rxrpc_propose_ack_traces \ EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ EM(rxrpc_propose_ack_input_data, "DataIn ") \ + EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \ EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \ EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \ EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index f07b270d4fc4..9a4bdfadab07 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -107,6 +107,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, #ifdef CREATE_TRACE_POINTS static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) { + unsigned int state; + #ifdef CONFIG_SCHED_DEBUG BUG_ON(p != current); #endif /* CONFIG_SCHED_DEBUG */ @@ -118,7 +120,15 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * if (preempt) return TASK_REPORT_MAX; - return 1 << task_state_index(p); + /* + * task_state_index() uses fls() and returns a value from 0-8 range. + * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using + * it for left shift operation to get the correct task->state + * mapping. + */ + state = task_state_index(p); + + return state ? 
(1 << (state - 1)) : state; } #endif /* CREATE_TRACE_POINTS */ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 28e384186c35..0d5d0d91f861 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -16,40 +16,6 @@ DECLARE_EVENT_CLASS(rpc_task_status, - TP_PROTO(struct rpc_task *task), - - TP_ARGS(task), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(int, status) - ), - - TP_fast_assign( - __entry->task_id = task->tk_pid; - __entry->client_id = task->tk_client->cl_clid; - __entry->status = task->tk_status; - ), - - TP_printk("task:%u@%u status=%d", - __entry->task_id, __entry->client_id, - __entry->status) -); - -DEFINE_EVENT(rpc_task_status, rpc_call_status, - TP_PROTO(struct rpc_task *task), - - TP_ARGS(task) -); - -DEFINE_EVENT(rpc_task_status, rpc_bind_status, - TP_PROTO(struct rpc_task *task), - - TP_ARGS(task) -); - -TRACE_EVENT(rpc_connect_status, TP_PROTO(const struct rpc_task *task), TP_ARGS(task), @@ -70,6 +36,16 @@ TRACE_EVENT(rpc_connect_status, __entry->task_id, __entry->client_id, __entry->status) ); +#define DEFINE_RPC_STATUS_EVENT(name) \ + DEFINE_EVENT(rpc_task_status, rpc_##name##_status, \ + TP_PROTO( \ + const struct rpc_task *task \ + ), \ + TP_ARGS(task)) + +DEFINE_RPC_STATUS_EVENT(call); +DEFINE_RPC_STATUS_EVENT(bind); +DEFINE_RPC_STATUS_EVENT(connect); TRACE_EVENT(rpc_request, TP_PROTO(const struct rpc_task *task), @@ -134,30 +110,17 @@ DECLARE_EVENT_CLASS(rpc_task_running, __entry->action ) ); +#define DEFINE_RPC_RUNNING_EVENT(name) \ + DEFINE_EVENT(rpc_task_running, rpc_task_##name, \ + TP_PROTO( \ + const struct rpc_task *task, \ + const void *action \ + ), \ + TP_ARGS(task, action)) -DEFINE_EVENT(rpc_task_running, rpc_task_begin, - - TP_PROTO(const struct rpc_task *task, const void *action), - - TP_ARGS(task, action) - -); - -DEFINE_EVENT(rpc_task_running, rpc_task_run_action, - - TP_PROTO(const struct rpc_task *task, const void *action), - - TP_ARGS(task, action) - -); - -DEFINE_EVENT(rpc_task_running, rpc_task_complete, - - TP_PROTO(const struct rpc_task *task, const void *action), - - TP_ARGS(task, action) - -); +DEFINE_RPC_RUNNING_EVENT(begin); +DEFINE_RPC_RUNNING_EVENT(run_action); +DEFINE_RPC_RUNNING_EVENT(complete); DECLARE_EVENT_CLASS(rpc_task_queued, @@ -195,22 +158,16 @@ DECLARE_EVENT_CLASS(rpc_task_queued, __get_str(q_name) ) ); +#define DEFINE_RPC_QUEUED_EVENT(name) \ + DEFINE_EVENT(rpc_task_queued, rpc_task_##name, \ + TP_PROTO( \ + const struct rpc_task *task, \ + const struct rpc_wait_queue *q \ + ), \ + TP_ARGS(task, q)) -DEFINE_EVENT(rpc_task_queued, rpc_task_sleep, - - TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q), - - TP_ARGS(task, q) - -); - -DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup, - - TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q), - - TP_ARGS(task, q) - -); +DEFINE_RPC_QUEUED_EVENT(sleep); +DEFINE_RPC_QUEUED_EVENT(wakeup); TRACE_EVENT(rpc_stats_latency, @@ -410,7 +367,11 @@ DEFINE_RPC_SOCKET_EVENT(rpc_socket_close); DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown); DECLARE_EVENT_CLASS(rpc_xprt_event, - TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_PROTO( + const struct rpc_xprt *xprt, + __be32 xid, + int status + ), TP_ARGS(xprt, xid, status), @@ -432,22 +393,19 @@ DECLARE_EVENT_CLASS(rpc_xprt_event, __get_str(port), __entry->xid, __entry->status) ); +#define DEFINE_RPC_XPRT_EVENT(name) \ + DEFINE_EVENT(rpc_xprt_event, xprt_##name, \ + TP_PROTO( \ + const struct rpc_xprt 
*xprt, \ + __be32 xid, \ + int status \ + ), \ + TP_ARGS(xprt, xid, status)) -DEFINE_EVENT(rpc_xprt_event, xprt_timer, - TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), - TP_ARGS(xprt, xid, status)); - -DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst, - TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), - TP_ARGS(xprt, xid, status)); - -DEFINE_EVENT(rpc_xprt_event, xprt_transmit, - TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), - TP_ARGS(xprt, xid, status)); - -DEFINE_EVENT(rpc_xprt_event, xprt_complete_rqst, - TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), - TP_ARGS(xprt, xid, status)); +DEFINE_RPC_XPRT_EVENT(timer); +DEFINE_RPC_XPRT_EVENT(lookup_rqst); +DEFINE_RPC_XPRT_EVENT(transmit); +DEFINE_RPC_XPRT_EVENT(complete_rqst); TRACE_EVENT(xprt_ping, TP_PROTO(const struct rpc_xprt *xprt, int status), @@ -569,7 +527,8 @@ TRACE_EVENT(svc_process, __field(u32, vers) __field(u32, proc) __string(service, name) - __string(addr, rqst->rq_xprt->xpt_remotebuf) + __string(addr, rqst->rq_xprt ? + rqst->rq_xprt->xpt_remotebuf : "(null)") ), TP_fast_assign( @@ -577,7 +536,8 @@ TRACE_EVENT(svc_process, __entry->vers = rqst->rq_vers; __entry->proc = rqst->rq_proc; __assign_str(service, name); - __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); + __assign_str(addr, rqst->rq_xprt ? + rqst->rq_xprt->xpt_remotebuf : "(null)"); ), TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u", @@ -587,7 +547,9 @@ TRACE_EVENT(svc_process, DECLARE_EVENT_CLASS(svc_rqst_event, - TP_PROTO(struct svc_rqst *rqst), + TP_PROTO( + const struct svc_rqst *rqst + ), TP_ARGS(rqst), @@ -607,14 +569,15 @@ DECLARE_EVENT_CLASS(svc_rqst_event, __get_str(addr), __entry->xid, show_rqstp_flags(__entry->flags)) ); +#define DEFINE_SVC_RQST_EVENT(name) \ + DEFINE_EVENT(svc_rqst_event, svc_##name, \ + TP_PROTO( \ + const struct svc_rqst *rqst \ + ), \ + TP_ARGS(rqst)) -DEFINE_EVENT(svc_rqst_event, svc_defer, - TP_PROTO(struct svc_rqst *rqst), - TP_ARGS(rqst)); - -DEFINE_EVENT(svc_rqst_event, svc_drop, - TP_PROTO(struct svc_rqst *rqst), - TP_ARGS(rqst)); +DEFINE_SVC_RQST_EVENT(defer); +DEFINE_SVC_RQST_EVENT(drop); DECLARE_EVENT_CLASS(svc_rqst_status, @@ -801,7 +764,9 @@ TRACE_EVENT(svc_stats_latency, ); DECLARE_EVENT_CLASS(svc_deferred_event, - TP_PROTO(struct svc_deferred_req *dr), + TP_PROTO( + const struct svc_deferred_req *dr + ), TP_ARGS(dr), @@ -818,13 +783,16 @@ DECLARE_EVENT_CLASS(svc_deferred_event, TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid) ); +#define DEFINE_SVC_DEFERRED_EVENT(name) \ + DEFINE_EVENT(svc_deferred_event, svc_##name##_deferred, \ + TP_PROTO( \ + const struct svc_deferred_req *dr \ + ), \ + TP_ARGS(dr)) + +DEFINE_SVC_DEFERRED_EVENT(drop); +DEFINE_SVC_DEFERRED_EVENT(revisit); -DEFINE_EVENT(svc_deferred_event, svc_drop_deferred, - TP_PROTO(struct svc_deferred_req *dr), - TP_ARGS(dr)); -DEFINE_EVENT(svc_deferred_event, svc_revisit_deferred, - TP_PROTO(struct svc_deferred_req *dr), - TP_ARGS(dr)); #endif /* _TRACE_SUNRPC_H */ #include <trace/define_trace.h> diff --git a/include/uapi/asm-generic/Kbuild.asm b/include/uapi/asm-generic/Kbuild.asm index 21381449d98a..355c4ac2c0b0 100644 --- a/include/uapi/asm-generic/Kbuild.asm +++ b/include/uapi/asm-generic/Kbuild.asm @@ -3,6 +3,7 @@ # mandatory-y += auxvec.h mandatory-y += bitsperlong.h +mandatory-y += bpf_perf_event.h mandatory-y += byteorder.h mandatory-y += errno.h mandatory-y += fcntl.h diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 538546edbfbd..d90127298f12 100644 --- 
a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -738,9 +738,11 @@ __SYSCALL(__NR_statx, sys_statx) __SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents) #define __NR_rseq 293 __SYSCALL(__NR_rseq, sys_rseq) +#define __NR_kexec_file_load 294 +__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load) #undef __NR_syscalls -#define __NR_syscalls 294 +#define __NR_syscalls 295 /* * 32 bit systems traditionally used different @@ -760,8 +762,10 @@ __SYSCALL(__NR_rseq, sys_rseq) #define __NR_ftruncate __NR3264_ftruncate #define __NR_lseek __NR3264_lseek #define __NR_sendfile __NR3264_sendfile +#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64) #define __NR_newfstatat __NR3264_fstatat #define __NR_fstat __NR3264_fstat +#endif #define __NR_mmap __NR3264_mmap #define __NR_fadvise64 __NR3264_fadvise64 #ifdef __NR3264_stat @@ -776,8 +780,10 @@ __SYSCALL(__NR_rseq, sys_rseq) #define __NR_ftruncate64 __NR3264_ftruncate #define __NR_llseek __NR3264_lseek #define __NR_sendfile64 __NR3264_sendfile +#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64) #define __NR_fstatat64 __NR3264_fstatat #define __NR_fstat64 __NR3264_fstat +#endif #define __NR_mmap2 __NR3264_mmap #define __NR_fadvise64_64 __NR3264_fadvise64 #ifdef __NR3264_stat diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 370e9a5536ef..be84e43c1e19 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -326,6 +326,12 @@ struct drm_amdgpu_gem_userptr { /* GFX9 and later: */ #define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0 #define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f +#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5 +#define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF +#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29 +#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF +#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43 +#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1 /* Set/Get helpers for tiling flags. 
*/ #define AMDGPU_TILING_SET(field, value) \ diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 0cd40ebfa1b1..0b44260a5ee9 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -151,6 +151,21 @@ extern "C" { #define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */ #define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */ +#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */ + +/* + * packed YCbCr420 2x2 tiled formats + * first 64 bits will contain Y,Cb,Cr components for a 2x2 tile + */ +/* [63:0] A3:A2:Y3:0:Cr0:0:Y2:0:A1:A0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */ +#define DRM_FORMAT_Y0L0 fourcc_code('Y', '0', 'L', '0') +/* [63:0] X3:X2:Y3:0:Cr0:0:Y2:0:X1:X0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */ +#define DRM_FORMAT_X0L0 fourcc_code('X', '0', 'L', '0') + +/* [63:0] A3:A2:Y3:Cr0:Y2:A1:A0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */ +#define DRM_FORMAT_Y0L2 fourcc_code('Y', '0', 'L', '2') +/* [63:0] X3:X2:Y3:Cr0:Y2:X1:X0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */ +#define DRM_FORMAT_X0L2 fourcc_code('X', '0', 'L', '2') /* * 2 plane RGB + A diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index d3e0fe31efc5..a439c2e67896 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -888,6 +888,25 @@ struct drm_mode_revoke_lease { __u32 lessee_id; }; +/** + * struct drm_mode_rect - Two dimensional rectangle. + * @x1: Horizontal starting coordinate (inclusive). + * @y1: Vertical starting coordinate (inclusive). + * @x2: Horizontal ending coordinate (exclusive). + * @y2: Vertical ending coordinate (exclusive). + * + * With drm subsystem using struct drm_rect to manage rectangular area this + * export it to user-space. + * + * Currently used by drm_mode_atomic blob property FB_DAMAGE_CLIPS. + */ +struct drm_mode_rect { + __s32 x1; + __s32 y1; + __s32 x2; + __s32 y2; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index a4446f452040..298b2e197744 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait { int irq_seq; } drm_i915_irq_wait_t; +/* + * Different modes of per-process Graphics Translation Table, + * see I915_PARAM_HAS_ALIASING_PPGTT + */ +#define I915_GEM_PPGTT_NONE 0 +#define I915_GEM_PPGTT_ALIASING 1 +#define I915_GEM_PPGTT_FULL 2 + /* Ioctl to query kernel params: */ #define I915_PARAM_IRQ_ACTIVE 1 diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index c06d0a5bdd80..91a16b333c69 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -105,14 +105,24 @@ struct drm_msm_gem_new { __u32 handle; /* out */ }; -#define MSM_INFO_IOVA 0x01 - -#define MSM_INFO_FLAGS (MSM_INFO_IOVA) +/* Get or set GEM buffer info. The requested value can be passed + * directly in 'value', or for data larger than 64b 'value' is a + * pointer to userspace buffer, with 'len' specifying the number of + * bytes copied into that buffer. 
For info returned by pointer, + * calling the GEM_INFO ioctl with null 'value' will return the + * required buffer size in 'len' + */ +#define MSM_INFO_GET_OFFSET 0x00 /* get mmap() offset, returned by value */ +#define MSM_INFO_GET_IOVA 0x01 /* get iova, returned by value */ +#define MSM_INFO_SET_NAME 0x02 /* set the debug name (by pointer) */ +#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */ struct drm_msm_gem_info { __u32 handle; /* in */ - __u32 flags; /* in - combination of MSM_INFO_* flags */ - __u64 offset; /* out, mmap() offset or iova */ + __u32 info; /* in - one of MSM_INFO_* */ + __u64 value; /* in or out */ + __u32 len; /* in or out */ + __u32 pad; }; #define MSM_PREP_READ 0x01 @@ -188,8 +198,11 @@ struct drm_msm_gem_submit_cmd { */ #define MSM_SUBMIT_BO_READ 0x0001 #define MSM_SUBMIT_BO_WRITE 0x0002 +#define MSM_SUBMIT_BO_DUMP 0x0004 -#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE) +#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | \ + MSM_SUBMIT_BO_WRITE | \ + MSM_SUBMIT_BO_DUMP) struct drm_msm_gem_submit_bo { __u32 flags; /* in, mask of MSM_SUBMIT_BO_x */ diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 7b6627783608..35c7d813c66e 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -36,6 +36,7 @@ extern "C" { #define DRM_V3D_MMAP_BO 0x03 #define DRM_V3D_GET_PARAM 0x04 #define DRM_V3D_GET_BO_OFFSET 0x05 +#define DRM_V3D_SUBMIT_TFU 0x06 #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) @@ -43,6 +44,7 @@ extern "C" { #define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo) #define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param) #define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset) +#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu) /** * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D @@ -58,10 +60,15 @@ struct drm_v3d_submit_cl { * coordinate shader to determine where primitives land on the screen, * then writes out the state updates and draw calls necessary per tile * to the tile allocation BO. + * + * This BCL will block on any previous BCL submitted on the + * same FD, but not on any RCL or BCLs submitted by other + * clients -- that is left up to the submitter to control + * using in_sync_bcl if necessary. */ __u32 bcl_start; - /** End address of the BCL (first byte after the BCL) */ + /** End address of the BCL (first byte after the BCL) */ __u32 bcl_end; /* Offset of the render command list. @@ -69,10 +76,15 @@ struct drm_v3d_submit_cl { * This is the second set of commands executed, which will either * execute the tiles that have been set up by the BCL, or a fixed set * of tiles (in the case of RCL-only blits). + * + * This RCL will block on this submit's BCL, and any previous + * RCL submitted on the same FD, but not on any RCL or BCLs + * submitted by other clients -- that is left up to the + * submitter to control using in_sync_rcl if necessary. */ __u32 rcl_start; - /** End address of the RCL (first byte after the RCL) */ + /** End address of the RCL (first byte after the RCL) */ __u32 rcl_end; /** An optional sync object to wait on before starting the BCL. 
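
The reworked drm_msm_gem_info above turns the old offset-only query into a small get/set interface: scalar results travel in 'value', larger data goes through a user pointer plus 'len'. A rough user-space sketch, assuming the usual DRM_IOCTL_MSM_GEM_INFO wrapper and an already-created GEM handle:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

/* Give a buffer a debug name, then read back its iova (illustrative only). */
static int msm_bo_name_and_iova(int drm_fd, __u32 handle, __u64 *iova)
{
        static const char name[] = "scanout";
        struct drm_msm_gem_info req = {
                .handle = handle,
                .info   = MSM_INFO_SET_NAME,
                .value  = (uintptr_t)name,      /* larger data is passed by pointer */
                .len    = sizeof(name),
        };

        if (ioctl(drm_fd, DRM_IOCTL_MSM_GEM_INFO, &req))
                return -1;

        memset(&req, 0, sizeof(req));
        req.handle = handle;
        req.info   = MSM_INFO_GET_IOVA;         /* scalar result comes back in .value */
        if (ioctl(drm_fd, DRM_IOCTL_MSM_GEM_INFO, &req))
                return -1;

        *iova = req.value;
        return 0;
}
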
*/ @@ -169,6 +181,7 @@ enum drm_v3d_param { DRM_V3D_PARAM_V3D_CORE0_IDENT0, DRM_V3D_PARAM_V3D_CORE0_IDENT1, DRM_V3D_PARAM_V3D_CORE0_IDENT2, + DRM_V3D_PARAM_SUPPORTS_TFU, }; struct drm_v3d_get_param { @@ -187,6 +200,28 @@ struct drm_v3d_get_bo_offset { __u32 offset; }; +struct drm_v3d_submit_tfu { + __u32 icfg; + __u32 iia; + __u32 iis; + __u32 ica; + __u32 iua; + __u32 ioa; + __u32 ios; + __u32 coef[4]; + /* First handle is the output BO, following are other inputs. + * 0 for unused. + */ + __u32 bo_handles[4]; + /* sync object to block on before running the TFU job. Each TFU + * job will execute in the order submitted to its FD. Synchronization + * against rendering jobs requires using sync objects. + */ + __u32 in_sync; + /* Sync object to signal when the TFU job is done. */ + __u32 out_sync; +}; + #if defined(__cplusplus) } #endif diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h index 9a781f0611df..f06a789f34cd 100644 --- a/include/uapi/drm/virtgpu_drm.h +++ b/include/uapi/drm/virtgpu_drm.h @@ -47,6 +47,13 @@ extern "C" { #define DRM_VIRTGPU_WAIT 0x08 #define DRM_VIRTGPU_GET_CAPS 0x09 +#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01 +#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02 +#define VIRTGPU_EXECBUF_FLAGS (\ + VIRTGPU_EXECBUF_FENCE_FD_IN |\ + VIRTGPU_EXECBUF_FENCE_FD_OUT |\ + 0) + struct drm_virtgpu_map { __u64 offset; /* use for mmap system call */ __u32 handle; @@ -54,12 +61,12 @@ struct drm_virtgpu_map { }; struct drm_virtgpu_execbuffer { - __u32 flags; /* for future use */ + __u32 flags; __u32 size; __u64 command; /* void* */ __u64 bo_handles; __u32 num_bo_handles; - __u32 pad; + __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */ }; #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ @@ -137,7 +144,7 @@ struct drm_virtgpu_get_caps { DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map) #define DRM_IOCTL_VIRTGPU_EXECBUFFER \ - DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\ struct drm_virtgpu_execbuffer) #define DRM_IOCTL_VIRTGPU_GETPARAM \ diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h index ce43d340f010..8387e0af0f76 100644 --- a/include/uapi/linux/aio_abi.h +++ b/include/uapi/linux/aio_abi.h @@ -50,6 +50,8 @@ enum { * * IOCB_FLAG_RESFD - Set if the "aio_resfd" member of the "struct iocb" * is valid. + * IOCB_FLAG_IOPRIO - Set if the "aio_reqprio" member of the "struct iocb" + * is valid. */ #define IOCB_FLAG_RESFD (1 << 0) #define IOCB_FLAG_IOPRIO (1 << 1) diff --git a/include/uapi/linux/android/binder_ctl.h b/include/uapi/linux/android/binder_ctl.h new file mode 100644 index 000000000000..65b2efd1a0a5 --- /dev/null +++ b/include/uapi/linux/android/binder_ctl.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2018 Canonical Ltd. + * + */ + +#ifndef _UAPI_LINUX_BINDER_CTL_H +#define _UAPI_LINUX_BINDER_CTL_H + +#include <linux/android/binder.h> +#include <linux/types.h> +#include <linux/ioctl.h> + +#define BINDERFS_MAX_NAME 255 + +/** + * struct binderfs_device - retrieve information about a new binder device + * @name: the name to use for the new binderfs binder device + * @major: major number allocated for binderfs binder devices + * @minor: minor number allocated for the new binderfs binder device + * + */ +struct binderfs_device { + char name[BINDERFS_MAX_NAME + 1]; + __u8 major; + __u8 minor; +}; + +/** + * Allocate a new binder device. 
+ */ +#define BINDER_CTL_ADD _IOWR('b', 1, struct binderfs_device) + +#endif /* _UAPI_LINUX_BINDER_CTL_H */ + diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index 818ae690ab79..36a7e3f18e69 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -378,6 +378,7 @@ enum { #define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE) #define AUDIT_ARCH_ARMEB (EM_ARM) #define AUDIT_ARCH_CRIS (EM_CRIS|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_CSKY (EM_CSKY|__AUDIT_ARCH_LE) #define AUDIT_ARCH_FRV (EM_FRV) #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) #define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) @@ -411,6 +412,7 @@ enum { #define AUDIT_ARCH_TILEGX32 (EM_TILEGX|__AUDIT_ARCH_LE) #define AUDIT_ARCH_TILEPRO (EM_TILEPRO|__AUDIT_ARCH_LE) #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_XTENSA (EM_XTENSA) #define AUDIT_PERM_EXEC 1 #define AUDIT_PERM_WRITE 2 diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h index df31aa9c9a8c..082119630b49 100644 --- a/include/uapi/linux/auto_fs.h +++ b/include/uapi/linux/auto_fs.h @@ -23,7 +23,7 @@ #define AUTOFS_MIN_PROTO_VERSION 3 #define AUTOFS_MAX_PROTO_VERSION 5 -#define AUTOFS_PROTO_SUBVERSION 3 +#define AUTOFS_PROTO_SUBVERSION 4 /* * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed diff --git a/include/uapi/linux/bfs_fs.h b/include/uapi/linux/bfs_fs.h index 940b04772af8..08f6b4956359 100644 --- a/include/uapi/linux/bfs_fs.h +++ b/include/uapi/linux/bfs_fs.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * include/linux/bfs_fs.h - BFS data structures on disk. - * Copyright (C) 1999 Tigran Aivazian <tigran@veritas.com> + * Copyright (C) 1999-2018 Tigran Aivazian <aivazian.tigran@gmail.com> */ #ifndef _LINUX_BFS_FS_H diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h index 8f08ff9bdea0..6fa38d001d84 100644 --- a/include/uapi/linux/blkzoned.h +++ b/include/uapi/linux/blkzoned.h @@ -141,7 +141,7 @@ struct blk_zone_range { */ #define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report) #define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range) -#define BLKGETZONESZ _IOW(0x12, 132, __u32) -#define BLKGETNRZONES _IOW(0x12, 133, __u32) +#define BLKGETZONESZ _IOR(0x12, 132, __u32) +#define BLKGETNRZONES _IOR(0x12, 133, __u32) #endif /* _UAPI_BLKZONED_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 852dc17ab47a..91c43884f295 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -133,6 +133,14 @@ enum bpf_map_type { BPF_MAP_TYPE_STACK, }; +/* Note that tracing related programs such as + * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} + * are not subject to a stable API since kernel internal data + * structures can change from release to release and may + * therefore break existing tracing BPF programs. Tracing BPF + * programs correspond to /a/ specific kernel which is to be + * analyzed, and not /a/ specific kernel /and/ all future ones. + */ enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, BPF_PROG_TYPE_SOCKET_FILTER, @@ -232,6 +240,20 @@ enum bpf_attach_type { */ #define BPF_F_STRICT_ALIGNMENT (1U << 0) +/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the + * verifier will allow any alignment whatsoever. On platforms + * with strict alignment requirements for loads ands stores (such + * as sparc and mips) the verifier validates that all loads and + * stores provably follow this requirement. 
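
The blkzoned.h hunk above is purely an ioctl-encoding fix: BLKGETZONESZ and BLKGETNRZONES copy a __u32 back to user space, so they must be _IOR rather than _IOW. The calling convention is unchanged; a sketch with a placeholder device path:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
        __u32 zone_sectors = 0, nr_zones = 0;
        int fd = open("/dev/sdX", O_RDONLY);    /* any zoned block device */

        if (fd < 0)
                return 1;
        /* Both ioctls only return data, hence the _IOR direction. */
        if (!ioctl(fd, BLKGETZONESZ, &zone_sectors) &&
            !ioctl(fd, BLKGETNRZONES, &nr_zones))
                printf("%u zones of %u sectors each\n", nr_zones, zone_sectors);
        return 0;
}
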
This flag turns that + * checking and enforcement off. + * + * It is mostly used for testing when we want to validate the + * context and memory access aspects of the verifier, but because + * of an unaligned access the alignment check would trigger before + * the one we are interested in. + */ +#define BPF_F_ANY_ALIGNMENT (1U << 1) + /* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */ #define BPF_PSEUDO_MAP_FD 1 @@ -257,9 +279,6 @@ enum bpf_attach_type { /* Specify numa node during map creation */ #define BPF_F_NUMA_NODE (1U << 2) -/* flags for BPF_PROG_QUERY */ -#define BPF_F_QUERY_EFFECTIVE (1U << 0) - #define BPF_OBJ_NAME_LEN 16U /* Flags for accessing BPF object */ @@ -269,6 +288,12 @@ enum bpf_attach_type { /* Flag for stack_map, store build_id+offset instead of pointer */ #define BPF_F_STACK_BUILD_ID (1U << 5) +/* Zero-initialize hash function seed. This should only be used for testing. */ +#define BPF_F_ZERO_SEED (1U << 6) + +/* flags for BPF_PROG_QUERY */ +#define BPF_F_QUERY_EFFECTIVE (1U << 0) + enum bpf_stack_build_id_status { /* user space need an empty entry to identify end of a trace */ BPF_STACK_BUILD_ID_EMPTY = 0, @@ -326,7 +351,7 @@ union bpf_attr { __u32 log_level; /* verbosity level of verifier */ __u32 log_size; /* size of user buffer */ __aligned_u64 log_buf; /* user supplied buffer */ - __u32 kern_version; /* checked when prog_type=kprobe */ + __u32 kern_version; /* not used */ __u32 prog_flags; char prog_name[BPF_OBJ_NAME_LEN]; __u32 prog_ifindex; /* ifindex of netdev to prep for */ @@ -335,6 +360,13 @@ union bpf_attr { * (context accesses, allowed helpers, etc). */ __u32 expected_attach_type; + __u32 prog_btf_fd; /* fd pointing to BTF type data */ + __u32 func_info_rec_size; /* userspace bpf_func_info size */ + __aligned_u64 func_info; /* func info */ + __u32 func_info_cnt; /* number of bpf_func_info records */ + __u32 line_info_rec_size; /* userspace bpf_line_info size */ + __aligned_u64 line_info; /* line info */ + __u32 line_info_cnt; /* number of bpf_line_info records */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -353,8 +385,11 @@ union bpf_attr { struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ __u32 prog_fd; __u32 retval; - __u32 data_size_in; - __u32 data_size_out; + __u32 data_size_in; /* input: len of data_in */ + __u32 data_size_out; /* input/output: len of data_out + * returns ENOSPC if data_out + * is too small. + */ __aligned_u64 data_in; __aligned_u64 data_out; __u32 repeat; @@ -475,18 +510,6 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_pop_elem(struct bpf_map *map, void *value) - * Description - * Pop an element from *map*. - * Return - * 0 on success, or a negative error in case of failure. - * - * int bpf_map_peek_elem(struct bpf_map *map, void *value) - * Description - * Get an element from *map* without removing it. - * Return - * 0 on success, or a negative error in case of failure. - * * int bpf_probe_read(void *dst, u32 size, const void *src) * Description * For tracing programs, safely attempt to read *size* bytes from @@ -1910,9 +1933,9 @@ union bpf_attr { * is set to metric from route (IPv4/IPv6 only), and ifindex * is set to the device index of the nexthop from the FIB lookup. * - * *plen* argument is the size of the passed in struct. - * *flags* argument can be a combination of one or more of the - * following values: + * *plen* argument is the size of the passed in struct. 
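
The new prog_btf_fd/func_info/line_info members of union bpf_attr let a loader attach BTF-described source information at BPF_PROG_LOAD time. A hedged sketch of how a loader might fill them in (the instruction buffer, BTF fd and record arrays are assumed to exist already):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static __u64 ptr_to_u64(const void *p)
{
        return (__u64)(unsigned long)p;
}

static int load_prog_with_btf(const struct bpf_insn *insns, __u32 insn_cnt,
                              int btf_fd, const struct bpf_func_info *funcs,
                              __u32 nr_funcs, const struct bpf_line_info *lines,
                              __u32 nr_lines)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type          = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insns              = ptr_to_u64(insns);
        attr.insn_cnt           = insn_cnt;
        attr.license            = ptr_to_u64("GPL");
        attr.prog_btf_fd        = btf_fd;       /* BTF object holding the types */
        attr.func_info_rec_size = sizeof(struct bpf_func_info);
        attr.func_info          = ptr_to_u64(funcs);
        attr.func_info_cnt      = nr_funcs;
        attr.line_info_rec_size = sizeof(struct bpf_line_info);
        attr.line_info          = ptr_to_u64(lines);
        attr.line_info_cnt      = nr_lines;

        return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
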
+ * *flags* argument can be a combination of one or more of the + * following values: * * **BPF_FIB_LOOKUP_DIRECT** * Do a direct table lookup vs full lookup using FIB @@ -1921,9 +1944,9 @@ union bpf_attr { * Perform lookup from an egress perspective (default is * ingress). * - * *ctx* is either **struct xdp_md** for XDP programs or - * **struct sk_buff** tc cls_act programs. - * Return + * *ctx* is either **struct xdp_md** for XDP programs or + * **struct sk_buff** tc cls_act programs. + * Return * * < 0 if any input argument is invalid * * 0 on success (packet is forwarded, nexthop neighbor exists) * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the @@ -2068,8 +2091,8 @@ union bpf_attr { * translated to a keycode using the rc keymap, and reported as * an input key down event. After a period a key up event is * generated. This period can be extended by calling either - * **bpf_rc_keydown** () again with the same values, or calling - * **bpf_rc_repeat** (). + * **bpf_rc_keydown**\ () again with the same values, or calling + * **bpf_rc_repeat**\ (). * * Some protocols include a toggle bit, in case the button was * released and pressed again between consecutive scancodes. @@ -2152,29 +2175,30 @@ union bpf_attr { * The *flags* meaning is specific for each map type, * and has to be 0 for cgroup local storage. * - * Depending on the bpf program type, a local storage area - * can be shared between multiple instances of the bpf program, + * Depending on the BPF program type, a local storage area + * can be shared between multiple instances of the BPF program, * running simultaneously. * * A user should care about the synchronization by himself. - * For example, by using the BPF_STX_XADD instruction to alter + * For example, by using the **BPF_STX_XADD** instruction to alter * the shared data. * Return - * Pointer to the local storage area. + * A pointer to the local storage area. * * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description - * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map - * It checks the selected sk is matching the incoming - * request in the skb. + * Select a **SO_REUSEPORT** socket from a + * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. + * It checks the selected socket is matching the incoming + * request in the socket buffer. * Return * 0 on success, or a negative error in case of failure. * - * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for TCP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, - * and if non-NULL, released via **bpf_sk_release**\ (). + * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used @@ -2187,12 +2211,14 @@ union bpf_attr { * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * - * If the *netns* is zero, then the socket lookup table in the - * netns associated with the *ctx* will be used. For the TC hooks, - * this in the netns of the device in the skb. For socket hooks, - * this in the netns of the socket. If *netns* is non-zero, then - * it specifies the ID of the netns relative to the netns - * associated with the *ctx*. 
+ * If the *netns* is a negative signed 32-bit integer, then the + * socket lookup table in the netns associated with the *ctx* will + * will be used. For the TC hooks, this is the netns of the device + * in the skb. For socket hooks, this is the netns of the socket. + * If *netns* is any other signed 32-bit value greater than or + * equal to zero then it specifies the ID of the netns relative to + * the netns associated with the *ctx*. *netns* values beyond the + * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. @@ -2200,13 +2226,15 @@ union bpf_attr { * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return - * Pointer to *struct bpf_sock*, or NULL in case of failure. + * Pointer to **struct bpf_sock**, or **NULL** in case of failure. + * For sockets with reuseport option, the **struct bpf_sock** + * result is from **reuse->socks**\ [] using the hash of the tuple. * - * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags) + * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) * Description * Look for UDP socket matching *tuple*, optionally in a child * network namespace *netns*. The return value must be checked, - * and if non-NULL, released via **bpf_sk_release**\ (). + * and if non-**NULL**, released via **bpf_sk_release**\ (). * * The *ctx* should point to the context of the program, such as * the skb or socket (depending on the hook in use). This is used @@ -2219,12 +2247,14 @@ union bpf_attr { * **sizeof**\ (*tuple*\ **->ipv6**) * Look for an IPv6 socket. * - * If the *netns* is zero, then the socket lookup table in the - * netns associated with the *ctx* will be used. For the TC hooks, - * this in the netns of the device in the skb. For socket hooks, - * this in the netns of the socket. If *netns* is non-zero, then - * it specifies the ID of the netns relative to the netns - * associated with the *ctx*. + * If the *netns* is a negative signed 32-bit integer, then the + * socket lookup table in the netns associated with the *ctx* will + * will be used. For the TC hooks, this is the netns of the device + * in the skb. For socket hooks, this is the netns of the socket. + * If *netns* is any other signed 32-bit value greater than or + * equal to zero then it specifies the ID of the netns relative to + * the netns associated with the *ctx*. *netns* values beyond the + * range of 32-bit integers are reserved for future use. * * All values for *flags* are reserved for future usage, and must * be left at zero. @@ -2232,31 +2262,71 @@ union bpf_attr { * This helper is available only if the kernel was compiled with * **CONFIG_NET** configuration option. * Return - * Pointer to *struct bpf_sock*, or NULL in case of failure. + * Pointer to **struct bpf_sock**, or **NULL** in case of failure. + * For sockets with reuseport option, the **struct bpf_sock** + * result is from **reuse->socks**\ [] using the hash of the tuple. * - * int bpf_sk_release(struct bpf_sock *sk) + * int bpf_sk_release(struct bpf_sock *sock) * Description - * Release the reference held by *sock*. *sock* must be a non-NULL - * pointer that was returned from bpf_sk_lookup_xxx\ (). + * Release the reference held by *sock*. *sock* must be a + * non-**NULL** pointer that was returned from + * **bpf_sk_lookup_xxx**\ (). 
* Return * 0 on success, or a negative error in case of failure. * + * int bpf_map_pop_elem(struct bpf_map *map, void *value) + * Description + * Pop an element from *map*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_map_peek_elem(struct bpf_map *map, void *value) + * Description + * Get an element from *map* without removing it. + * Return + * 0 on success, or a negative error in case of failure. + * * int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags) * Description - * For socket policies, insert *len* bytes into msg at offset + * For socket policies, insert *len* bytes into *msg* at offset * *start*. * * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a - * *msg* it may want to insert metadata or options into the msg. + * *msg* it may want to insert metadata or options into the *msg*. * This can later be read and used by any of the lower layer BPF * hooks. * * This helper may fail if under memory pressure (a malloc * fails) in these cases BPF programs will get an appropriate * error and BPF programs will need to handle them. + * Return + * 0 on success, or a negative error in case of failure. * + * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags) + * Description + * Will remove *pop* bytes from a *msg* starting at byte *start*. + * This may result in **ENOMEM** errors under certain situations if + * an allocation and copy are required due to a full ring buffer. + * However, the helper will try to avoid doing the allocation + * if possible. Other errors can occur if input parameters are + * invalid either due to *start* byte not being valid part of *msg* + * payload and/or *pop* value being to large. * Return * 0 on success, or a negative error in case of failure. + * + * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) + * Description + * This helper is used in programs implementing IR decoding, to + * report a successfully decoded pointer movement. + * + * The *ctx* should point to the lirc sample as passed into + * the program. + * + * This helper is only available is the kernel was compiled with + * the **CONFIG_BPF_LIRC_MODE2** configuration option set to + * "**y**". + * Return + * 0 */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2349,7 +2419,9 @@ union bpf_attr { FN(map_push_elem), \ FN(map_pop_elem), \ FN(map_peek_elem), \ - FN(msg_push_data), + FN(msg_push_data), \ + FN(msg_pop_data), \ + FN(rc_pointer_rel), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -2405,6 +2477,9 @@ enum bpf_func_id { /* BPF_FUNC_perf_event_output for sk_buff input context. */ #define BPF_F_CTXLEN_MASK (0xfffffULL << 32) +/* Current network namespace */ +#define BPF_F_CURRENT_NETNS (-1L) + /* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET, @@ -2422,6 +2497,12 @@ enum bpf_lwt_encap_mode { BPF_LWT_ENCAP_SEG6_INLINE }; +#define __bpf_md_ptr(type, name) \ +union { \ + type name; \ + __u64 :64; \ +} __attribute__((aligned(8))) + /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ @@ -2456,7 +2537,9 @@ struct __sk_buff { /* ... here. 
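
The netns parameter of the socket-lookup helpers is now a full 64-bit value so that BPF_F_CURRENT_NETNS (-1) can be told apart from real, non-negative namespace IDs. A sketch from the program side, assuming libbpf's bpf_helpers.h/bpf_endian.h declarations:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("classifier")
int find_owner(struct __sk_buff *skb)
{
        struct bpf_sock_tuple tuple = {
                .ipv4.daddr = bpf_htonl(0x7f000001),    /* 127.0.0.1 */
                .ipv4.dport = bpf_htons(8080),
        };
        struct bpf_sock *sk;

        /* BPF_F_CURRENT_NETNS selects the netns of the skb itself. */
        sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                               BPF_F_CURRENT_NETNS, 0);
        if (sk)
                bpf_sk_release(sk);     /* acquired references must be released */
        return 0;
}

char _license[] SEC("license") = "GPL";
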
*/ __u32 data_meta; - struct bpf_flow_keys *flow_keys; + __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); + __u64 tstamp; + __u32 wire_len; }; struct bpf_tunnel_key { @@ -2572,8 +2655,8 @@ enum sk_action { * be added to the end of this structure */ struct sk_msg_md { - void *data; - void *data_end; + __bpf_md_ptr(void *, data); + __bpf_md_ptr(void *, data_end); __u32 family; __u32 remote_ip4; /* Stored in network byte order */ @@ -2582,6 +2665,7 @@ struct sk_msg_md { __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ + __u32 size; /* Total size of sk_msg */ }; struct sk_reuseport_md { @@ -2589,8 +2673,9 @@ struct sk_reuseport_md { * Start of directly accessible data. It begins from * the tcp/udp header. */ - void *data; - void *data_end; /* End of directly accessible data */ + __bpf_md_ptr(void *, data); + /* End of directly accessible data */ + __bpf_md_ptr(void *, data_end); /* * Total length of packet (starting from the tcp/udp header). * Note that the directly accessible bytes (data_end - data) @@ -2631,6 +2716,18 @@ struct bpf_prog_info { __u32 nr_jited_func_lens; __aligned_u64 jited_ksyms; __aligned_u64 jited_func_lens; + __u32 btf_id; + __u32 func_info_rec_size; + __aligned_u64 func_info; + __u32 nr_func_info; + __u32 nr_line_info; + __aligned_u64 line_info; + __aligned_u64 jited_line_info; + __u32 nr_jited_line_info; + __u32 line_info_rec_size; + __u32 jited_line_info_rec_size; + __u32 nr_prog_tags; + __aligned_u64 prog_tags; } __attribute__((aligned(8))); struct bpf_map_info { @@ -2942,4 +3039,19 @@ struct bpf_flow_keys { }; }; +struct bpf_func_info { + __u32 insn_off; + __u32 type_id; +}; + +#define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) +#define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) + +struct bpf_line_info { + __u32 insn_off; + __u32 file_name_off; + __u32 line_off; + __u32 line_col; +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h index 972265f32871..7b7475ef2f17 100644 --- a/include/uapi/linux/btf.h +++ b/include/uapi/linux/btf.h @@ -34,13 +34,16 @@ struct btf_type { * bits 0-15: vlen (e.g. # of struct's members) * bits 16-23: unused * bits 24-27: kind (e.g. int, ptr, array...etc) - * bits 28-31: unused + * bits 28-30: unused + * bit 31: kind_flag, currently used by + * struct, union and fwd */ __u32 info; /* "size" is used by INT, ENUM, STRUCT and UNION. * "size" tells the size of the type it is describing. * - * "type" is used by PTR, TYPEDEF, VOLATILE, CONST and RESTRICT. + * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, + * FUNC and FUNC_PROTO. * "type" is a type_id referring to another type. */ union { @@ -51,6 +54,7 @@ struct btf_type { #define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f) #define BTF_INFO_VLEN(info) ((info) & 0xffff) +#define BTF_INFO_KFLAG(info) ((info) >> 31) #define BTF_KIND_UNKN 0 /* Unknown */ #define BTF_KIND_INT 1 /* Integer */ @@ -64,8 +68,10 @@ struct btf_type { #define BTF_KIND_VOLATILE 9 /* Volatile */ #define BTF_KIND_CONST 10 /* Const */ #define BTF_KIND_RESTRICT 11 /* Restrict */ -#define BTF_KIND_MAX 11 -#define NR_BTF_KINDS 12 +#define BTF_KIND_FUNC 12 /* Function */ +#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ +#define BTF_KIND_MAX 13 +#define NR_BTF_KINDS 14 /* For some specific BTF_KIND, "struct btf_type" is immediately * followed by extra data. 
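
bpf_line_info packs line and column into a single 32-bit word, and the two macros above are the whole decoding ABI (22 bits of line number, 10 bits of column). A consumer of the prog-info dump might print a record roughly like this:

#include <stdio.h>
#include <linux/bpf.h>

/* 'btf_strings' is assumed to be the BTF string section that backs the
 * file_name_off/line_off offsets of this program's line info. */
static void print_line_info(const struct bpf_line_info *li,
                            const char *btf_strings)
{
        unsigned int line = BPF_LINE_INFO_LINE_NUM(li->line_col);  /* bits 31..10 */
        unsigned int col  = BPF_LINE_INFO_LINE_COL(li->line_col);  /* bits 9..0 */

        printf("insn %u: %s:%u:%u\n", li->insn_off,
               btf_strings + li->file_name_off, line, col);
}
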
@@ -107,7 +113,29 @@ struct btf_array { struct btf_member { __u32 name_off; __u32 type; - __u32 offset; /* offset in bits */ + /* If the type info kind_flag is set, the btf_member offset + * contains both member bitfield size and bit offset. The + * bitfield size is set for bitfield members. If the type + * info kind_flag is not set, the offset contains only bit + * offset. + */ + __u32 offset; +}; + +/* If the struct/union type info kind_flag is set, the + * following two macros are used to access bitfield_size + * and bit_offset from btf_member.offset. + */ +#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24) +#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff) + +/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param". + * The exact number of btf_param is stored in the vlen (of the + * info in "struct btf_type"). + */ +struct btf_param { + __u32 name_off; + __u32 type; }; #endif /* _UAPI__LINUX_BTF_H__ */ diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 5ca1d21fc4a7..e0763bc4158e 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -269,6 +269,7 @@ struct btrfs_ioctl_fs_info_args { #define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7) #define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8) #define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9) +#define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10) struct btrfs_ioctl_feature_flags { __u64 compat_flags; diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index aff1356c2bb8..e974f4bb5378 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -458,6 +458,7 @@ struct btrfs_free_space_header { #define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) #define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34) #define BTRFS_SUPER_FLAG_CHANGING_FSID (1ULL << 35) +#define BTRFS_SUPER_FLAG_CHANGING_FSID_V2 (1ULL << 36) /* diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 6dafbc3e4414..4dc1603919ce 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -76,45 +76,69 @@ struct crypto_user_alg { __u32 cru_flags; }; -struct crypto_stat { - char type[CRYPTO_MAX_NAME]; - union { - __u32 stat_encrypt_cnt; - __u32 stat_compress_cnt; - __u32 stat_generate_cnt; - __u32 stat_hash_cnt; - __u32 stat_setsecret_cnt; - }; - union { - __u64 stat_encrypt_tlen; - __u64 stat_compress_tlen; - __u64 stat_generate_tlen; - __u64 stat_hash_tlen; - }; - union { - __u32 stat_akcipher_err_cnt; - __u32 stat_cipher_err_cnt; - __u32 stat_compress_err_cnt; - __u32 stat_aead_err_cnt; - __u32 stat_hash_err_cnt; - __u32 stat_rng_err_cnt; - __u32 stat_kpp_err_cnt; - }; - union { - __u32 stat_decrypt_cnt; - __u32 stat_decompress_cnt; - __u32 stat_seed_cnt; - __u32 stat_generate_public_key_cnt; - }; - union { - __u64 stat_decrypt_tlen; - __u64 stat_decompress_tlen; - }; - union { - __u32 stat_verify_cnt; - __u32 stat_compute_shared_secret_cnt; - }; - __u32 stat_sign_cnt; +struct crypto_stat_aead { + char type[CRYPTO_MAX_NAME]; + __u64 stat_encrypt_cnt; + __u64 stat_encrypt_tlen; + __u64 stat_decrypt_cnt; + __u64 stat_decrypt_tlen; + __u64 stat_err_cnt; +}; + +struct crypto_stat_akcipher { + char type[CRYPTO_MAX_NAME]; + __u64 stat_encrypt_cnt; + __u64 stat_encrypt_tlen; + __u64 stat_decrypt_cnt; + __u64 stat_decrypt_tlen; + __u64 stat_verify_cnt; + __u64 stat_sign_cnt; + __u64 stat_err_cnt; +}; + +struct crypto_stat_cipher { + char type[CRYPTO_MAX_NAME]; + __u64 stat_encrypt_cnt; + __u64 stat_encrypt_tlen; + __u64 
stat_decrypt_cnt; + __u64 stat_decrypt_tlen; + __u64 stat_err_cnt; +}; + +struct crypto_stat_compress { + char type[CRYPTO_MAX_NAME]; + __u64 stat_compress_cnt; + __u64 stat_compress_tlen; + __u64 stat_decompress_cnt; + __u64 stat_decompress_tlen; + __u64 stat_err_cnt; +}; + +struct crypto_stat_hash { + char type[CRYPTO_MAX_NAME]; + __u64 stat_hash_cnt; + __u64 stat_hash_tlen; + __u64 stat_err_cnt; +}; + +struct crypto_stat_kpp { + char type[CRYPTO_MAX_NAME]; + __u64 stat_setsecret_cnt; + __u64 stat_generate_public_key_cnt; + __u64 stat_compute_shared_secret_cnt; + __u64 stat_err_cnt; +}; + +struct crypto_stat_rng { + char type[CRYPTO_MAX_NAME]; + __u64 stat_generate_cnt; + __u64 stat_generate_tlen; + __u64 stat_seed_cnt; + __u64 stat_err_cnt; +}; + +struct crypto_stat_larval { + char type[CRYPTO_MAX_NAME]; }; struct crypto_report_larval { diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 79407bbd296d..6e52d3660654 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -163,6 +163,11 @@ enum devlink_param_cmode { DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1 }; +enum devlink_param_fw_load_policy_value { + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER, + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH, +}; + enum devlink_attr { /* don't change the order or add anything between, this is ABI! */ DEVLINK_ATTR_UNSPEC, diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index 93722e60204c..0c3000faedba 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h @@ -34,6 +34,7 @@ #define EM_M32R 88 /* Renesas M32R */ #define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */ #define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */ +#define EM_XTENSA 94 /* Tensilica Xtensa Architecture */ #define EM_BLACKFIN 106 /* ADI Blackfin Processor */ #define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ #define EM_TI_C6000 140 /* TI C6X DSPs */ @@ -43,6 +44,7 @@ #define EM_TILEGX 191 /* Tilera TILE-Gx */ #define EM_RISCV 243 /* RISC-V */ #define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */ +#define EM_CSKY 252 /* C-SKY */ #define EM_FRV 0x5441 /* Fujitsu FR-V */ /* diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index c5358e0ae7c5..e4d6ddd93567 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -420,10 +420,12 @@ typedef struct elf64_shdr { #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ #define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ #define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */ +#define NT_ARM_PAC_MASK 0x406 /* ARM pointer authentication code masks */ #define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ #define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */ #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ #define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */ +#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */ /* Note header in a PT_NOTE section */ typedef struct elf32_note { diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index c8f8e2455bf3..17be76aeb468 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -882,7 +882,7 @@ struct ethtool_rx_flow_spec { __u32 location; }; -/* How rings are layed out when accessing virtual functions or +/* How rings are laid out when accessing virtual functions or * offloaded queues is device specific. 
To allow users to do flow * steering and specify these queues the ring cookie is partitioned * into a 32bit queue index with an 8 bit virtual function id. @@ -891,7 +891,7 @@ struct ethtool_rx_flow_spec { * devices start supporting PCIe w/ARI. However at the moment I * do not know of any devices that support this so I do not reserve * space for this at this time. If a future patch consumes the next - * byte it should be aware of this possiblity. + * byte it should be aware of this possibility. */ #define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL #define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index b86740d1c50a..909c98fcace2 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -10,11 +10,13 @@ #define FAN_CLOSE_WRITE 0x00000008 /* Writtable file closed */ #define FAN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */ #define FAN_OPEN 0x00000020 /* File was opened */ +#define FAN_OPEN_EXEC 0x00001000 /* File was opened for exec */ #define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ #define FAN_OPEN_PERM 0x00010000 /* File open in perm check */ #define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */ +#define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */ #define FAN_ONDIR 0x40000000 /* event occurred against dir */ diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h index 6cd9b198b7c6..b6aac7ee1f67 100644 --- a/include/uapi/linux/fb.h +++ b/include/uapi/linux/fb.h @@ -393,11 +393,9 @@ struct fb_cursor { struct fb_image image; /* Cursor image */ }; -#ifdef CONFIG_FB_BACKLIGHT /* Settings for the generic backlight code */ #define FB_BACKLIGHT_LEVELS 128 #define FB_BACKLIGHT_MAX 0xFF -#endif #endif /* _UAPI_LINUX_FB_H */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index a441ea1bfe6d..53a22e8e0408 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -14,6 +14,11 @@ #include <linux/ioctl.h> #include <linux/types.h> +/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */ +#if !defined(__KERNEL__) +#include <linux/mount.h> +#endif + /* * It's silly to have NR_OPEN bigger than NR_FILE, but you can change * the file limit at runtime and only root can increase the per-process @@ -101,57 +106,6 @@ struct inodes_stat_t { #define NR_FILE 8192 /* this can well be larger on a larger system */ - -/* - * These are the fs-independent mount-flags: up to 32 flags are supported - */ -#define MS_RDONLY 1 /* Mount read-only */ -#define MS_NOSUID 2 /* Ignore suid and sgid bits */ -#define MS_NODEV 4 /* Disallow access to device special files */ -#define MS_NOEXEC 8 /* Disallow program execution */ -#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ -#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ -#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ -#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ -#define MS_NOATIME 1024 /* Do not update access times. */ -#define MS_NODIRATIME 2048 /* Do not update directory access times */ -#define MS_BIND 4096 -#define MS_MOVE 8192 -#define MS_REC 16384 -#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. - MS_VERBOSE is deprecated. 
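
FAN_OPEN_EXEC reports opens made for execution separately from plain FAN_OPEN, with FAN_OPEN_EXEC_PERM as the blocking permission-check variant. A minimal listener sketch (the mount point is a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
        int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

        if (fd < 0)
                return 1;
        /* Watch a whole mount, but only for exec-opens. */
        if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
                          FAN_OPEN_EXEC, AT_FDCWD, "/") < 0)
                perror("fanotify_mark");
        /* ... read(fd, buf, ...) and walk struct fanotify_event_metadata ... */
        return close(fd);
}
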
*/ -#define MS_SILENT 32768 -#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ -#define MS_UNBINDABLE (1<<17) /* change to unbindable */ -#define MS_PRIVATE (1<<18) /* change to private */ -#define MS_SLAVE (1<<19) /* change to slave */ -#define MS_SHARED (1<<20) /* change to shared */ -#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ -#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ -#define MS_I_VERSION (1<<23) /* Update inode I_version field */ -#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ -#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ - -/* These sb flags are internal to the kernel */ -#define MS_SUBMOUNT (1<<26) -#define MS_NOREMOTELOCK (1<<27) -#define MS_NOSEC (1<<28) -#define MS_BORN (1<<29) -#define MS_ACTIVE (1<<30) -#define MS_NOUSER (1<<31) - -/* - * Superblock flags that can be altered by MS_REMOUNT - */ -#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ - MS_LAZYTIME) - -/* - * Old magic mount flag and mask - */ -#define MS_MGC_VAL 0xC0ED0000 -#define MS_MGC_MSK 0xffff0000 - /* * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR. */ diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h index eea5d02c58de..74a8609fcb4d 100644 --- a/include/uapi/linux/hash_info.h +++ b/include/uapi/linux/hash_info.h @@ -33,6 +33,8 @@ enum hash_algo { HASH_ALGO_TGR_160, HASH_ALGO_TGR_192, HASH_ALGO_SM3_256, + HASH_ALGO_STREEBOG_256, + HASH_ALGO_STREEBOG_512, HASH_ALGO__LAST }; diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index e41eda3c71f1..773e476a8e54 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -292,4 +292,25 @@ struct br_mcast_stats { __u64 mcast_bytes[BR_MCAST_DIR_SIZE]; __u64 mcast_packets[BR_MCAST_DIR_SIZE]; }; + +/* bridge boolean options + * BR_BOOLOPT_NO_LL_LEARN - disable learning from link-local packets + * + * IMPORTANT: if adding a new option do not forget to handle + * it in br_boolopt_toggle/get and bridge sysfs + */ +enum br_boolopt_id { + BR_BOOLOPT_NO_LL_LEARN, + BR_BOOLOPT_MAX +}; + +/* struct br_boolopt_multi - change multiple bridge boolean options + * + * @optval: new option values (bit per option) + * @optmask: options to change (bit per option) + */ +struct br_boolopt_multi { + __u32 optval; + __u32 optmask; +}; #endif /* _UAPI_LINUX_IF_BRIDGE_H */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 1debfa42cba1..d6533828123a 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -288,6 +288,7 @@ enum { IFLA_BR_MCAST_IGMP_VERSION, IFLA_BR_MCAST_MLD_VERSION, IFLA_BR_VLAN_STATS_PER_PORT, + IFLA_BR_MULTI_BOOLOPT, __IFLA_BR_MAX, }; @@ -533,6 +534,7 @@ enum { IFLA_VXLAN_LABEL, IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, + IFLA_VXLAN_DF, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -542,6 +544,14 @@ struct ifla_vxlan_port_range { __be16 high; }; +enum ifla_vxlan_df { + VXLAN_DF_UNSET = 0, + VXLAN_DF_SET, + VXLAN_DF_INHERIT, + __VXLAN_DF_END, + VXLAN_DF_MAX = __VXLAN_DF_END - 1, +}; + /* GENEVE section */ enum { IFLA_GENEVE_UNSPEC, @@ -557,10 +567,19 @@ enum { IFLA_GENEVE_UDP_ZERO_CSUM6_RX, IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, + IFLA_GENEVE_DF, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) +enum ifla_geneve_df { + GENEVE_DF_UNSET = 0, + GENEVE_DF_SET, + GENEVE_DF_INHERIT, + __GENEVE_DF_END, + GENEVE_DF_MAX = __GENEVE_DF_END - 1, +}; + /* PPP section */ enum { 
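
The bridge boolean-option interface above is deliberately generic: one bit per enum br_boolopt_id entry, with optmask selecting which options to touch and optval carrying their new values, sent as the new IFLA_BR_MULTI_BOOLOPT attribute of an RTM_NEWLINK message. The encoding itself is just:

#include <linux/if_bridge.h>

/* Stop learning from link-local packets and leave every other boolean
 * option untouched (netlink plumbing omitted). */
static const struct br_boolopt_multi no_ll_learn = {
        .optmask = 1u << BR_BOOLOPT_NO_LL_LEARN,        /* options to change */
        .optval  = 1u << BR_BOOLOPT_NO_LL_LEARN,        /* new value: enabled */
};
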
IFLA_PPP_UNSPEC, diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h index ee432cd3018c..23a6753b37df 100644 --- a/include/uapi/linux/if_tun.h +++ b/include/uapi/linux/if_tun.h @@ -59,6 +59,7 @@ #define TUNGETVNETBE _IOR('T', 223, int) #define TUNSETSTEERINGEBPF _IOR('T', 224, int) #define TUNSETFILTEREBPF _IOR('T', 225, int) +#define TUNSETCARRIER _IOW('T', 226, int) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 1b3d148c4560..7d9105533c7b 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -160,4 +160,24 @@ enum { }; #define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1) + +#define TUNNEL_CSUM __cpu_to_be16(0x01) +#define TUNNEL_ROUTING __cpu_to_be16(0x02) +#define TUNNEL_KEY __cpu_to_be16(0x04) +#define TUNNEL_SEQ __cpu_to_be16(0x08) +#define TUNNEL_STRICT __cpu_to_be16(0x10) +#define TUNNEL_REC __cpu_to_be16(0x20) +#define TUNNEL_VERSION __cpu_to_be16(0x40) +#define TUNNEL_NO_KEY __cpu_to_be16(0x80) +#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100) +#define TUNNEL_OAM __cpu_to_be16(0x0200) +#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400) +#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800) +#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000) +#define TUNNEL_NOCACHE __cpu_to_be16(0x2000) +#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000) + +#define TUNNEL_OPTIONS_PRESENT \ + (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT) + #endif /* _UAPI_IF_TUNNEL_H_ */ diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 48e8a225b985..f6052e70bf40 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -266,10 +266,14 @@ struct sockaddr_in { #define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000) #define IN_MULTICAST(a) IN_CLASSD(a) -#define IN_MULTICAST_NET 0xF0000000 +#define IN_MULTICAST_NET 0xe0000000 -#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) -#define IN_BADCLASS(a) IN_EXPERIMENTAL((a)) +#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) +#define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) + +#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) +#define IN_CLASSE_NET 0xffffffff +#define IN_CLASSE_NSHIFT 0 /* Address to accept any incoming messages. 
*/ #define INADDR_ANY ((unsigned long int) 0x00000000) diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 6d180cc60a5d..7f14d4a66c28 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -717,6 +717,7 @@ */ #define REL_RESERVED 0x0a #define REL_WHEEL_HI_RES 0x0b +#define REL_HWHEEL_HI_RES 0x0c #define REL_MAX 0x0f #define REL_CNT (REL_MAX+1) diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index f5ff8a76e208..e622fd1fbd46 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -83,11 +83,11 @@ struct kfd_ioctl_set_cu_mask_args { }; struct kfd_ioctl_get_queue_wave_state_args { - uint64_t ctl_stack_address; /* to KFD */ - uint32_t ctl_stack_used_size; /* from KFD */ - uint32_t save_area_used_size; /* from KFD */ - uint32_t queue_id; /* to KFD */ - uint32_t pad; + __u64 ctl_stack_address; /* to KFD */ + __u32 ctl_stack_used_size; /* from KFD */ + __u32 save_area_used_size; /* from KFD */ + __u32 queue_id; /* to KFD */ + __u32 pad; }; /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */ @@ -255,10 +255,10 @@ struct kfd_hsa_memory_exception_data { /* hw exception data */ struct kfd_hsa_hw_exception_data { - uint32_t reset_type; - uint32_t reset_cause; - uint32_t memory_lost; - uint32_t gpu_id; + __u32 reset_type; + __u32 reset_cause; + __u32 memory_lost; + __u32 gpu_id; }; /* Event data */ @@ -398,6 +398,24 @@ struct kfd_ioctl_unmap_memory_from_gpu_args { __u32 n_success; /* to/from KFD */ }; +struct kfd_ioctl_get_dmabuf_info_args { + __u64 size; /* from KFD */ + __u64 metadata_ptr; /* to KFD */ + __u32 metadata_size; /* to KFD (space allocated by user) + * from KFD (actual metadata size) + */ + __u32 gpu_id; /* from KFD */ + __u32 flags; /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */ + __u32 dmabuf_fd; /* to KFD */ +}; + +struct kfd_ioctl_import_dmabuf_args { + __u64 va_addr; /* to KFD */ + __u64 handle; /* from KFD */ + __u32 gpu_id; /* to KFD */ + __u32 dmabuf_fd; /* to KFD */ +}; + #define AMDKFD_IOCTL_BASE 'K' #define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) #define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) @@ -486,7 +504,13 @@ struct kfd_ioctl_unmap_memory_from_gpu_args { #define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \ AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args) +#define AMDKFD_IOC_GET_DMABUF_INFO \ + AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args) + +#define AMDKFD_IOC_IMPORT_DMABUF \ + AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x1C +#define AMDKFD_COMMAND_END 0x1E #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2b7a652c9fa4..6d4ea4b6c922 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -492,6 +492,17 @@ struct kvm_dirty_log { }; }; +/* for KVM_CLEAR_DIRTY_LOG */ +struct kvm_clear_dirty_log { + __u32 slot; + __u32 num_pages; + __u64 first_page; + union { + void __user *dirty_bitmap; /* one bit per page */ + __u64 padding2; + }; +}; + /* for KVM_SET_SIGNAL_MASK */ struct kvm_signal_mask { __u32 len; @@ -975,6 +986,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 #define KVM_CAP_EXCEPTION_PAYLOAD 164 #define KVM_CAP_ARM_VM_IPA_SIZE 165 +#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166 +#define KVM_CAP_HYPERV_CPUID 167 #ifdef KVM_CAP_IRQ_ROUTING @@ -1421,6 +1434,12 @@ struct kvm_enc_region { #define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, 
struct kvm_nested_state) #define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state) +/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */ +#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log) + +/* Available with KVM_CAP_HYPERV_CPUID */ +#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2) + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 96c24478d8ce..f8c00045d537 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -73,6 +73,7 @@ #define DAXFS_MAGIC 0x64646178 #define BINFMTFS_MAGIC 0x42494e4d #define DEVPTS_SUPER_MAGIC 0x1cd1 +#define BINDERFS_SUPER_MAGIC 0x6c6f6f70 #define FUTEXFS_SUPER_MAGIC 0xBAD1DEA #define PIPEFS_MAGIC 0x50495045 #define PROC_SUPER_MAGIC 0x9fa0 diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h index 45f369dc0a42..00c08120f3ba 100644 --- a/include/uapi/linux/mmc/ioctl.h +++ b/include/uapi/linux/mmc/ioctl.h @@ -5,7 +5,10 @@ #include <linux/types.h> struct mmc_ioc_cmd { - /* Implies direction of data. true = write, false = read */ + /* + * Direction of data: nonzero = write, zero = read. + * Bit 31 selects 'Reliable Write' for RPMB. + */ int write_flag; /* Application-specific command. true = precede with CMD55 */ diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h new file mode 100644 index 000000000000..3f9ec42510b0 --- /dev/null +++ b/include/uapi/linux/mount.h @@ -0,0 +1,58 @@ +#ifndef _UAPI_LINUX_MOUNT_H +#define _UAPI_LINUX_MOUNT_H + +/* + * These are the fs-independent mount-flags: up to 32 flags are supported + * + * Usage of these is restricted within the kernel to core mount(2) code and + * callers of sys_mount() only. Filesystems should be using the SB_* + * equivalent instead. + */ +#define MS_RDONLY 1 /* Mount read-only */ +#define MS_NOSUID 2 /* Ignore suid and sgid bits */ +#define MS_NODEV 4 /* Disallow access to device special files */ +#define MS_NOEXEC 8 /* Disallow program execution */ +#define MS_SYNCHRONOUS 16 /* Writes are synced at once */ +#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ +#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ +#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ +#define MS_NOATIME 1024 /* Do not update access times. */ +#define MS_NODIRATIME 2048 /* Do not update directory access times */ +#define MS_BIND 4096 +#define MS_MOVE 8192 +#define MS_REC 16384 +#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. + MS_VERBOSE is deprecated. */ +#define MS_SILENT 32768 +#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define MS_UNBINDABLE (1<<17) /* change to unbindable */ +#define MS_PRIVATE (1<<18) /* change to private */ +#define MS_SLAVE (1<<19) /* change to slave */ +#define MS_SHARED (1<<20) /* change to shared */ +#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. 
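
With KVM_CAP_MANUAL_DIRTY_LOG_PROTECT enabled, KVM_GET_DIRTY_LOG stops write-protecting pages itself; user space re-arms dirty tracking explicitly for the ranges it has processed. Roughly (vm_fd and the memslot layout are assumed to exist already):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Clear (and re-protect) 'num_pages' pages starting at 'first_page' in
 * memslot 'slot'; 'bitmap' has one bit per page in that range. */
static int clear_dirty(int vm_fd, __u32 slot, __u64 first_page,
                       __u32 num_pages, void *bitmap)
{
        struct kvm_clear_dirty_log clr = {
                .slot         = slot,
                .num_pages    = num_pages,
                .first_page   = first_page,
                .dirty_bitmap = bitmap,
        };

        return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clr);
}
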
*/ +#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ +#define MS_I_VERSION (1<<23) /* Update inode I_version field */ +#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ + +/* These sb flags are internal to the kernel */ +#define MS_SUBMOUNT (1<<26) +#define MS_NOREMOTELOCK (1<<27) +#define MS_NOSEC (1<<28) +#define MS_BORN (1<<29) +#define MS_ACTIVE (1<<30) +#define MS_NOUSER (1<<31) + +/* + * Superblock flags that can be altered by MS_REMOUNT + */ +#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ + MS_LAZYTIME) + +/* + * Old magic mount flag and mask + */ +#define MS_MGC_VAL 0xC0ED0000 +#define MS_MGC_MSK 0xffff0000 + +#endif /* _UAPI_LINUX_MOUNT_H */ diff --git a/include/uapi/linux/msdos_fs.h b/include/uapi/linux/msdos_fs.h index fde753735aba..a5773899f4d9 100644 --- a/include/uapi/linux/msdos_fs.h +++ b/include/uapi/linux/msdos_fs.h @@ -58,9 +58,6 @@ #define MSDOS_DOT ". " /* ".", padded to MSDOS_NAME chars */ #define MSDOS_DOTDOT ".. " /* "..", padded to MSDOS_NAME chars */ -#define FAT_FIRST_ENT(s, x) ((MSDOS_SB(s)->fat_bits == 32 ? 0x0FFFFF00 : \ - MSDOS_SB(s)->fat_bits == 16 ? 0xFF00 : 0xF00) | (x)) - /* start of data cluster's entry (number of reserved clusters) */ #define FAT_START_ENT 2 @@ -68,8 +65,6 @@ #define MAX_FAT12 0xFF4 #define MAX_FAT16 0xFFF4 #define MAX_FAT32 0x0FFFFFF6 -#define MAX_FAT(s) (MSDOS_SB(s)->fat_bits == 32 ? MAX_FAT32 : \ - MSDOS_SB(s)->fat_bits == 16 ? MAX_FAT16 : MAX_FAT12) /* bad cluster mark */ #define BAD_FAT12 0xFF7 @@ -135,7 +130,7 @@ struct fat_boot_sector { for mount state. */ __u8 signature; /* extended boot signature */ __u8 vol_id[4]; /* volume ID */ - __u8 vol_label[11]; /* volume label */ + __u8 vol_label[MSDOS_NAME]; /* volume label */ __u8 fs_type[8]; /* file system type */ /* other fields are not added here */ } fat16; @@ -158,7 +153,7 @@ struct fat_boot_sector { for mount state. */ __u8 signature; /* extended boot signature */ __u8 vol_id[4]; /* volume ID */ - __u8 vol_label[11]; /* volume label */ + __u8 vol_label[MSDOS_NAME]; /* volume label */ __u8 fs_type[8]; /* file system type */ /* other fields are not added here */ } fat32; diff --git a/include/uapi/linux/ncsi.h b/include/uapi/linux/ncsi.h index 0a26a5576645..a3f87c54fdb3 100644 --- a/include/uapi/linux/ncsi.h +++ b/include/uapi/linux/ncsi.h @@ -26,6 +26,12 @@ * @NCSI_CMD_SEND_CMD: send NC-SI command to network card. * Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID * and NCSI_ATTR_CHANNEL_ID. + * @NCSI_CMD_SET_PACKAGE_MASK: set a whitelist of allowed packages. + * Requires NCSI_ATTR_IFINDEX and NCSI_ATTR_PACKAGE_MASK. + * @NCSI_CMD_SET_CHANNEL_MASK: set a whitelist of allowed channels. + * Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID, and + * NCSI_ATTR_CHANNEL_MASK. If NCSI_ATTR_CHANNEL_ID is present it sets + * the primary channel. * @NCSI_CMD_MAX: highest command number */ enum ncsi_nl_commands { @@ -34,6 +40,8 @@ enum ncsi_nl_commands { NCSI_CMD_SET_INTERFACE, NCSI_CMD_CLEAR_INTERFACE, NCSI_CMD_SEND_CMD, + NCSI_CMD_SET_PACKAGE_MASK, + NCSI_CMD_SET_CHANNEL_MASK, __NCSI_CMD_AFTER_LAST, NCSI_CMD_MAX = __NCSI_CMD_AFTER_LAST - 1 @@ -48,6 +56,10 @@ enum ncsi_nl_commands { * @NCSI_ATTR_PACKAGE_ID: package ID * @NCSI_ATTR_CHANNEL_ID: channel ID * @NCSI_ATTR_DATA: command payload + * @NCSI_ATTR_MULTI_FLAG: flag to signal that multi-mode should be enabled with + * NCSI_CMD_SET_PACKAGE_MASK or NCSI_CMD_SET_CHANNEL_MASK. 
+ * @NCSI_ATTR_PACKAGE_MASK: 32-bit mask of allowed packages. + * @NCSI_ATTR_CHANNEL_MASK: 32-bit mask of allowed channels. * @NCSI_ATTR_MAX: highest attribute number */ enum ncsi_nl_attrs { @@ -57,6 +69,9 @@ enum ncsi_nl_attrs { NCSI_ATTR_PACKAGE_ID, NCSI_ATTR_CHANNEL_ID, NCSI_ATTR_DATA, + NCSI_ATTR_MULTI_FLAG, + NCSI_ATTR_PACKAGE_MASK, + NCSI_ATTR_CHANNEL_MASK, __NCSI_ATTR_AFTER_LAST, NCSI_ATTR_MAX = __NCSI_ATTR_AFTER_LAST - 1 diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 998155444e0d..cd144e3099a3 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -28,6 +28,7 @@ enum { NDA_MASTER, NDA_LINK_NETNSID, NDA_SRC_VNI, + NDA_PROTOCOL, /* Originator of entry */ __NDA_MAX }; diff --git a/include/uapi/linux/net_namespace.h b/include/uapi/linux/net_namespace.h index 0187c74d8889..9f9956809565 100644 --- a/include/uapi/linux/net_namespace.h +++ b/include/uapi/linux/net_namespace.h @@ -16,6 +16,8 @@ enum { NETNSA_NSID, NETNSA_PID, NETNSA_FD, + NETNSA_TARGET_NSID, + NETNSA_CURRENT_NSID, __NETNSA_MAX, }; diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 97ff3c17ec4d..e5b39721c6e4 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -155,8 +155,8 @@ enum txtime_flags { }; struct sock_txtime { - clockid_t clockid; /* reference clockid */ - __u32 flags; /* as defined by enum txtime_flags */ + __kernel_clockid_t clockid;/* reference clockid */ + __u32 flags; /* as defined by enum txtime_flags */ }; #endif /* _NET_TIMESTAMPING_H */ diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h index cca10e767cd8..ca9e63d6e0e4 100644 --- a/include/uapi/linux/netfilter.h +++ b/include/uapi/linux/netfilter.h @@ -34,10 +34,6 @@ /* only for userspace compatibility */ #ifndef __KERNEL__ -/* Generic cache responses from hook functions. - <= 0x2000 is used for protocol-flags. 
*/ -#define NFC_UNKNOWN 0x4000 -#define NFC_ALTERED 0x8000 /* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */ #define NF_VERDICT_BITS 16 diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h index 60236f694143..ea69ca21ff23 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -13,8 +13,9 @@ #include <linux/types.h> -/* The protocol version */ -#define IPSET_PROTOCOL 6 +/* The protocol versions */ +#define IPSET_PROTOCOL 7 +#define IPSET_PROTOCOL_MIN 6 /* The max length of strings including NUL: set and type identifiers */ #define IPSET_MAXNAMELEN 32 @@ -38,17 +39,19 @@ enum ipset_cmd { IPSET_CMD_TEST, /* 11: Test an element in a set */ IPSET_CMD_HEADER, /* 12: Get set header data only */ IPSET_CMD_TYPE, /* 13: Get set type */ + IPSET_CMD_GET_BYNAME, /* 14: Get set index by name */ + IPSET_CMD_GET_BYINDEX, /* 15: Get set name by index */ IPSET_MSG_MAX, /* Netlink message commands */ /* Commands in userspace: */ - IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */ - IPSET_CMD_HELP, /* 15: Get help */ - IPSET_CMD_VERSION, /* 16: Get program version */ - IPSET_CMD_QUIT, /* 17: Quit from interactive mode */ + IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 16: Enter restore mode */ + IPSET_CMD_HELP, /* 17: Get help */ + IPSET_CMD_VERSION, /* 18: Get program version */ + IPSET_CMD_QUIT, /* 19: Quit from interactive mode */ IPSET_CMD_MAX, - IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */ + IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 20: Commit buffered commands */ }; /* Attributes at command level */ @@ -66,6 +69,7 @@ enum { IPSET_ATTR_LINENO, /* 9: Restore lineno */ IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */ IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */ + IPSET_ATTR_INDEX, /* 11: Kernel index of set */ __IPSET_ATTR_CMD_MAX, }; #define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1) @@ -223,6 +227,7 @@ enum ipset_adt { /* Sets are identified by an index in kernel space. Tweak with ip_set_id_t * and IPSET_INVALID_ID if you want to increase the max number of sets. + * Also, IPSET_ATTR_INDEX must be changed. */ typedef __u16 ip_set_id_t; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 579974b0bf0d..7de4f1bdaf06 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1635,8 +1635,8 @@ enum nft_ng_attributes { NFTA_NG_MODULUS, NFTA_NG_TYPE, NFTA_NG_OFFSET, - NFTA_NG_SET_NAME, - NFTA_NG_SET_ID, + NFTA_NG_SET_NAME, /* deprecated */ + NFTA_NG_SET_ID, /* deprecated */ __NFTA_NG_MAX }; #define NFTA_NG_MAX (__NFTA_NG_MAX - 1) diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h index 156ccd089df1..1610fdbab98d 100644 --- a/include/uapi/linux/netfilter_bridge.h +++ b/include/uapi/linux/netfilter_bridge.h @@ -11,6 +11,10 @@ #include <linux/if_vlan.h> #include <linux/if_pppox.h> +#ifndef __KERNEL__ +#include <limits.h> /* for INT_MIN, INT_MAX */ +#endif + /* Bridge Hooks */ /* After promisc drops, checksum checks. */ #define NF_BR_PRE_ROUTING 0 diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h index 61f1c7dfd033..3c77f54560f2 100644 --- a/include/uapi/linux/netfilter_decnet.h +++ b/include/uapi/linux/netfilter_decnet.h @@ -15,16 +15,6 @@ #include <limits.h> /* for INT_MIN, INT_MAX */ -/* IP Cache bits. */ -/* Src IP address. 
*/ -#define NFC_DN_SRC 0x0001 -/* Dest IP address. */ -#define NFC_DN_DST 0x0002 -/* Input device. */ -#define NFC_DN_IF_IN 0x0004 -/* Output device. */ -#define NFC_DN_IF_OUT 0x0008 - /* kernel define is in netfilter_defs.h */ #define NF_DN_NUMHOOKS 7 #endif /* ! __KERNEL__ */ diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h index c3b060775e13..155e77d6a42d 100644 --- a/include/uapi/linux/netfilter_ipv4.h +++ b/include/uapi/linux/netfilter_ipv4.h @@ -13,34 +13,6 @@ #include <limits.h> /* for INT_MIN, INT_MAX */ -/* IP Cache bits. */ -/* Src IP address. */ -#define NFC_IP_SRC 0x0001 -/* Dest IP address. */ -#define NFC_IP_DST 0x0002 -/* Input device. */ -#define NFC_IP_IF_IN 0x0004 -/* Output device. */ -#define NFC_IP_IF_OUT 0x0008 -/* TOS. */ -#define NFC_IP_TOS 0x0010 -/* Protocol. */ -#define NFC_IP_PROTO 0x0020 -/* IP options. */ -#define NFC_IP_OPTIONS 0x0040 -/* Frag & flags. */ -#define NFC_IP_FRAG 0x0080 - -/* Per-protocol information: only matters if proto match. */ -/* TCP flags. */ -#define NFC_IP_TCPFLAGS 0x0100 -/* Source port. */ -#define NFC_IP_SRC_PT 0x0200 -/* Dest port. */ -#define NFC_IP_DST_PT 0x0400 -/* Something else about the proto */ -#define NFC_IP_PROTO_UNKNOWN 0x2000 - /* IP Hooks */ /* After promisc drops, checksum checks. */ #define NF_IP_PRE_ROUTING 0 diff --git a/include/uapi/linux/netfilter_ipv6.h b/include/uapi/linux/netfilter_ipv6.h index dc624fd24d25..80aa9b0799af 100644 --- a/include/uapi/linux/netfilter_ipv6.h +++ b/include/uapi/linux/netfilter_ipv6.h @@ -16,35 +16,6 @@ #include <limits.h> /* for INT_MIN, INT_MAX */ -/* IP Cache bits. */ -/* Src IP address. */ -#define NFC_IP6_SRC 0x0001 -/* Dest IP address. */ -#define NFC_IP6_DST 0x0002 -/* Input device. */ -#define NFC_IP6_IF_IN 0x0004 -/* Output device. */ -#define NFC_IP6_IF_OUT 0x0008 -/* TOS. */ -#define NFC_IP6_TOS 0x0010 -/* Protocol. */ -#define NFC_IP6_PROTO 0x0020 -/* IP options. */ -#define NFC_IP6_OPTIONS 0x0040 -/* Frag & flags. */ -#define NFC_IP6_FRAG 0x0080 - - -/* Per-protocol information: only matters if proto match. */ -/* TCP flags. */ -#define NFC_IP6_TCPFLAGS 0x0100 -/* Source port. */ -#define NFC_IP6_SRC_PT 0x0200 -/* Dest port. */ -#define NFC_IP6_DST_PT 0x0400 -/* Something else about the proto */ -#define NFC_IP6_PROTO_UNKNOWN 0x2000 - /* IP6 Hooks */ /* After promisc drops, checksum checks. */ #define NF_IP6_PRE_ROUTING 0 diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 486ed1f0c0bc..0a4d73317759 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -155,7 +155,7 @@ enum nlmsgerr_attrs { #define NETLINK_LIST_MEMBERSHIPS 9 #define NETLINK_CAP_ACK 10 #define NETLINK_EXT_ACK 11 -#define NETLINK_DUMP_STRICT_CHK 12 +#define NETLINK_GET_STRICT_CHK 12 struct nl_pktinfo { __u32 group; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 6d610bae30a9..31ae5c7f10e3 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1036,6 +1036,35 @@ * @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in * the %NL80211_ATTR_FTM_RESPONDER_STATS attribute. * + * @NL80211_CMD_PEER_MEASUREMENT_START: start a (set of) peer measurement(s) + * with the given parameters, which are encapsulated in the nested + * %NL80211_ATTR_PEER_MEASUREMENTS attribute. Optionally, MAC address + * randomization may be enabled and configured by specifying the + * %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes. 
+ * If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute. + * A u64 cookie for further %NL80211_ATTR_COOKIE use is returned in + * the netlink extended ack message. + * + * To cancel a measurement, close the socket that requested it. + * + * Measurement results are reported to the socket that requested the + * measurement using @NL80211_CMD_PEER_MEASUREMENT_RESULT when they + * become available, so applications must ensure a large enough socket + * buffer size. + * + * Depending on driver support it may or may not be possible to start + * multiple concurrent measurements. + * @NL80211_CMD_PEER_MEASUREMENT_RESULT: This command number is used for the + * result notification from the driver to the requesting socket. + * @NL80211_CMD_PEER_MEASUREMENT_COMPLETE: Notification only, indicating that + * the measurement completed, using the measurement cookie + * (%NL80211_ATTR_COOKIE). + * + * @NL80211_CMD_NOTIFY_RADAR: Notify the kernel that a radar signal was + * detected and reported by a neighboring device on the channel + * indicated by %NL80211_ATTR_WIPHY_FREQ and other attributes + * determining the width and type. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1250,6 +1279,12 @@ enum nl80211_commands { NL80211_CMD_GET_FTM_RESPONDER_STATS, + NL80211_CMD_PEER_MEASUREMENT_START, + NL80211_CMD_PEER_MEASUREMENT_RESULT, + NL80211_CMD_PEER_MEASUREMENT_COMPLETE, + + NL80211_CMD_NOTIFY_RADAR, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1706,7 +1741,7 @@ enum nl80211_commands { * the values passed in @NL80211_ATTR_SCAN_SSIDS (eg. if an SSID * is included in the probe request, but the match attributes * will never let it go through), -EINVAL may be returned. - * If ommited, no filtering is done. + * If omitted, no filtering is done. * * @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported * interface combinations. In each nested item, it contains attributes @@ -1811,7 +1846,7 @@ enum nl80211_commands { * * @NL80211_ATTR_INACTIVITY_TIMEOUT: timeout value in seconds, this can be * used by the drivers which has MLME in firmware and does not have support - * to report per station tx/rx activity to free up the staion entry from + * to report per station tx/rx activity to free up the station entry from * the list. This needs to be used when the driver advertises the * capability to timeout the stations. * @@ -2172,7 +2207,7 @@ enum nl80211_commands { * * @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present the RSSI level for BSSs in * the specified band is to be adjusted before doing - * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparision to figure out + * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out * better BSSs. The attribute value is a packed structure * value as specified by &struct nl80211_bss_select_rssi_adjust. * @@ -2254,6 +2289,16 @@ enum nl80211_commands { * @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder * statistics, see &enum nl80211_ftm_responder_stats. * + * @NL80211_ATTR_TIMEOUT: Timeout for the given operation in milliseconds (u32), + * if the attribute is not given no timeout is requested. Note that 0 is an + * invalid value. + * + * @NL80211_ATTR_PEER_MEASUREMENTS: peer measurements request (and result) + * data, uses nested attributes specified in + * &enum nl80211_peer_measurement_attrs. + * This is also used for capability advertisement in the wiphy information, + * with the appropriate sub-attributes. 
+ * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2699,6 +2744,10 @@ enum nl80211_attrs { NL80211_ATTR_FTM_RESPONDER_STATS, + NL80211_ATTR_TIMEOUT, + + NL80211_ATTR_PEER_MEASUREMENTS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3074,6 +3123,8 @@ enum nl80211_sta_bss_param { * with an FCS error (u32, from this station). This count may not include * some packets with an FCS error due to TA corruption. Hence this counter * might not be fully accurate. + * @NL80211_STA_INFO_CONNECTED_TO_GATE: set to true if STA has a path to a + * mesh gate (u8, 0 or 1) * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -3116,6 +3167,7 @@ enum nl80211_sta_info { NL80211_STA_INFO_ACK_SIGNAL_AVG, NL80211_STA_INFO_RX_MPDUS, NL80211_STA_INFO_FCS_ERROR_COUNT, + NL80211_STA_INFO_CONNECTED_TO_GATE, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, @@ -3895,6 +3947,11 @@ enum nl80211_mesh_power_mode { * remove it from the STA's list of peers. You may set this to 0 to disable * the removal of the STA. Default is 30 minutes. * + * @NL80211_MESHCONF_CONNECTED_TO_GATE: If set to true then this mesh STA + * will advertise that it is connected to a gate in the mesh formation + * field. If left unset then the mesh formation field will only + * advertise such if there is an active root mesh path. + * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use */ enum nl80211_meshconf_params { @@ -3927,6 +3984,7 @@ enum nl80211_meshconf_params { NL80211_MESHCONF_POWER_MODE, NL80211_MESHCONF_AWAKE_WINDOW, NL80211_MESHCONF_PLINK_TIMEOUT, + NL80211_MESHCONF_CONNECTED_TO_GATE, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, @@ -4859,7 +4917,7 @@ enum nl80211_iface_limit_attrs { * numbers = [ #{STA} <= 1, #{P2P-client,P2P-GO} <= 3 ], max = 4 * => allows a STA plus three P2P interfaces * - * The list of these four possiblities could completely be contained + * The list of these four possibilities could completely be contained * within the %NL80211_ATTR_INTERFACE_COMBINATIONS attribute to indicate * that any of these groups must match. * @@ -4889,7 +4947,7 @@ enum nl80211_if_combination_attrs { * enum nl80211_plink_state - state of a mesh peer link finite state machine * * @NL80211_PLINK_LISTEN: initial state, considered the implicit - * state of non existant mesh peer links + * state of non existent mesh peer links * @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to * this mesh peer * @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received @@ -5381,7 +5439,7 @@ enum nl80211_timeout_reason { * request parameters IE in the probe request * @NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe responses * @NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE: send probe request frames at - * rate of at least 5.5M. In case non OCE AP is dicovered in the channel, + * rate of at least 5.5M. In case non OCE AP is discovered in the channel, * only the first probe req in the channel will be sent in high rate. 
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: allow probe request * tx deferral (dot11FILSProbeDelay shall be set to 15ms) @@ -5842,9 +5900,11 @@ enum nl80211_external_auth_action { * @__NL80211_FTM_RESP_ATTR_INVALID: Invalid * @NL80211_FTM_RESP_ATTR_ENABLED: FTM responder is enabled * @NL80211_FTM_RESP_ATTR_LCI: The content of Measurement Report Element - * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10) + * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10), + * i.e. starting with the measurement token * @NL80211_FTM_RESP_ATTR_CIVIC: The content of Measurement Report Element - * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13) + * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13), + * i.e. starting with the measurement token * @__NL80211_FTM_RESP_ATTR_LAST: Internal * @NL80211_FTM_RESP_ATTR_MAX: highest FTM responder attribute. */ @@ -5906,4 +5966,386 @@ enum nl80211_ftm_responder_stats { NL80211_FTM_STATS_MAX = __NL80211_FTM_STATS_AFTER_LAST - 1 }; +/** + * enum nl80211_preamble - frame preamble types + * @NL80211_PREAMBLE_LEGACY: legacy (HR/DSSS, OFDM, ERP PHY) preamble + * @NL80211_PREAMBLE_HT: HT preamble + * @NL80211_PREAMBLE_VHT: VHT preamble + * @NL80211_PREAMBLE_DMG: DMG preamble + */ +enum nl80211_preamble { + NL80211_PREAMBLE_LEGACY, + NL80211_PREAMBLE_HT, + NL80211_PREAMBLE_VHT, + NL80211_PREAMBLE_DMG, +}; + +/** + * enum nl80211_peer_measurement_type - peer measurement types + * @NL80211_PMSR_TYPE_INVALID: invalid/unused, needed as we use + * these numbers also for attributes + * + * @NL80211_PMSR_TYPE_FTM: flight time measurement + * + * @NUM_NL80211_PMSR_TYPES: internal + * @NL80211_PMSR_TYPE_MAX: highest type number + */ +enum nl80211_peer_measurement_type { + NL80211_PMSR_TYPE_INVALID, + + NL80211_PMSR_TYPE_FTM, + + NUM_NL80211_PMSR_TYPES, + NL80211_PMSR_TYPE_MAX = NUM_NL80211_PMSR_TYPES - 1 +}; + +/** + * enum nl80211_peer_measurement_status - peer measurement status + * @NL80211_PMSR_STATUS_SUCCESS: measurement completed successfully + * @NL80211_PMSR_STATUS_REFUSED: measurement was locally refused + * @NL80211_PMSR_STATUS_TIMEOUT: measurement timed out + * @NL80211_PMSR_STATUS_FAILURE: measurement failed, a type-dependent + * reason may be available in the response data + */ +enum nl80211_peer_measurement_status { + NL80211_PMSR_STATUS_SUCCESS, + NL80211_PMSR_STATUS_REFUSED, + NL80211_PMSR_STATUS_TIMEOUT, + NL80211_PMSR_STATUS_FAILURE, +}; + +/** + * enum nl80211_peer_measurement_req - peer measurement request attributes + * @__NL80211_PMSR_REQ_ATTR_INVALID: invalid + * + * @NL80211_PMSR_REQ_ATTR_DATA: This is a nested attribute with measurement + * type-specific request data inside. The attributes used are from the + * enums named nl80211_peer_measurement_<type>_req. + * @NL80211_PMSR_REQ_ATTR_GET_AP_TSF: include AP TSF timestamp, if supported + * (flag attribute) + * + * @NUM_NL80211_PMSR_REQ_ATTRS: internal + * @NL80211_PMSR_REQ_ATTR_MAX: highest attribute number + */ +enum nl80211_peer_measurement_req { + __NL80211_PMSR_REQ_ATTR_INVALID, + + NL80211_PMSR_REQ_ATTR_DATA, + NL80211_PMSR_REQ_ATTR_GET_AP_TSF, + + /* keep last */ + NUM_NL80211_PMSR_REQ_ATTRS, + NL80211_PMSR_REQ_ATTR_MAX = NUM_NL80211_PMSR_REQ_ATTRS - 1 +}; + +/** + * enum nl80211_peer_measurement_resp - peer measurement response attributes + * @__NL80211_PMSR_RESP_ATTR_INVALID: invalid + * + * @NL80211_PMSR_RESP_ATTR_DATA: This is a nested attribute with measurement + * type-specific results inside. 
The attributes used are from the enums + * named nl80211_peer_measurement_<type>_resp. + * @NL80211_PMSR_RESP_ATTR_STATUS: u32 value with the measurement status + * (using values from &enum nl80211_peer_measurement_status.) + * @NL80211_PMSR_RESP_ATTR_HOST_TIME: host time (%CLOCK_BOOTTIME) when the + * result was measured; this value is not expected to be accurate to + * more than 20ms. (u64, nanoseconds) + * @NL80211_PMSR_RESP_ATTR_AP_TSF: TSF of the AP that the interface + * doing the measurement is connected to when the result was measured. + * This shall be accurately reported if supported and requested + * (u64, usec) + * @NL80211_PMSR_RESP_ATTR_FINAL: If results are sent to the host partially + * (*e.g. with FTM per-burst data) this flag will be cleared on all but + * the last result; if all results are combined it's set on the single + * result. + * @NL80211_PMSR_RESP_ATTR_PAD: padding for 64-bit attributes, ignore + * + * @NUM_NL80211_PMSR_RESP_ATTRS: internal + * @NL80211_PMSR_RESP_ATTR_MAX: highest attribute number + */ +enum nl80211_peer_measurement_resp { + __NL80211_PMSR_RESP_ATTR_INVALID, + + NL80211_PMSR_RESP_ATTR_DATA, + NL80211_PMSR_RESP_ATTR_STATUS, + NL80211_PMSR_RESP_ATTR_HOST_TIME, + NL80211_PMSR_RESP_ATTR_AP_TSF, + NL80211_PMSR_RESP_ATTR_FINAL, + NL80211_PMSR_RESP_ATTR_PAD, + + /* keep last */ + NUM_NL80211_PMSR_RESP_ATTRS, + NL80211_PMSR_RESP_ATTR_MAX = NUM_NL80211_PMSR_RESP_ATTRS - 1 +}; + +/** + * enum nl80211_peer_measurement_peer_attrs - peer attributes for measurement + * @__NL80211_PMSR_PEER_ATTR_INVALID: invalid + * + * @NL80211_PMSR_PEER_ATTR_ADDR: peer's MAC address + * @NL80211_PMSR_PEER_ATTR_CHAN: channel definition, nested, using top-level + * attributes like %NL80211_ATTR_WIPHY_FREQ etc. + * @NL80211_PMSR_PEER_ATTR_REQ: This is a nested attribute indexed by + * measurement type, with attributes from the + * &enum nl80211_peer_measurement_req inside. + * @NL80211_PMSR_PEER_ATTR_RESP: This is a nested attribute indexed by + * measurement type, with attributes from the + * &enum nl80211_peer_measurement_resp inside. + * + * @NUM_NL80211_PMSR_PEER_ATTRS: internal + * @NL80211_PMSR_PEER_ATTR_MAX: highest attribute number + */ +enum nl80211_peer_measurement_peer_attrs { + __NL80211_PMSR_PEER_ATTR_INVALID, + + NL80211_PMSR_PEER_ATTR_ADDR, + NL80211_PMSR_PEER_ATTR_CHAN, + NL80211_PMSR_PEER_ATTR_REQ, + NL80211_PMSR_PEER_ATTR_RESP, + + /* keep last */ + NUM_NL80211_PMSR_PEER_ATTRS, + NL80211_PMSR_PEER_ATTR_MAX = NUM_NL80211_PMSR_PEER_ATTRS - 1, +}; + +/** + * enum nl80211_peer_measurement_attrs - peer measurement attributes + * @__NL80211_PMSR_ATTR_INVALID: invalid + * + * @NL80211_PMSR_ATTR_MAX_PEERS: u32 attribute used for capability + * advertisement only, indicates the maximum number of peers + * measurements can be done with in a single request + * @NL80211_PMSR_ATTR_REPORT_AP_TSF: flag attribute in capability + * indicating that the connected AP's TSF can be reported in + * measurement results + * @NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR: flag attribute in capability + * indicating that MAC address randomization is supported. + * @NL80211_PMSR_ATTR_TYPE_CAPA: capabilities reported by the device, + * this contains a nesting indexed by measurement type, and + * type-specific capabilities inside, which are from the enums + * named nl80211_peer_measurement_<type>_capa. 
+ * @NL80211_PMSR_ATTR_PEERS: nested attribute, the nesting index is + * meaningless, just a list of peers to measure with, with the + * sub-attributes taken from + * &enum nl80211_peer_measurement_peer_attrs. + * + * @NUM_NL80211_PMSR_ATTR: internal + * @NL80211_PMSR_ATTR_MAX: highest attribute number + */ +enum nl80211_peer_measurement_attrs { + __NL80211_PMSR_ATTR_INVALID, + + NL80211_PMSR_ATTR_MAX_PEERS, + NL80211_PMSR_ATTR_REPORT_AP_TSF, + NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR, + NL80211_PMSR_ATTR_TYPE_CAPA, + NL80211_PMSR_ATTR_PEERS, + + /* keep last */ + NUM_NL80211_PMSR_ATTR, + NL80211_PMSR_ATTR_MAX = NUM_NL80211_PMSR_ATTR - 1 +}; + +/** + * enum nl80211_peer_measurement_ftm_capa - FTM capabilities + * @__NL80211_PMSR_FTM_CAPA_ATTR_INVALID: invalid + * + * @NL80211_PMSR_FTM_CAPA_ATTR_ASAP: flag attribute indicating ASAP mode + * is supported + * @NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP: flag attribute indicating non-ASAP + * mode is supported + * @NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI: flag attribute indicating if LCI + * data can be requested during the measurement + * @NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC: flag attribute indicating if civic + * location data can be requested during the measurement + * @NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES: u32 bitmap attribute of bits + * from &enum nl80211_preamble. + * @NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS: bitmap of values from + * &enum nl80211_chan_width indicating the supported channel + * bandwidths for FTM. Note that a higher channel bandwidth may be + * configured to allow for other measurements types with different + * bandwidth requirement in the same measurement. + * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT: u32 attribute indicating + * the maximum bursts exponent that can be used (if not present anything + * is valid) + * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST: u32 attribute indicating + * the maximum FTMs per burst (if not present anything is valid) + * + * @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal + * @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number + */ +enum nl80211_peer_measurement_ftm_capa { + __NL80211_PMSR_FTM_CAPA_ATTR_INVALID, + + NL80211_PMSR_FTM_CAPA_ATTR_ASAP, + NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP, + NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI, + NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC, + NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES, + NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS, + NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT, + NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST, + + /* keep last */ + NUM_NL80211_PMSR_FTM_CAPA_ATTR, + NL80211_PMSR_FTM_CAPA_ATTR_MAX = NUM_NL80211_PMSR_FTM_CAPA_ATTR - 1 +}; + +/** + * enum nl80211_peer_measurement_ftm_req - FTM request attributes + * @__NL80211_PMSR_FTM_REQ_ATTR_INVALID: invalid + * + * @NL80211_PMSR_FTM_REQ_ATTR_ASAP: ASAP mode requested (flag) + * @NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE: preamble type (see + * &enum nl80211_preamble), optional for DMG (u32) + * @NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP: number of bursts exponent as in + * 802.11-2016 9.4.2.168 "Fine Timing Measurement Parameters element" + * (u8, 0-15, optional with default 15 i.e. "no preference") + * @NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD: interval between bursts in units + * of 100ms (u16, optional with default 0) + * @NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION: burst duration, as in 802.11-2016 + * Table 9-257 "Burst Duration field encoding" (u8, 0-15, optional with + * default 15 i.e. 
"no preference") + * @NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST: number of successful FTM frames + * requested per burst + * (u8, 0-31, optional with default 0 i.e. "no preference") + * @NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES: number of FTMR frame retries + * (u8, default 3) + * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI: request LCI data (flag) + * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC: request civic location data + * (flag) + * + * @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal + * @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number + */ +enum nl80211_peer_measurement_ftm_req { + __NL80211_PMSR_FTM_REQ_ATTR_INVALID, + + NL80211_PMSR_FTM_REQ_ATTR_ASAP, + NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE, + NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP, + NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD, + NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION, + NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST, + NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES, + NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI, + NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC, + + /* keep last */ + NUM_NL80211_PMSR_FTM_REQ_ATTR, + NL80211_PMSR_FTM_REQ_ATTR_MAX = NUM_NL80211_PMSR_FTM_REQ_ATTR - 1 +}; + +/** + * enum nl80211_peer_measurement_ftm_failure_reasons - FTM failure reasons + * @NL80211_PMSR_FTM_FAILURE_UNSPECIFIED: unspecified failure, not used + * @NL80211_PMSR_FTM_FAILURE_NO_RESPONSE: no response from the FTM responder + * @NL80211_PMSR_FTM_FAILURE_REJECTED: FTM responder rejected measurement + * @NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL: we already know the peer is + * on a different channel, so can't measure (if we didn't know, we'd + * try and get no response) + * @NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE: peer can't actually do FTM + * @NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP: invalid T1/T4 timestamps + * received + * @NL80211_PMSR_FTM_FAILURE_PEER_BUSY: peer reports busy, you may retry + * later (see %NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME) + * @NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS: parameters were changed + * by the peer and are no longer supported + */ +enum nl80211_peer_measurement_ftm_failure_reasons { + NL80211_PMSR_FTM_FAILURE_UNSPECIFIED, + NL80211_PMSR_FTM_FAILURE_NO_RESPONSE, + NL80211_PMSR_FTM_FAILURE_REJECTED, + NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL, + NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE, + NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP, + NL80211_PMSR_FTM_FAILURE_PEER_BUSY, + NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS, +}; + +/** + * enum nl80211_peer_measurement_ftm_resp - FTM response attributes + * @__NL80211_PMSR_FTM_RESP_ATTR_INVALID: invalid + * + * @NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON: FTM-specific failure reason + * (u32, optional) + * @NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX: optional, if bursts are reported + * as separate results then it will be the burst index 0...(N-1) and + * the top level will indicate partial results (u32) + * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS: number of FTM Request frames + * transmitted (u32, optional) + * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES: number of FTM Request frames + * that were acknowleged (u32, optional) + * @NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME: retry time received from the + * busy peer (u32, seconds) + * @NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP: actual number of bursts exponent + * used by the responder (similar to request, u8) + * @NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION: actual burst duration used by + * the responder (similar to request, u8) + * @NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST: actual FTMs per burst used + * by the responder (similar 
to request, u8) + * @NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG: average RSSI across all FTM action + * frames (optional, s32, 1/2 dBm) + * @NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD: RSSI spread across all FTM action + * frames (optional, s32, 1/2 dBm) + * @NL80211_PMSR_FTM_RESP_ATTR_TX_RATE: bitrate we used for the response to the + * FTM action frame (optional, nested, using &enum nl80211_rate_info + * attributes) + * @NL80211_PMSR_FTM_RESP_ATTR_RX_RATE: bitrate the responder used for the FTM + * action frame (optional, nested, using &enum nl80211_rate_info attrs) + * @NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG: average RTT (s64, picoseconds, optional + * but one of RTT/DIST must be present) + * @NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE: RTT variance (u64, ps^2, note that + * standard deviation is the square root of variance, optional) + * @NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD: RTT spread (u64, picoseconds, + * optional) + * @NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG: average distance (s64, mm, optional + * but one of RTT/DIST must be present) + * @NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE: distance variance (u64, mm^2, note + * that standard deviation is the square root of variance, optional) + * @NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD: distance spread (u64, mm, optional) + * @NL80211_PMSR_FTM_RESP_ATTR_LCI: LCI data from peer (binary, optional); + * this is the contents of the Measurement Report Element (802.11-2016 + * 9.4.2.22.1) starting with the Measurement Token, with Measurement + * Type 8. + * @NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC: civic location data from peer + * (binary, optional); + * this is the contents of the Measurement Report Element (802.11-2016 + * 9.4.2.22.1) starting with the Measurement Token, with Measurement + * Type 11. + * @NL80211_PMSR_FTM_RESP_ATTR_PAD: ignore, for u64/s64 padding only + * + * @NUM_NL80211_PMSR_FTM_RESP_ATTR: internal + * @NL80211_PMSR_FTM_RESP_ATTR_MAX: highest attribute number + */ +enum nl80211_peer_measurement_ftm_resp { + __NL80211_PMSR_FTM_RESP_ATTR_INVALID, + + NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON, + NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX, + NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS, + NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES, + NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME, + NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP, + NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION, + NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST, + NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG, + NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD, + NL80211_PMSR_FTM_RESP_ATTR_TX_RATE, + NL80211_PMSR_FTM_RESP_ATTR_RX_RATE, + NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG, + NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE, + NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD, + NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG, + NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE, + NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD, + NL80211_PMSR_FTM_RESP_ATTR_LCI, + NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC, + NL80211_PMSR_FTM_RESP_ATTR_PAD, + + /* keep last */ + NUM_NL80211_PMSR_FTM_RESP_ATTR, + NL80211_PMSR_FTM_RESP_ATTR_MAX = NUM_NL80211_PMSR_FTM_RESP_ATTR - 1 +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 401d0c1e612d..95d0db2a8350 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -485,6 +485,11 @@ enum { TCA_FLOWER_IN_HW_COUNT, + TCA_FLOWER_KEY_PORT_SRC_MIN, /* be16 */ + TCA_FLOWER_KEY_PORT_SRC_MAX, /* be16 */ + TCA_FLOWER_KEY_PORT_DST_MIN, /* be16 */ + TCA_FLOWER_KEY_PORT_DST_MAX, /* be16 */ + __TCA_FLOWER_MAX, }; @@ -518,6 +523,8 @@ enum { TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 
1), }; +#define TCA_FLOWER_MASK_FLAGS_RANGE (1 << 0) /* Range-based match */ + /* Match-all classifier */ enum { diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 89ee47c2f17d..0d18b1d1fbbc 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -291,11 +291,38 @@ enum { TCA_GRED_DPS, TCA_GRED_MAX_P, TCA_GRED_LIMIT, + TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */ __TCA_GRED_MAX, }; #define TCA_GRED_MAX (__TCA_GRED_MAX - 1) +enum { + TCA_GRED_VQ_ENTRY_UNSPEC, + TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */ + __TCA_GRED_VQ_ENTRY_MAX, +}; +#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1) + +enum { + TCA_GRED_VQ_UNSPEC, + TCA_GRED_VQ_PAD, + TCA_GRED_VQ_DP, /* u32 */ + TCA_GRED_VQ_STAT_BYTES, /* u64 */ + TCA_GRED_VQ_STAT_PACKETS, /* u32 */ + TCA_GRED_VQ_STAT_BACKLOG, /* u32 */ + TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */ + TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */ + TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */ + TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */ + TCA_GRED_VQ_STAT_PDROP, /* u32 */ + TCA_GRED_VQ_STAT_OTHER, /* u32 */ + TCA_GRED_VQ_FLAGS, /* u32 */ + __TCA_GRED_VQ_MAX +}; + +#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1) + struct tc_gred_qopt { __u32 limit; /* HARD maximal queue length (bytes) */ __u32 qth_min; /* Min average length threshold (bytes) */ @@ -864,6 +891,8 @@ enum { TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */ + TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */ + __TCA_FQ_MAX }; @@ -882,6 +911,7 @@ struct tc_fq_qd_stats { __u32 inactive_flows; __u32 throttled_flows; __u32 unthrottle_latency_ns; + __u64 ce_mark; /* packets above ce_threshold */ }; /* Heavy-Hitter Filter */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index c0d7ea0bf5b6..b4875a93363a 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -212,6 +212,7 @@ struct prctl_mm_map { #define PR_SET_SPECULATION_CTRL 53 /* Speculation control variants */ # define PR_SPEC_STORE_BYPASS 0 +# define PR_SPEC_INDIRECT_BRANCH 1 /* Return and control values for PR_SET/GET_SPECULATION_CTRL */ # define PR_SPEC_NOT_AFFECTED 0 # define PR_SPEC_PRCTL (1UL << 0) @@ -219,4 +220,12 @@ struct prctl_mm_map { # define PR_SPEC_DISABLE (1UL << 2) # define PR_SPEC_FORCE_DISABLE (1UL << 3) +/* Reset arm64 pointer authentication keys */ +#define PR_PAC_RESET_KEYS 54 +# define PR_PAC_APIAKEY (1UL << 0) +# define PR_PAC_APIBKEY (1UL << 1) +# define PR_PAC_APDAKEY (1UL << 2) +# define PR_PAC_APDBKEY (1UL << 3) +# define PR_PAC_APGAKEY (1UL << 4) + #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index 3039bf6a742e..d73d83950265 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -84,6 +84,16 @@ struct ptp_sys_offset { struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1]; }; +struct ptp_sys_offset_extended { + unsigned int n_samples; /* Desired number of measurements. */ + unsigned int rsv[3]; /* Reserved for future use. */ + /* + * Array of [system, phc, system] time stamps. The kernel will provide + * 3*n_samples time stamps. 
+ */ + struct ptp_clock_time ts[PTP_MAX_SAMPLES][3]; +}; + struct ptp_sys_offset_precise { struct ptp_clock_time device; struct ptp_clock_time sys_realtime; @@ -136,6 +146,8 @@ struct ptp_pin_desc { #define PTP_PIN_SETFUNC _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc) #define PTP_SYS_OFFSET_PRECISE \ _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise) +#define PTP_SYS_OFFSET_EXTENDED \ + _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) struct ptp_extts_event { struct ptp_clock_time t; /* Time event occured. */ diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index 34dd3d497f2c..d584073532b8 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -129,6 +129,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_STREAM_SCHEDULER_VALUE 124 #define SCTP_INTERLEAVING_SUPPORTED 125 #define SCTP_SENDMSG_CONNECT 126 +#define SCTP_EVENT 127 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 @@ -568,6 +569,8 @@ struct sctp_assoc_reset_event { #define SCTP_ASSOC_CHANGE_DENIED 0x0004 #define SCTP_ASSOC_CHANGE_FAILED 0x0008 +#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED +#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED struct sctp_stream_change_event { __u16 strchange_type; __u16 strchange_flags; @@ -630,7 +633,9 @@ union sctp_notification { */ enum sctp_sn_type { - SCTP_SN_TYPE_BASE = (1<<15), + SCTP_SN_TYPE_BASE = (1<<15), + SCTP_DATA_IO_EVENT = SCTP_SN_TYPE_BASE, +#define SCTP_DATA_IO_EVENT SCTP_DATA_IO_EVENT SCTP_ASSOC_CHANGE, #define SCTP_ASSOC_CHANGE SCTP_ASSOC_CHANGE SCTP_PEER_ADDR_CHANGE, @@ -655,6 +660,8 @@ enum sctp_sn_type { #define SCTP_ASSOC_RESET_EVENT SCTP_ASSOC_RESET_EVENT SCTP_STREAM_CHANGE_EVENT, #define SCTP_STREAM_CHANGE_EVENT SCTP_STREAM_CHANGE_EVENT + SCTP_SN_TYPE_MAX = SCTP_STREAM_CHANGE_EVENT, +#define SCTP_SN_TYPE_MAX SCTP_SN_TYPE_MAX }; /* Notification error codes used to fill up the error fields in some @@ -1148,9 +1155,16 @@ struct sctp_add_streams { uint16_t sas_outstrms; }; +struct sctp_event { + sctp_assoc_t se_assoc_id; + uint16_t se_type; + uint8_t se_on; +}; + /* SCTP Stream schedulers */ enum sctp_sched_type { SCTP_SS_FCFS, + SCTP_SS_DEFAULT = SCTP_SS_FCFS, SCTP_SS_PRIO, SCTP_SS_RR, SCTP_SS_MAX = SCTP_SS_RR diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h index 9efc0e73d50b..90734aa5aa36 100644 --- a/include/uapi/linux/seccomp.h +++ b/include/uapi/linux/seccomp.h @@ -15,11 +15,13 @@ #define SECCOMP_SET_MODE_STRICT 0 #define SECCOMP_SET_MODE_FILTER 1 #define SECCOMP_GET_ACTION_AVAIL 2 +#define SECCOMP_GET_NOTIF_SIZES 3 /* Valid flags for SECCOMP_SET_MODE_FILTER */ -#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) -#define SECCOMP_FILTER_FLAG_LOG (1UL << 1) -#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) +#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) +#define SECCOMP_FILTER_FLAG_LOG (1UL << 1) +#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) +#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3) /* * All BPF programs must return a 32-bit value. 
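/*
 * Illustrative sketch, placed between the two seccomp.h hunks and not part
 * of the diff: how user space might combine the SECCOMP_FILTER_FLAG_NEW_LISTENER
 * flag added above with the SECCOMP_RET_USER_NOTIF action and the
 * seccomp_notif structures/ioctls added in the hunk that follows.  The
 * filtered syscall (__NR_getppid) and the deny-with-EPERM policy are
 * arbitrary choices for the example; a real supervisor would run in a
 * separate process and inspect req.data (including data.arch) before
 * answering.
 */
#include <errno.h>
#include <stddef.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

/* Install a filter that punts getppid() to user space; returns the notify fd. */
static int install_notify_filter(void)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_USER_NOTIF),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	/* With the NEW_LISTENER flag, a successful call returns the listener fd. */
	return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_NEW_LISTENER, &prog);
}

/* Supervisor side: receive one notification and deny the syscall. */
static void handle_one_notification(int listener)
{
	struct seccomp_notif req;
	struct seccomp_notif_resp resp;

	memset(&req, 0, sizeof(req));
	if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
		return;

	memset(&resp, 0, sizeof(resp));
	resp.id = req.id;	/* the response must echo the request id */
	resp.error = -EPERM;	/* make the target's syscall fail with EPERM */

	/* Optionally confirm the target is still blocked on this request. */
	if (ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id) == 0)
		ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);
}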
@@ -35,6 +37,7 @@ #define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */ #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */ +#define SECCOMP_RET_USER_NOTIF 0x7fc00000U /* notifies userspace */ #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */ #define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */ #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */ @@ -60,4 +63,35 @@ struct seccomp_data { __u64 args[6]; }; +struct seccomp_notif_sizes { + __u16 seccomp_notif; + __u16 seccomp_notif_resp; + __u16 seccomp_data; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +#define SECCOMP_IOC_MAGIC '!' +#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr) +#define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type) +#define SECCOMP_IOW(nr, type) _IOW(SECCOMP_IOC_MAGIC, nr, type) +#define SECCOMP_IOWR(nr, type) _IOWR(SECCOMP_IOC_MAGIC, nr, type) + +/* Flags for seccomp notification fd ioctl. */ +#define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif) +#define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \ + struct seccomp_notif_resp) +#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOR(2, __u64) #endif /* _UAPI_LINUX_SECCOMP_H */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index dce5f9dae121..df4a7534e239 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -281,4 +281,7 @@ /* MediaTek BTIF */ #define PORT_MTK_BTIF 117 +/* RDA UART */ +#define PORT_RDA 118 + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index f80135e5feaa..86dc24a96c90 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -243,6 +243,7 @@ enum LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */ LINUX_MIB_TCPRETRANSFAIL, /* TCPRetransFail */ LINUX_MIB_TCPRCVCOALESCE, /* TCPRcvCoalesce */ + LINUX_MIB_TCPBACKLOGCOALESCE, /* TCPBacklogCoalesce */ LINUX_MIB_TCPOFOQUEUE, /* TCPOFOQueue */ LINUX_MIB_TCPOFODROP, /* TCPOFODrop */ LINUX_MIB_TCPOFOMERGE, /* TCPOFOMerge */ diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index d71013fffaf6..87aa2a6d9125 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -153,6 +153,7 @@ enum KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ KERN_PANIC_ON_WARN=77, /* int: call panic() in WARN() functions */ + KERN_PANIC_PRINT=78, /* ulong: bitmask to print system info on panic */ }; diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index e02d31986ff9..8bb6cc5f3235 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -266,6 +266,7 @@ enum { TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */ TCP_NLA_DSACK_DUPS, /* DSACK blocks received */ TCP_NLA_REORD_SEEN, /* reordering events seen */ + TCP_NLA_SRTT, /* smoothed RTT in usecs */ }; /* for TCP_MD5SIG socket option */ diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h index 09502de447f5..30baccb6c9c4 100644 --- a/include/uapi/linux/udp.h +++ b/include/uapi/linux/udp.h @@ -33,6 +33,7 @@ struct udphdr { #define UDP_NO_CHECK6_TX 101 /* Disable sending checksum for UDP6X */ #define UDP_NO_CHECK6_RX 102 /* Disable accpeting checksum for UDP6 */ #define UDP_SEGMENT 103 /* Set GSO segmentation size */ 
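/*
 * Illustrative aside (not part of the udp.h hunk): a minimal sketch of
 * enabling the UDP_GRO socket option added on the next line.  Once set,
 * coalesced datagrams can be delivered to the socket, with the segment
 * size reported back through a UDP_GRO control message on recvmsg().
 * The helper name is an assumption for the example.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/udp.h>

static int enable_udp_gro(int fd)
{
	int one = 1;

	/* UDP socket options are set at the IPPROTO_UDP (SOL_UDP) level. */
	return setsockopt(fd, IPPROTO_UDP, UDP_GRO, &one, sizeof(one));
}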
+#define UDP_GRO 104 /* This socket can receive UDP GRO packets */ /* UDP encapsulation types */ #define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */ diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h index 4f7b892377cd..7d21c1634b4d 100644 --- a/include/uapi/linux/v4l2-common.h +++ b/include/uapi/linux/v4l2-common.h @@ -79,24 +79,11 @@ /* Current composing area plus all padding pixels */ #define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103 -/* Backward compatibility target definitions --- to be removed. */ -#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP -#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE -#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP -#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE -#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS -#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS - /* Selection flags */ #define V4L2_SEL_FLAG_GE (1 << 0) #define V4L2_SEL_FLAG_LE (1 << 1) #define V4L2_SEL_FLAG_KEEP_CONFIG (1 << 2) -/* Backward compatibility flag definitions --- to be removed. */ -#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE -#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE -#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG - struct v4l2_edid { __u32 pad; __u32 start_block; @@ -105,4 +92,19 @@ struct v4l2_edid { __u8 *edid; }; +#ifndef __KERNEL__ +/* Backward compatibility target definitions --- to be removed. */ +#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP +#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE +#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP +#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE +#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS +#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS + +/* Backward compatibility flag definitions --- to be removed. 
*/ +#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE +#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE +#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG +#endif + #endif /* __V4L2_COMMON__ */ diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 51b095898f4b..3dcfc6148f99 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -50,6 +50,8 @@ #ifndef __LINUX_V4L2_CONTROLS_H #define __LINUX_V4L2_CONTROLS_H +#include <linux/types.h> + /* Control classes */ #define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */ #define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */ @@ -402,9 +404,6 @@ enum v4l2_mpeg_video_multi_slice_mode { #define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228) #define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229) -#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250) -#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251) - #define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300) #define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301) #define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302) @@ -1095,66 +1094,4 @@ enum v4l2_detect_md_mode { #define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3) #define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4) -#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1 -#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2 -#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3 -#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4 - -struct v4l2_mpeg2_sequence { - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ - __u16 horizontal_size; - __u16 vertical_size; - __u32 vbv_buffer_size; - - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ - __u8 profile_and_level_indication; - __u8 progressive_sequence; - __u8 chroma_format; -}; - -struct v4l2_mpeg2_picture { - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ - __u8 picture_coding_type; - - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ - __u8 f_code[2][2]; - __u8 intra_dc_precision; - __u8 picture_structure; - __u8 top_field_first; - __u8 frame_pred_frame_dct; - __u8 concealment_motion_vectors; - __u8 q_scale_type; - __u8 intra_vlc_format; - __u8 alternate_scan; - __u8 repeat_first_field; - __u8 progressive_frame; -}; - -struct v4l2_ctrl_mpeg2_slice_params { - __u32 bit_size; - __u32 data_bit_offset; - - struct v4l2_mpeg2_sequence sequence; - struct v4l2_mpeg2_picture picture; - - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ - __u8 quantiser_scale_code; - - __u8 backward_ref_index; - __u8 forward_ref_index; -}; - -struct v4l2_ctrl_mpeg2_quantization { - /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */ - __u8 load_intra_quantiser_matrix; - __u8 load_non_intra_quantiser_matrix; - __u8 load_chroma_intra_quantiser_matrix; - __u8 load_chroma_non_intra_quantiser_matrix; - - __u8 intra_quantiser_matrix[64]; - __u8 non_intra_quantiser_matrix[64]; - __u8 chroma_intra_quantiser_matrix[64]; - __u8 chroma_non_intra_quantiser_matrix[64]; -}; - #endif diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 813102810f53..02bb7ad6e986 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -354,6 +354,21 @@ struct vfio_region_gfx_edid { }; /* + * 10de vendor sub-type + * + * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space. 
+ */ +#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1) + +/* + * 1014 vendor sub-type + * + * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU + * to do TLB invalidation on a GPU. + */ +#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1) + +/* * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped * which allows direct access to non-MSIX registers which happened to be within * the same system page. @@ -363,6 +378,33 @@ struct vfio_region_gfx_edid { */ #define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE 3 +/* + * Capability with compressed real address (aka SSA - small system address) + * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing + * and by the userspace to associate a NVLink bridge with a GPU. + */ +#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4 + +struct vfio_region_info_cap_nvlink2_ssatgt { + struct vfio_info_cap_header header; + __u64 tgt; +}; + +/* + * Capability with an NVLink link speed. The value is read by + * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed" + * property in the device tree. The value is fixed in the hardware + * and failing to provide the correct value results in the link + * not working with no indication from the driver why. + */ +#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5 + +struct vfio_region_info_cap_nvlink2_lnkspd { + struct vfio_info_cap_header header; + __u32 link_speed; + __u32 __pad; +}; + /** * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9, * struct vfio_irq_info) diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index 84c3de89696a..40d028eed645 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -11,94 +11,9 @@ * device configuration. */ +#include <linux/vhost_types.h> #include <linux/types.h> -#include <linux/compiler.h> #include <linux/ioctl.h> -#include <linux/virtio_config.h> -#include <linux/virtio_ring.h> - -struct vhost_vring_state { - unsigned int index; - unsigned int num; -}; - -struct vhost_vring_file { - unsigned int index; - int fd; /* Pass -1 to unbind from file. */ - -}; - -struct vhost_vring_addr { - unsigned int index; - /* Option flags. */ - unsigned int flags; - /* Flag values: */ - /* Whether log address is valid. If set enables logging. */ -#define VHOST_VRING_F_LOG 0 - - /* Start of array of descriptors (virtually contiguous) */ - __u64 desc_user_addr; - /* Used structure address. Must be 32 bit aligned */ - __u64 used_user_addr; - /* Available structure address. Must be 16 bit aligned */ - __u64 avail_user_addr; - /* Logging support. */ - /* Log writes to used structure, at offset calculated from specified - * address. Address must be 32 bit aligned. */ - __u64 log_guest_addr; -}; - -/* no alignment requirement */ -struct vhost_iotlb_msg { - __u64 iova; - __u64 size; - __u64 uaddr; -#define VHOST_ACCESS_RO 0x1 -#define VHOST_ACCESS_WO 0x2 -#define VHOST_ACCESS_RW 0x3 - __u8 perm; -#define VHOST_IOTLB_MISS 1 -#define VHOST_IOTLB_UPDATE 2 -#define VHOST_IOTLB_INVALIDATE 3 -#define VHOST_IOTLB_ACCESS_FAIL 4 - __u8 type; -}; - -#define VHOST_IOTLB_MSG 0x1 -#define VHOST_IOTLB_MSG_V2 0x2 - -struct vhost_msg { - int type; - union { - struct vhost_iotlb_msg iotlb; - __u8 padding[64]; - }; -}; - -struct vhost_msg_v2 { - __u32 type; - __u32 reserved; - union { - struct vhost_iotlb_msg iotlb; - __u8 padding[64]; - }; -}; - -struct vhost_memory_region { - __u64 guest_phys_addr; - __u64 memory_size; /* bytes */ - __u64 userspace_addr; - __u64 flags_padding; /* No flags are currently specified. 
*/ -}; - -/* All region addresses and sizes must be 4K aligned. */ -#define VHOST_PAGE_SIZE 0x1000 - -struct vhost_memory { - __u32 nregions; - __u32 padding; - struct vhost_memory_region regions[0]; -}; /* ioctls */ @@ -186,31 +101,7 @@ struct vhost_memory { * device. This can be used to stop the ring (e.g. for migration). */ #define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file) -/* Feature bits */ -/* Log all write descriptors. Can be changed while device is active. */ -#define VHOST_F_LOG_ALL 26 -/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */ -#define VHOST_NET_F_VIRTIO_NET_HDR 27 - -/* VHOST_SCSI specific definitions */ - -/* - * Used by QEMU userspace to ensure a consistent vhost-scsi ABI. - * - * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate + - * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage - * ABI Rev 1: January 2013. Ignore vhost_tpgt filed in struct vhost_scsi_target. - * All the targets under vhost_wwpn can be seen and used by guset. - */ - -#define VHOST_SCSI_ABI_VERSION 1 - -struct vhost_scsi_target { - int abi_version; - char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */ - unsigned short vhost_tpgt; - unsigned short reserved; -}; +/* VHOST_SCSI specific defines */ #define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) #define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h new file mode 100644 index 000000000000..c907290ff065 --- /dev/null +++ b/include/uapi/linux/vhost_types.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_VHOST_TYPES_H +#define _LINUX_VHOST_TYPES_H +/* Userspace interface for in-kernel virtio accelerators. */ + +/* vhost is used to reduce the number of system calls involved in virtio. + * + * Existing virtio net code is used in the guest without modification. + * + * This header includes interface used by userspace hypervisor for + * device configuration. + */ + +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ring.h> + +struct vhost_vring_state { + unsigned int index; + unsigned int num; +}; + +struct vhost_vring_file { + unsigned int index; + int fd; /* Pass -1 to unbind from file. */ + +}; + +struct vhost_vring_addr { + unsigned int index; + /* Option flags. */ + unsigned int flags; + /* Flag values: */ + /* Whether log address is valid. If set enables logging. */ +#define VHOST_VRING_F_LOG 0 + + /* Start of array of descriptors (virtually contiguous) */ + __u64 desc_user_addr; + /* Used structure address. Must be 32 bit aligned */ + __u64 used_user_addr; + /* Available structure address. Must be 16 bit aligned */ + __u64 avail_user_addr; + /* Logging support. */ + /* Log writes to used structure, at offset calculated from specified + * address. Address must be 32 bit aligned. 
*/ + __u64 log_guest_addr; +}; + +/* no alignment requirement */ +struct vhost_iotlb_msg { + __u64 iova; + __u64 size; + __u64 uaddr; +#define VHOST_ACCESS_RO 0x1 +#define VHOST_ACCESS_WO 0x2 +#define VHOST_ACCESS_RW 0x3 + __u8 perm; +#define VHOST_IOTLB_MISS 1 +#define VHOST_IOTLB_UPDATE 2 +#define VHOST_IOTLB_INVALIDATE 3 +#define VHOST_IOTLB_ACCESS_FAIL 4 + __u8 type; +}; + +#define VHOST_IOTLB_MSG 0x1 +#define VHOST_IOTLB_MSG_V2 0x2 + +struct vhost_msg { + int type; + union { + struct vhost_iotlb_msg iotlb; + __u8 padding[64]; + }; +}; + +struct vhost_msg_v2 { + __u32 type; + __u32 reserved; + union { + struct vhost_iotlb_msg iotlb; + __u8 padding[64]; + }; +}; + +struct vhost_memory_region { + __u64 guest_phys_addr; + __u64 memory_size; /* bytes */ + __u64 userspace_addr; + __u64 flags_padding; /* No flags are currently specified. */ +}; + +/* All region addresses and sizes must be 4K aligned. */ +#define VHOST_PAGE_SIZE 0x1000 + +struct vhost_memory { + __u32 nregions; + __u32 padding; + struct vhost_memory_region regions[0]; +}; + +/* VHOST_SCSI specific definitions */ + +/* + * Used by QEMU userspace to ensure a consistent vhost-scsi ABI. + * + * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate + + * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage + * ABI Rev 1: January 2013. Ignore vhost_tpgt field in struct vhost_scsi_target. + * All the targets under vhost_wwpn can be seen and used by guest. + */ + +#define VHOST_SCSI_ABI_VERSION 1 + +struct vhost_scsi_target { + int abi_version; + char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */ + unsigned short vhost_tpgt; + unsigned short reserved; +}; + +/* Feature bits */ +/* Log all write descriptors. Can be changed while device is active. */ +#define VHOST_F_LOG_ALL 26 +/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. 
*/ +#define VHOST_NET_F_VIRTIO_NET_HDR 27 + +#endif diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index c8e8ff810190..b5671ce2724f 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -145,6 +145,7 @@ enum v4l2_buf_type { V4L2_BUF_TYPE_SDR_CAPTURE = 11, V4L2_BUF_TYPE_SDR_OUTPUT = 12, V4L2_BUF_TYPE_META_CAPTURE = 13, + V4L2_BUF_TYPE_META_OUTPUT = 14, /* Deprecated, do not use */ V4L2_BUF_TYPE_PRIVATE = 0x80, }; @@ -469,6 +470,7 @@ struct v4l2_capability { #define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */ #define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */ #define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */ +#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */ #define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */ @@ -689,6 +691,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */ #define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */ #define V4L2_PIX_FMT_SUNXI_TILED_NV12 v4l2_fourcc('S', 'T', '1', '2') /* Sunxi Tiled NV12 Format */ +#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */ /* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */ #define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */ @@ -879,6 +882,7 @@ struct v4l2_requestbuffers { #define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1) #define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2) #define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3) +#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4) /** * struct v4l2_plane - plane info for multi-planar buffers @@ -1622,8 +1626,6 @@ struct v4l2_ext_control { __u8 __user *p_u8; __u16 __user *p_u16; __u32 __user *p_u32; - struct v4l2_ctrl_mpeg2_slice_params __user *p_mpeg2_slice_params; - struct v4l2_ctrl_mpeg2_quantization __user *p_mpeg2_quantization; void __user *ptr; }; } __attribute__ ((packed)); @@ -1669,8 +1671,6 @@ enum v4l2_ctrl_type { V4L2_CTRL_TYPE_U8 = 0x0100, V4L2_CTRL_TYPE_U16 = 0x0101, V4L2_CTRL_TYPE_U32 = 0x0102, - V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS = 0x0103, - V4L2_CTRL_TYPE_MPEG2_QUANTIZATION = 0x0104, }; /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index 9ebe4d968dd5..0f99d7b49ede 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h @@ -38,6 +38,8 @@ #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ #define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */ #define VIRTIO_BLK_F_MQ 12 /* support more than one vq */ +#define VIRTIO_BLK_F_DISCARD 13 /* DISCARD is supported */ +#define VIRTIO_BLK_F_WRITE_ZEROES 14 /* WRITE ZEROES is supported */ /* Legacy feature bits */ #ifndef VIRTIO_BLK_NO_LEGACY @@ -86,6 +88,39 @@ struct virtio_blk_config { /* number of vqs, only available when VIRTIO_BLK_F_MQ is set */ __u16 num_queues; + + /* the next 3 entries are guarded by VIRTIO_BLK_F_DISCARD */ + /* + * The maximum discard sectors (in 512-byte sectors) for + * one segment. + */ + __u32 max_discard_sectors; + /* + * The maximum number of discard segments in a + * discard command. + */ + __u32 max_discard_seg; + /* Discard commands must be aligned to this number of sectors. 
*/ + __u32 discard_sector_alignment; + + /* the next 3 entries are guarded by VIRTIO_BLK_F_WRITE_ZEROES */ + /* + * The maximum number of write zeroes sectors (in 512-byte sectors) in + * one segment. + */ + __u32 max_write_zeroes_sectors; + /* + * The maximum number of segments in a write zeroes + * command. + */ + __u32 max_write_zeroes_seg; + /* + * Set if a VIRTIO_BLK_T_WRITE_ZEROES request may result in the + * deallocation of one or more of the sectors. + */ + __u8 write_zeroes_may_unmap; + + __u8 unused1[3]; } __attribute__((packed)); /* @@ -114,6 +149,12 @@ struct virtio_blk_config { /* Get device ID command */ #define VIRTIO_BLK_T_GET_ID 8 +/* Discard command */ +#define VIRTIO_BLK_T_DISCARD 11 + +/* Write zeroes command */ +#define VIRTIO_BLK_T_WRITE_ZEROES 13 + #ifndef VIRTIO_BLK_NO_LEGACY /* Barrier before this op. */ #define VIRTIO_BLK_T_BARRIER 0x80000000 @@ -133,6 +174,19 @@ struct virtio_blk_outhdr { __virtio64 sector; }; +/* Unmap this range (only valid for write zeroes command) */ +#define VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP 0x00000001 + +/* Discard/write zeroes range for each request. */ +struct virtio_blk_discard_write_zeroes { + /* discard/write zeroes start sector */ + __le64 sector; + /* number of discard/write zeroes sectors */ + __le32 num_sectors; + /* flags for this range */ + __le32 flags; +}; + #ifndef VIRTIO_BLK_NO_LEGACY struct virtio_scsi_inhdr { __virtio32 errors; diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 449132c76b1c..1196e1c1d4f6 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -75,6 +75,9 @@ */ #define VIRTIO_F_IOMMU_PLATFORM 33 +/* This feature indicates support for the packed virtqueue layout. */ +#define VIRTIO_F_RING_PACKED 34 + /* * Does the device support Single Root I/O Virtualization? */ diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h index f43c3c6171ff..8e88eba1fa7a 100644 --- a/include/uapi/linux/virtio_gpu.h +++ b/include/uapi/linux/virtio_gpu.h @@ -41,6 +41,7 @@ #include <linux/types.h> #define VIRTIO_GPU_F_VIRGL 0 +#define VIRTIO_GPU_F_EDID 1 enum virtio_gpu_ctrl_type { VIRTIO_GPU_UNDEFINED = 0, @@ -56,6 +57,7 @@ enum virtio_gpu_ctrl_type { VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, VIRTIO_GPU_CMD_GET_CAPSET_INFO, VIRTIO_GPU_CMD_GET_CAPSET, + VIRTIO_GPU_CMD_GET_EDID, /* 3d commands */ VIRTIO_GPU_CMD_CTX_CREATE = 0x0200, @@ -76,6 +78,7 @@ enum virtio_gpu_ctrl_type { VIRTIO_GPU_RESP_OK_DISPLAY_INFO, VIRTIO_GPU_RESP_OK_CAPSET_INFO, VIRTIO_GPU_RESP_OK_CAPSET, + VIRTIO_GPU_RESP_OK_EDID, /* error responses */ VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200, @@ -291,6 +294,21 @@ struct virtio_gpu_resp_capset { __u8 capset_data[]; }; +/* VIRTIO_GPU_CMD_GET_EDID */ +struct virtio_gpu_cmd_get_edid { + struct virtio_gpu_ctrl_hdr hdr; + __le32 scanout; + __le32 padding; +}; + +/* VIRTIO_GPU_RESP_OK_EDID */ +struct virtio_gpu_resp_edid { + struct virtio_gpu_ctrl_hdr hdr; + __le32 size; + __le32 padding; + __u8 edid[1024]; +}; + #define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) struct virtio_gpu_config { diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 6d5d5faa989b..2414f8af26b3 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -44,6 +44,13 @@ /* This means the buffer contains a list of buffer descriptors. */ #define VRING_DESC_F_INDIRECT 4 +/* + * Mark a descriptor as available or used in packed ring. + * Notice: they are defined as shifts instead of shifted values. 
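
Before the packed-ring flag definitions continue below, a short sketch of filling the discard/write-zeroes range added to virtio_blk.h above. Endianness helpers come from <endian.h>; whether the segment ends up in a VIRTIO_BLK_T_DISCARD or VIRTIO_BLK_T_WRITE_ZEROES request is decided by the request header, which this sketch deliberately leaves out.

#include <stdint.h>
#include <endian.h>
#include <linux/virtio_blk.h>

static void fill_discard_seg(struct virtio_blk_discard_write_zeroes *seg,
                             uint64_t start_sector, uint32_t num_sectors,
                             int unmap)
{
	seg->sector = htole64(start_sector);
	seg->num_sectors = htole32(num_sectors);
	/* The unmap flag is only meaningful for write-zeroes requests. */
	seg->flags = htole32(unmap ? VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP : 0);
}
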
+ */ +#define VRING_PACKED_DESC_F_AVAIL 7 +#define VRING_PACKED_DESC_F_USED 15 + /* The Host uses this in used->flags to advise the Guest: don't kick me when * you add a buffer. It's unreliable, so it's simply an optimization. Guest * will still kick if it's out of buffers. */ @@ -53,6 +60,23 @@ * optimization. */ #define VRING_AVAIL_F_NO_INTERRUPT 1 +/* Enable events in packed ring. */ +#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0 +/* Disable events in packed ring. */ +#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1 +/* + * Enable events for a specific descriptor in packed ring. + * (as specified by Descriptor Ring Change Event Offset/Wrap Counter). + * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated. + */ +#define VRING_PACKED_EVENT_FLAG_DESC 0x2 + +/* + * Wrap counter bit shift in event suppression structure + * of packed ring. + */ +#define VRING_PACKED_EVENT_F_WRAP_CTR 15 + /* We support indirect buffer descriptors */ #define VIRTIO_RING_F_INDIRECT_DESC 28 @@ -171,4 +195,32 @@ static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old); } +struct vring_packed_desc_event { + /* Descriptor Ring Change Event Offset/Wrap Counter. */ + __le16 off_wrap; + /* Descriptor Ring Change Event Flags. */ + __le16 flags; +}; + +struct vring_packed_desc { + /* Buffer Address. */ + __le64 addr; + /* Buffer Length. */ + __le32 len; + /* Buffer ID. */ + __le16 id; + /* The flags depending on descriptor type. */ + __le16 flags; +}; + +struct vring_packed { + unsigned int num; + + struct vring_packed_desc *desc; + + struct vring_packed_desc_event *driver; + + struct vring_packed_desc_event *device; +}; + #endif /* _UAPI_LINUX_VIRTIO_RING_H */ diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index c6a984c0c881..01ac5853d9ac 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -6,7 +6,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 Intel Corporation. + * Copyright(c) 2015 - 2018 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -95,7 +95,7 @@ #define HFI1_CAP_SDMA_AHG (1UL << 2) /* Enable SDMA AHG support */ #define HFI1_CAP_EXTENDED_PSN (1UL << 3) /* Enable Extended PSN support */ #define HFI1_CAP_HDRSUPP (1UL << 4) /* Enable Header Suppression */ -/* 1UL << 5 unused */ +#define HFI1_CAP_TID_RDMA (1UL << 5) /* Enable TID RDMA operations */ #define HFI1_CAP_USE_SDMA_HEAD (1UL << 6) /* DMA Hdr Q tail vs. 
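
Looking back at the packed virtqueue additions above: because VRING_PACKED_DESC_F_AVAIL and VRING_PACKED_DESC_F_USED are bit positions rather than masks, consumers have to shift them. Here is a sketch of the usual used-descriptor test, mirroring the logic a driver implementation would typically apply; the wrap-counter bookkeeping itself is assumed to live elsewhere.

#include <stdbool.h>
#include <stdint.h>
#include <endian.h>
#include <linux/virtio_ring.h>

/* A packed descriptor is used when AVAIL == USED == our used wrap counter. */
static bool packed_desc_is_used(const struct vring_packed_desc *desc,
                                bool used_wrap_counter)
{
	uint16_t flags = le16toh(desc->flags);
	bool avail = flags & (1 << VRING_PACKED_DESC_F_AVAIL);
	bool used  = flags & (1 << VRING_PACKED_DESC_F_USED);

	return avail == used && used == used_wrap_counter;
}
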
use CSR */ #define HFI1_CAP_MULTI_PKT_EGR (1UL << 7) /* Enable multi-packet Egr buffs*/ #define HFI1_CAP_NODROP_RHQ_FULL (1UL << 8) /* Don't drop on Hdr Q full */ @@ -106,7 +106,7 @@ #define HFI1_CAP_NO_INTEGRITY (1UL << 13) /* Enable ctxt integrity checks */ #define HFI1_CAP_PKEY_CHECK (1UL << 14) /* Enable ctxt PKey checking */ #define HFI1_CAP_STATIC_RATE_CTRL (1UL << 15) /* Allow PBC.StaticRateControl */ -/* 1UL << 16 unused */ +#define HFI1_CAP_OPFN (1UL << 16) /* Enable the OPFN protocol */ #define HFI1_CAP_SDMA_HEAD_CHECK (1UL << 17) /* SDMA head checking */ #define HFI1_CAP_EARLY_CREDIT_RETURN (1UL << 18) /* early credit return */ diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index c1f87735514f..ef3c7ec793a7 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -46,6 +46,12 @@ struct hns_roce_ib_create_cq_resp { __aligned_u64 cap_flags; }; +struct hns_roce_ib_create_srq { + __aligned_u64 buf_addr; + __aligned_u64 db_addr; + __aligned_u64 que_addr; +}; + struct hns_roce_ib_create_qp { __aligned_u64 buf_addr; __aligned_u64 db_addr; diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index 2c881aaf05c2..64f0e3aacd3f 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -63,6 +63,23 @@ enum { UVERBS_ATTR_UHW_OUT, }; +enum uverbs_methods_device { + UVERBS_METHOD_INVOKE_WRITE, + UVERBS_METHOD_INFO_HANDLES, + UVERBS_METHOD_QUERY_PORT, +}; + +enum uverbs_attrs_invoke_write_cmd_attr_ids { + UVERBS_ATTR_CORE_IN, + UVERBS_ATTR_CORE_OUT, + UVERBS_ATTR_WRITE_CMD, +}; + +enum uverbs_attrs_query_port_cmd_attr_ids { + UVERBS_ATTR_QUERY_PORT_PORT_NUM, + UVERBS_ATTR_QUERY_PORT_RESP, +}; + enum uverbs_attrs_create_cq_cmd_attr_ids { UVERBS_ATTR_CREATE_CQ_HANDLE, UVERBS_ATTR_CREATE_CQ_CQE, @@ -135,6 +152,19 @@ enum uverbs_attrs_reg_dm_mr_cmd_attr_ids { enum uverbs_methods_mr { UVERBS_METHOD_DM_MR_REG, + UVERBS_METHOD_MR_DESTROY, + UVERBS_METHOD_ADVISE_MR, +}; + +enum uverbs_attrs_mr_destroy_ids { + UVERBS_ATTR_DESTROY_MR_HANDLE, +}; + +enum uverbs_attrs_advise_mr_cmd_attr_ids { + UVERBS_ATTR_ADVISE_MR_PD_HANDLE, + UVERBS_ATTR_ADVISE_MR_ADVICE, + UVERBS_ATTR_ADVISE_MR_FLAGS, + UVERBS_ATTR_ADVISE_MR_SGE_LIST, }; enum uverbs_attrs_create_counters_cmd_attr_ids { @@ -157,4 +187,58 @@ enum uverbs_methods_actions_counters_ops { UVERBS_METHOD_COUNTERS_READ, }; +enum uverbs_attrs_info_handles_id { + UVERBS_ATTR_INFO_OBJECT_ID, + UVERBS_ATTR_INFO_TOTAL_HANDLES, + UVERBS_ATTR_INFO_HANDLES_LIST, +}; + +enum uverbs_methods_pd { + UVERBS_METHOD_PD_DESTROY, +}; + +enum uverbs_attrs_pd_destroy_ids { + UVERBS_ATTR_DESTROY_PD_HANDLE, +}; + +enum uverbs_methods_mw { + UVERBS_METHOD_MW_DESTROY, +}; + +enum uverbs_attrs_mw_destroy_ids { + UVERBS_ATTR_DESTROY_MW_HANDLE, +}; + +enum uverbs_methods_xrcd { + UVERBS_METHOD_XRCD_DESTROY, +}; + +enum uverbs_attrs_xrcd_destroy_ids { + UVERBS_ATTR_DESTROY_XRCD_HANDLE, +}; + +enum uverbs_methods_ah { + UVERBS_METHOD_AH_DESTROY, +}; + +enum uverbs_attrs_ah_destroy_ids { + UVERBS_ATTR_DESTROY_AH_HANDLE, +}; + +enum uverbs_methods_rwq_ind_tbl { + UVERBS_METHOD_RWQ_IND_TBL_DESTROY, +}; + +enum uverbs_attrs_rwq_ind_tbl_destroy_ids { + UVERBS_ATTR_DESTROY_RWQ_IND_TBL_HANDLE, +}; + +enum uverbs_methods_flow { + UVERBS_METHOD_FLOW_DESTROY, +}; + +enum uverbs_attrs_flow_destroy_ids { + UVERBS_ATTR_DESTROY_FLOW_HANDLE, +}; + #endif diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 6cdf192070a2..72c7fc75f960 
100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -35,6 +35,7 @@ #define IB_USER_IOCTL_VERBS_H #include <linux/types.h> +#include <rdma/ib_user_verbs.h> #ifndef RDMA_UAPI_PTR #define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name @@ -157,4 +158,19 @@ enum ib_uverbs_read_counters_flags { IB_UVERBS_READ_COUNTERS_PREFER_CACHED = 1 << 0, }; +enum ib_uverbs_advise_mr_advice { + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH, + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE, +}; + +enum ib_uverbs_advise_mr_flag { + IB_UVERBS_ADVISE_MR_FLAG_FLUSH = 1 << 0, +}; + +struct ib_uverbs_query_port_resp_ex { + struct ib_uverbs_query_port_resp legacy_resp; + __u16 port_cap_flags2; + __u8 reserved[6]; +}; + #endif diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 1254b51a551a..480d9a60b68e 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -46,7 +46,7 @@ #define IB_USER_VERBS_ABI_VERSION 6 #define IB_USER_VERBS_CMD_THRESHOLD 50 -enum { +enum ib_uverbs_write_cmds { IB_USER_VERBS_CMD_GET_CONTEXT, IB_USER_VERBS_CMD_QUERY_DEVICE, IB_USER_VERBS_CMD_QUERY_PORT, @@ -164,6 +164,7 @@ struct ib_uverbs_get_context { struct ib_uverbs_get_context_resp { __u32 async_fd; __u32 num_comp_vectors; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_device { @@ -310,6 +311,7 @@ struct ib_uverbs_alloc_pd { struct ib_uverbs_alloc_pd_resp { __u32 pd_handle; + __u32 driver_data[0]; }; struct ib_uverbs_dealloc_pd { @@ -325,6 +327,7 @@ struct ib_uverbs_open_xrcd { struct ib_uverbs_open_xrcd_resp { __u32 xrcd_handle; + __u32 driver_data[0]; }; struct ib_uverbs_close_xrcd { @@ -345,6 +348,7 @@ struct ib_uverbs_reg_mr_resp { __u32 mr_handle; __u32 lkey; __u32 rkey; + __u32 driver_data[0]; }; struct ib_uverbs_rereg_mr { @@ -356,11 +360,13 @@ struct ib_uverbs_rereg_mr { __aligned_u64 hca_va; __u32 pd_handle; __u32 access_flags; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_rereg_mr_resp { __u32 lkey; __u32 rkey; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_dereg_mr { @@ -372,11 +378,13 @@ struct ib_uverbs_alloc_mw { __u32 pd_handle; __u8 mw_type; __u8 reserved[3]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_alloc_mw_resp { __u32 mw_handle; __u32 rkey; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_dealloc_mw { @@ -419,6 +427,7 @@ struct ib_uverbs_ex_create_cq { struct ib_uverbs_create_cq_resp { __u32 cq_handle; __u32 cqe; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_ex_create_cq_resp { @@ -629,6 +638,7 @@ struct ib_uverbs_create_qp_resp { __u32 max_recv_sge; __u32 max_inline_data; __u32 reserved; + __u32 driver_data[0]; }; struct ib_uverbs_ex_create_qp_resp { @@ -733,9 +743,6 @@ struct ib_uverbs_ex_modify_qp { __u32 reserved; }; -struct ib_uverbs_modify_qp_resp { -}; - struct ib_uverbs_ex_modify_qp_resp { __u32 comp_mask; __u32 response_length; @@ -863,10 +870,12 @@ struct ib_uverbs_create_ah { __u32 pd_handle; __u32 reserved; struct ib_uverbs_ah_attr attr; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_create_ah_resp { __u32 ah_handle; + __u32 driver_data[0]; }; struct ib_uverbs_destroy_ah { @@ -1175,6 +1184,7 @@ struct ib_uverbs_create_srq_resp { __u32 max_wr; __u32 max_sge; __u32 srqn; + __u32 driver_data[0]; }; struct ib_uverbs_modify_srq { diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 8fa9f90e2bb1..87b3198f4b5d 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -48,6 +48,7 @@ enum { 
MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6, MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7, MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, + MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9, }; enum { @@ -236,6 +237,7 @@ enum mlx5_ib_query_dev_resp_flags { /* Support 128B CQE compression */ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0, MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1, + MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2, }; enum mlx5_ib_tunnel_offloads { diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index 408e220034de..b8d121d457f1 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -158,6 +158,7 @@ enum mlx5_ib_create_flow_attrs { MLX5_IB_ATTR_CREATE_FLOW_MATCHER, MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, MLX5_IB_ATTR_CREATE_FLOW_TAG, + MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, }; enum mlx5_ib_destoy_flow_attrs { diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index f9c41bf59efc..2e18b77a817f 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -283,6 +283,9 @@ enum rdma_nldev_attr { /* * Device and port capabilities + * + * When used for port info, first 32-bits are CapabilityMask followed by + * 16-bit CapabilityMask2. */ RDMA_NLDEV_ATTR_CAP_FLAGS, /* u64 */ diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h index f0a547d86679..ae12826ed641 100644 --- a/include/uapi/sound/firewire.h +++ b/include/uapi/sound/firewire.h @@ -12,6 +12,7 @@ #define SNDRV_FIREWIRE_EVENT_EFW_RESPONSE 0x4e617475 #define SNDRV_FIREWIRE_EVENT_DIGI00X_MESSAGE 0x746e736c #define SNDRV_FIREWIRE_EVENT_MOTU_NOTIFICATION 0x64776479 +#define SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL 0x7473636d struct snd_firewire_event_common { unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */ @@ -53,12 +54,24 @@ struct snd_firewire_event_motu_notification { __u32 message; /* MOTU-specific bits. */ }; +struct snd_firewire_tascam_change { + unsigned int index; + __be32 before; + __be32 after; +}; + +struct snd_firewire_event_tascam_control { + unsigned int type; + struct snd_firewire_tascam_change changes[0]; +}; + union snd_firewire_event { struct snd_firewire_event_common common; struct snd_firewire_event_lock_status lock_status; struct snd_firewire_event_dice_notification dice_notification; struct snd_firewire_event_efw_response efw_response; struct snd_firewire_event_digi00x_message digi00x_message; + struct snd_firewire_event_tascam_control tascam_control; struct snd_firewire_event_motu_notification motu_notification; }; @@ -66,6 +79,7 @@ union snd_firewire_event { #define SNDRV_FIREWIRE_IOCTL_GET_INFO _IOR('H', 0xf8, struct snd_firewire_get_info) #define SNDRV_FIREWIRE_IOCTL_LOCK _IO('H', 0xf9) #define SNDRV_FIREWIRE_IOCTL_UNLOCK _IO('H', 0xfa) +#define SNDRV_FIREWIRE_IOCTL_TASCAM_STATE _IOR('H', 0xfb, struct snd_firewire_tascam_state) #define SNDRV_FIREWIRE_TYPE_DICE 1 #define SNDRV_FIREWIRE_TYPE_FIREWORKS 2 @@ -88,4 +102,10 @@ struct snd_firewire_get_info { * Returns -EBUSY if the driver is already streaming. 
*/ +#define SNDRV_FIREWIRE_TASCAM_STATE_COUNT 64 + +struct snd_firewire_tascam_state { + __be32 data[SNDRV_FIREWIRE_TASCAM_STATE_COUNT]; +}; + #endif /* _UAPI_SOUND_FIREWIRE_H_INCLUDED */ diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index abbad94e14a1..e582e8e7527a 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -246,6 +246,9 @@ struct ipu_image { struct v4l2_rect rect; dma_addr_t phys0; dma_addr_t phys1; + /* chroma plane offset overrides */ + u32 u_offset; + u32 v_offset; }; void ipu_cpmem_zero(struct ipuv3_channel *ch); @@ -387,6 +390,12 @@ int ipu_ic_task_init(struct ipu_ic *ic, int out_width, int out_height, enum ipu_color_space in_cs, enum ipu_color_space out_cs); +int ipu_ic_task_init_rsc(struct ipu_ic *ic, + int in_width, int in_height, + int out_width, int out_height, + enum ipu_color_space in_cs, + enum ipu_color_space out_cs, + u32 rsc); int ipu_ic_task_graphics_init(struct ipu_ic *ic, enum ipu_color_space in_g_cs, bool galpha_en, u32 galpha, diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h index d8fc96ed11e9..4ba5efe8d086 100644 --- a/include/video/samsung_fimd.h +++ b/include/video/samsung_fimd.h @@ -198,6 +198,7 @@ #define WINCONx_BURSTLEN_8WORD (0x1 << 9) #define WINCONx_BURSTLEN_4WORD (0x2 << 9) #define WINCONx_ENWIN (1 << 0) +#define WINCONx_BLEND_MODE_MASK (0xc2) #define WINCON0_BPPMODE_MASK (0xf << 2) #define WINCON0_BPPMODE_SHIFT 2 @@ -211,6 +212,7 @@ #define WINCON0_BPPMODE_24BPP_888 (0xb << 2) #define WINCON1_LOCALSEL_CAMIF (1 << 23) +#define WINCON1_ALPHA_MUL (1 << 7) #define WINCON1_BLD_PIX (1 << 6) #define WINCON1_BPPMODE_MASK (0xf << 2) #define WINCON1_BPPMODE_SHIFT 2 @@ -437,6 +439,14 @@ #define WPALCON_W0PAL_16BPP_565 (0x6 << 0) /* Blending equation control */ +#define BLENDEQx(_win) (0x244 + ((_win - 1) * 4)) +#define BLENDEQ_ZERO 0x0 +#define BLENDEQ_ONE 0x1 +#define BLENDEQ_ALPHA_A 0x2 +#define BLENDEQ_ONE_MINUS_ALPHA_A 0x3 +#define BLENDEQ_ALPHA0 0x6 +#define BLENDEQ_B_FUNC_F(_x) (_x << 6) +#define BLENDEQ_A_FUNC_F(_x) (_x << 0) #define BLENDCON 0x260 #define BLENDCON_NEW_MASK (1 << 0) #define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0) diff --git a/include/xen/balloon.h b/include/xen/balloon.h index 61f410fd74e4..4914b93a23f2 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -44,8 +44,3 @@ static inline void xen_balloon_init(void) { } #endif - -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG -struct resource; -void arch_xen_balloon_init(struct resource *hostmem_resource); -#endif diff --git a/include/xen/interface/hvm/start_info.h b/include/xen/interface/hvm/start_info.h index 648415976ead..50af9ea2ff1e 100644 --- a/include/xen/interface/hvm/start_info.h +++ b/include/xen/interface/hvm/start_info.h @@ -33,7 +33,7 @@ * | magic | Contains the magic value XEN_HVM_START_MAGIC_VALUE * | | ("xEn3" with the 0x80 bit of the "E" set). * 4 +----------------+ - * | version | Version of this structure. Current version is 0. New + * | version | Version of this structure. Current version is 1. New * | | versions are guaranteed to be backwards-compatible. * 8 +----------------+ * | flags | SIF_xxx flags. @@ -48,6 +48,15 @@ * 32 +----------------+ * | rsdp_paddr | Physical address of the RSDP ACPI data structure. * 40 +----------------+ + * | memmap_paddr | Physical address of the (optional) memory map. Only + * | | present in version 1 and newer of the structure. + * 48 +----------------+ + * | memmap_entries | Number of entries in the memory map table. 
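
A quick aside on the ALSA FireWire additions above (the start-info layout description resumes right after): a sketch of pulling the TASCAM register image through the new ioctl. "hwdep_fd" is assumed to be an open ALSA hwdep node of a TASCAM FireWire device; that assumption is not spelled out by the header itself.

#include <sys/ioctl.h>
#include <sound/firewire.h>

/* Fetch the 64-quadlet TASCAM state snapshot. */
static int read_tascam_state(int hwdep_fd,
                             struct snd_firewire_tascam_state *st)
{
	return ioctl(hwdep_fd, SNDRV_FIREWIRE_IOCTL_TASCAM_STATE, st);
}
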
Zero + * | | if there is no memory map being provided. Only + * | | present in version 1 and newer of the structure. + * 52 +----------------+ + * | reserved | Version 1 and newer only. + * 56 +----------------+ * * The layout of each entry in the module structure is the following: * @@ -62,14 +71,52 @@ * | reserved | * 32 +----------------+ * + * The layout of each entry in the memory map table is as follows: + * + * 0 +----------------+ + * | addr | Base address + * 8 +----------------+ + * | size | Size of mapping in bytes + * 16 +----------------+ + * | type | Type of mapping as defined between the hypervisor + * | | and guest. See XEN_HVM_MEMMAP_TYPE_* values below. + * 20 +----------------| + * | reserved | + * 24 +----------------+ + * * The address and sizes are always a 64bit little endian unsigned integer. * * NB: Xen on x86 will always try to place all the data below the 4GiB * boundary. + * + * Version numbers of the hvm_start_info structure have evolved like this: + * + * Version 0: Initial implementation. + * + * Version 1: Added the memmap_paddr/memmap_entries fields (plus 4 bytes of + * padding) to the end of the hvm_start_info struct. These new + * fields can be used to pass a memory map to the guest. The + * memory map is optional and so guests that understand version 1 + * of the structure must check that memmap_entries is non-zero + * before trying to read the memory map. */ #define XEN_HVM_START_MAGIC_VALUE 0x336ec578 /* + * The values used in the type field of the memory map table entries are + * defined below and match the Address Range Types as defined in the "System + * Address Map Interfaces" section of the ACPI Specification. Please refer to + * section 15 in version 6.2 of the ACPI spec: http://uefi.org/specifications + */ +#define XEN_HVM_MEMMAP_TYPE_RAM 1 +#define XEN_HVM_MEMMAP_TYPE_RESERVED 2 +#define XEN_HVM_MEMMAP_TYPE_ACPI 3 +#define XEN_HVM_MEMMAP_TYPE_NVS 4 +#define XEN_HVM_MEMMAP_TYPE_UNUSABLE 5 +#define XEN_HVM_MEMMAP_TYPE_DISABLED 6 +#define XEN_HVM_MEMMAP_TYPE_PMEM 7 + +/* * C representation of the x86/HVM start info layout. * * The canonical definition of this layout is above, this is just a way to @@ -86,6 +133,13 @@ struct hvm_start_info { uint64_t cmdline_paddr; /* Physical address of the command line. */ uint64_t rsdp_paddr; /* Physical address of the RSDP ACPI data */ /* structure. */ + /* All following fields only present in version 1 and newer */ + uint64_t memmap_paddr; /* Physical address of an array of */ + /* hvm_memmap_table_entry. */ + uint32_t memmap_entries; /* Number of entries in the memmap table. */ + /* Value will be zero if there is no memory */ + /* map being provided. */ + uint32_t reserved; /* Must be zero. */ }; struct hvm_modlist_entry { @@ -95,4 +149,11 @@ struct hvm_modlist_entry { uint64_t reserved; }; +struct hvm_memmap_table_entry { + uint64_t addr; /* Base address of the memory region */ + uint64_t size; /* Size of the memory region in bytes */ + uint32_t type; /* Mapping type */ + uint32_t reserved; /* Must be zero for Version 1. */ +}; + #endif /* __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__ */ diff --git a/include/xen/xen-front-pgdir-shbuf.h b/include/xen/xen-front-pgdir-shbuf.h new file mode 100644 index 000000000000..150ef7ec51ec --- /dev/null +++ b/include/xen/xen-front-pgdir-shbuf.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ + +/* + * Xen frontend/backend page directory based shared buffer + * helper module. + * + * Copyright (C) 2018 EPAM Systems Inc. 
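
Before the new shared-buffer header continues, a sketch of how a version-1-aware PVH guest might consume the optional memory map documented above. The caller is assumed to have already mapped the table found at memmap_paddr; how that mapping is obtained is outside the scope of this sketch.

#include <stdint.h>
#include <xen/interface/hvm/start_info.h>

/* Sum up RAM advertised by a version >= 1 start info. */
static uint64_t pvh_usable_ram(const struct hvm_start_info *si,
                               const struct hvm_memmap_table_entry *map)
{
	uint64_t total = 0;
	uint32_t i;

	if (si->version < 1 || si->memmap_entries == 0)
		return 0;	/* no memory map was provided */

	for (i = 0; i < si->memmap_entries; i++)
		if (map[i].type == XEN_HVM_MEMMAP_TYPE_RAM)
			total += map[i].size;

	return total;
}
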
+ * + * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> + */ + +#ifndef __XEN_FRONT_PGDIR_SHBUF_H_ +#define __XEN_FRONT_PGDIR_SHBUF_H_ + +#include <linux/kernel.h> + +#include <xen/grant_table.h> + +struct xen_front_pgdir_shbuf_ops; + +struct xen_front_pgdir_shbuf { + /* + * Number of references granted for the backend use: + * + * - for frontend allocated/imported buffers this holds the number + * of grant references for the page directory and the pages + * of the buffer + * + * - for the buffer provided by the backend this only holds the number + * of grant references for the page directory itself as grant + * references for the buffer will be provided by the backend. + */ + int num_grefs; + grant_ref_t *grefs; + /* Page directory backing storage. */ + u8 *directory; + + /* + * Number of pages for the shared buffer itself (excluding the page + * directory). + */ + int num_pages; + /* + * Backing storage of the shared buffer: these are the pages being + * shared. + */ + struct page **pages; + + struct xenbus_device *xb_dev; + + /* These are the ops used internally depending on be_alloc mode. */ + const struct xen_front_pgdir_shbuf_ops *ops; + + /* Xen map handles for the buffer allocated by the backend. */ + grant_handle_t *backend_map_handles; +}; + +struct xen_front_pgdir_shbuf_cfg { + struct xenbus_device *xb_dev; + + /* Number of pages of the buffer backing storage. */ + int num_pages; + /* Pages of the buffer to be shared. */ + struct page **pages; + + /* + * This is allocated outside because there are use-cases when + * the buffer structure is allocated as a part of a bigger one. + */ + struct xen_front_pgdir_shbuf *pgdir; + /* + * Mode of grant reference sharing: if set then backend will share + * grant references to the buffer with the frontend. 
+ */ + int be_alloc; +}; + +int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg); + +grant_ref_t +xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf); + +int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf); + +int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf); + +void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf); + +#endif /* __XEN_FRONT_PGDIR_SHBUF_H_ */ diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 18803ff76e27..4969817124a8 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -42,16 +42,12 @@ int xen_setup_shutdown_event(void); extern unsigned long *xen_contiguous_bitmap; -#ifdef CONFIG_XEN_PV +#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, unsigned int address_bits, dma_addr_t *dma_handle); void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); - -int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, - xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot, - unsigned int domid, bool no_translate, struct page **pages); #else static inline int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, @@ -63,7 +59,13 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart, static inline void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) { } +#endif +#if defined(CONFIG_XEN_PV) +int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, + xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot, + unsigned int domid, bool no_translate, struct page **pages); +#else static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot, unsigned int domid, diff --git a/include/xen/xen.h b/include/xen/xen.h index d7a2678da77f..0e2156786ad2 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h @@ -29,6 +29,9 @@ extern bool xen_pvh; extern uint32_t xen_start_flags; +#include <xen/interface/hvm/start_info.h> +extern struct hvm_start_info pvh_start_info; + #ifdef CONFIG_XEN_DOM0 #include <xen/interface/xen.h> #include <asm/xen/hypervisor.h> |
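
Finally, a sketch of how a frontend driver might drive the xen_front_pgdir_shbuf helper declared above. Only the shbuf calls come from the header; the surrounding driver state ("pages", "num_pages", and the XenStore handshake that would advertise the returned grant reference) is assumed.

#include <xen/xenbus.h>
#include <xen/xen-front-pgdir-shbuf.h>

static int share_with_backend(struct xenbus_device *xb_dev,
                              struct page **pages, int num_pages,
                              struct xen_front_pgdir_shbuf *shbuf)
{
	struct xen_front_pgdir_shbuf_cfg cfg = {
		.xb_dev = xb_dev,
		.pages = pages,
		.num_pages = num_pages,
		.pgdir = shbuf,
		.be_alloc = 0,	/* frontend provides the pages itself */
	};
	grant_ref_t dir_gref;
	int ret;

	ret = xen_front_pgdir_shbuf_alloc(&cfg);
	if (ret)
		return ret;

	/* This grant reference would be advertised to the backend via XenStore. */
	dir_gref = xen_front_pgdir_shbuf_get_dir_start(shbuf);
	(void)dir_gref;

	return 0;
}
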