Diffstat (limited to 'include'): 450 files changed, 13187 insertions, 2964 deletions
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h index 88cb477524a6..d5ec6c87810f 100644 --- a/include/acpi/acbuffer.h +++ b/include/acpi/acbuffer.h @@ -111,7 +111,9 @@ struct acpi_gtm_info { struct acpi_pld_info { u8 revision; u8 ignore_color; - u32 color; + u8 red; + u8 green; + u8 blue; u16 width; u16 height; u8 user_visible; @@ -155,8 +157,14 @@ struct acpi_pld_info { #define ACPI_PLD_GET_IGNORE_COLOR(dword) ACPI_GET_BITS (dword, 7, ACPI_1BIT_MASK) #define ACPI_PLD_SET_IGNORE_COLOR(dword,value) ACPI_SET_BITS (dword, 7, ACPI_1BIT_MASK, value) /* Offset 7, Len 1 */ -#define ACPI_PLD_GET_COLOR(dword) ACPI_GET_BITS (dword, 8, ACPI_24BIT_MASK) -#define ACPI_PLD_SET_COLOR(dword,value) ACPI_SET_BITS (dword, 8, ACPI_24BIT_MASK, value) /* Offset 8, Len 24 */ +#define ACPI_PLD_GET_RED(dword) ACPI_GET_BITS (dword, 8, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_RED(dword,value) ACPI_SET_BITS (dword, 8, ACPI_8BIT_MASK, value) /* Offset 8, Len 8 */ + +#define ACPI_PLD_GET_GREEN(dword) ACPI_GET_BITS (dword, 16, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_GREEN(dword,value) ACPI_SET_BITS (dword, 16, ACPI_8BIT_MASK, value) /* Offset 16, Len 8 */ + +#define ACPI_PLD_GET_BLUE(dword) ACPI_GET_BITS (dword, 24, ACPI_8BIT_MASK) +#define ACPI_PLD_SET_BLUE(dword,value) ACPI_SET_BITS (dword, 24, ACPI_8BIT_MASK, value) /* Offset 24, Len 8 */ /* Second 32-bit dword, bits 33:63 */ diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index f34a0835aa4f..7581518e3eff 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -27,6 +27,7 @@ #define __ACPI_BUS_H__ #include <linux/device.h> +#include <linux/property.h> /* TBD: Make dynamic */ #define ACPI_MAX_HANDLES 10 @@ -337,10 +338,20 @@ struct acpi_device_physical_node { bool put_online:1; }; +/* ACPI Device Specific Data (_DSD) */ +struct acpi_device_data { + const union acpi_object *pointer; + const union acpi_object *properties; + const union acpi_object *of_compatible; +}; + +struct acpi_gpio_mapping; + /* Device */ struct acpi_device { int device_type; acpi_handle handle; /* no handle for fixed hardware */ + struct fwnode_handle fwnode; struct acpi_device *parent; struct list_head children; struct list_head node; @@ -353,17 +364,35 @@ struct acpi_device { struct acpi_device_wakeup wakeup; struct acpi_device_perf performance; struct acpi_device_dir dir; + struct acpi_device_data data; struct acpi_scan_handler *handler; struct acpi_hotplug_context *hp; struct acpi_driver *driver; + const struct acpi_gpio_mapping *driver_gpios; void *driver_data; struct device dev; unsigned int physical_node_count; + unsigned int dep_unmet; struct list_head physical_node_list; struct mutex physical_node_lock; void (*remove)(struct acpi_device *); }; +static inline bool is_acpi_node(struct fwnode_handle *fwnode) +{ + return fwnode && fwnode->type == FWNODE_ACPI; +} + +static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode) +{ + return fwnode ? 
container_of(fwnode, struct acpi_device, fwnode) : NULL; +} + +static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) +{ + return &adev->fwnode; +} + static inline void *acpi_driver_data(struct acpi_device *d) { return d->driver_data; @@ -516,6 +545,7 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, void (*work_func)(struct work_struct *work)); acpi_status acpi_remove_pm_notifier(struct acpi_device *adev); int acpi_pm_device_sleep_state(struct device *, int *, int); +int acpi_pm_device_run_wake(struct device *, bool); #else static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev, @@ -535,11 +565,6 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m) return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ? m : ACPI_STATE_D0; } -#endif - -#ifdef CONFIG_PM_RUNTIME -int acpi_pm_device_run_wake(struct device *, bool); -#else static inline int acpi_pm_device_run_wake(struct device *dev, bool enable) { return -ENODEV; diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index ab2acf629a64..5ba78464c1b1 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -46,7 +46,7 @@ /* Current ACPICA subsystem version in YYYYMMDD format */ -#define ACPI_CA_VERSION 0x20140926 +#define ACPI_CA_VERSION 0x20141107 #include <acpi/acconfig.h> #include <acpi/actypes.h> diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 7000e66f768e..bbef17368e49 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -736,6 +736,10 @@ typedef u32 acpi_event_status; #define ACPI_GPE_ENABLE 0 #define ACPI_GPE_DISABLE 1 #define ACPI_GPE_CONDITIONAL_ENABLE 2 +#define ACPI_GPE_SAVE_MASK 4 + +#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK) +#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK) /* * GPE info flags - Per GPE diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 9b9b6f29bbf3..3ca9b751f122 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -67,9 +67,6 @@ struct acpi_processor_cx { }; struct acpi_processor_power { - struct acpi_processor_cx *state; - unsigned long bm_check_timestamp; - u32 default_state; int count; struct acpi_processor_cx states[ACPI_PROCESSOR_MAX_POWER]; int timer_broadcast_on_state; @@ -313,11 +310,13 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) #endif /* CONFIG_CPU_FREQ */ /* in processor_core.c */ -void acpi_processor_set_pdc(acpi_handle handle); int acpi_get_apicid(acpi_handle, int type, u32 acpi_id); int acpi_map_cpuid(int apic_id, u32 acpi_id); int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); +/* in processor_pdc.c */ +void acpi_processor_set_pdc(acpi_handle handle); + /* in processor_throttling.c */ int acpi_processor_tstate_has_changed(struct acpi_processor *pr); int acpi_processor_get_throttling_info(struct acpi_processor *pr); diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 1402fa855388..f5c40b0fadc2 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -42,6 +42,14 @@ #define wmb() mb() #endif +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif + +#ifndef dma_wmb +#define dma_wmb() wmb() +#endif + #ifndef read_barrier_depends #define read_barrier_depends() do { } while (0) #endif diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index 01f227e14254..b59b5a52637e 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h @@ 
-5,6 +5,119 @@ #include <linux/uaccess.h> #include <asm/errno.h> +#ifndef CONFIG_SMP +/* + * The following implementation only for uniprocessor machines. + * For UP, it's relies on the fact that pagefault_disable() also disables + * preemption to ensure mutual exclusion. + * + */ + +/** + * futex_atomic_op_inuser() - Atomic arithmetic operation with constant + * argument and comparison of the previous + * futex value with another constant. + * + * @encoded_op: encoded operation to execute + * @uaddr: pointer to user space address + * + * Return: + * 0 - On success + * <0 - On error + */ +static inline int +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval, ret; + u32 tmp; + + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + pagefault_disable(); + + ret = -EFAULT; + if (unlikely(get_user(oldval, uaddr) != 0)) + goto out_pagefault_enable; + + ret = 0; + tmp = oldval; + + switch (op) { + case FUTEX_OP_SET: + tmp = oparg; + break; + case FUTEX_OP_ADD: + tmp += oparg; + break; + case FUTEX_OP_OR: + tmp |= oparg; + break; + case FUTEX_OP_ANDN: + tmp &= ~oparg; + break; + case FUTEX_OP_XOR: + tmp ^= oparg; + break; + default: + ret = -ENOSYS; + } + + if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0)) + ret = -EFAULT; + +out_pagefault_enable: + pagefault_enable(); + + if (ret == 0) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; +} + +/** + * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the + * uaddr with newval if the current value is + * oldval. 
+ * @uval: pointer to store content of @uaddr + * @uaddr: pointer to user space address + * @oldval: old value + * @newval: new value to store to @uaddr + * + * Return: + * 0 - On success + * <0 - On error + */ +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + u32 val; + + if (unlikely(get_user(val, uaddr) != 0)) + return -EFAULT; + + if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) + return -EFAULT; + + *uval = val; + + return 0; +} + +#else static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) { @@ -54,4 +167,5 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -ENOSYS; } +#endif /* CONFIG_SMP */ #endif diff --git a/include/asm-generic/hash.h b/include/asm-generic/hash.h deleted file mode 100644 index b6312843dbd9..000000000000 --- a/include/asm-generic/hash.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_GENERIC_HASH_H -#define __ASM_GENERIC_HASH_H - -struct fast_hash_ops; -static inline void setup_arch_fast_hash(struct fast_hash_ops *ops) -{ -} - -#endif /* __ASM_GENERIC_HASH_H */ diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index b8fdc57a7335..9db042304df3 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -12,6 +12,7 @@ #define __ASM_GENERIC_IO_H #include <asm/page.h> /* I/O is all done through memory accesses */ +#include <linux/string.h> /* for memset() and memcpy() */ #include <linux/types.h> #ifdef CONFIG_GENERIC_IOMAP @@ -24,260 +25,691 @@ #define mmiowb() do {} while (0) #endif -/*****************************************************************************/ /* - * readX/writeX() are used to access memory mapped devices. On some - * architectures the memory mapped IO stuff needs to be accessed - * differently. On the simple architectures, we just read/write the - * memory location directly. + * __raw_{read,write}{b,w,l,q}() access memory in native endianness. + * + * On some architectures memory mapped IO needs to be accessed differently. + * On the simple architectures, we just read/write the memory location + * directly. 
*/ + #ifndef __raw_readb +#define __raw_readb __raw_readb static inline u8 __raw_readb(const volatile void __iomem *addr) { - return *(const volatile u8 __force *) addr; + return *(const volatile u8 __force *)addr; } #endif #ifndef __raw_readw +#define __raw_readw __raw_readw static inline u16 __raw_readw(const volatile void __iomem *addr) { - return *(const volatile u16 __force *) addr; + return *(const volatile u16 __force *)addr; } #endif #ifndef __raw_readl +#define __raw_readl __raw_readl static inline u32 __raw_readl(const volatile void __iomem *addr) { - return *(const volatile u32 __force *) addr; + return *(const volatile u32 __force *)addr; } #endif -#define readb __raw_readb - -#define readw readw -static inline u16 readw(const volatile void __iomem *addr) -{ - return __le16_to_cpu(__raw_readw(addr)); -} - -#define readl readl -static inline u32 readl(const volatile void __iomem *addr) +#ifdef CONFIG_64BIT +#ifndef __raw_readq +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) { - return __le32_to_cpu(__raw_readl(addr)); + return *(const volatile u64 __force *)addr; } +#endif +#endif /* CONFIG_64BIT */ #ifndef __raw_writeb -static inline void __raw_writeb(u8 b, volatile void __iomem *addr) +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 value, volatile void __iomem *addr) { - *(volatile u8 __force *) addr = b; + *(volatile u8 __force *)addr = value; } #endif #ifndef __raw_writew -static inline void __raw_writew(u16 b, volatile void __iomem *addr) +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 value, volatile void __iomem *addr) { - *(volatile u16 __force *) addr = b; + *(volatile u16 __force *)addr = value; } #endif #ifndef __raw_writel -static inline void __raw_writel(u32 b, volatile void __iomem *addr) +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 value, volatile void __iomem *addr) { - *(volatile u32 __force *) addr = b; + *(volatile u32 __force *)addr = value; } #endif -#define writeb __raw_writeb -#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr) -#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr) - #ifdef CONFIG_64BIT -#ifndef __raw_readq -static inline u64 __raw_readq(const volatile void __iomem *addr) +#ifndef __raw_writeq +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 value, volatile void __iomem *addr) { - return *(const volatile u64 __force *) addr; + *(volatile u64 __force *)addr = value; } #endif +#endif /* CONFIG_64BIT */ -#define readq readq -static inline u64 readq(const volatile void __iomem *addr) -{ - return __le64_to_cpu(__raw_readq(addr)); -} +/* + * {read,write}{b,w,l,q}() access little endian memory and return result in + * native endianness. 
+ */ -#ifndef __raw_writeq -static inline void __raw_writeq(u64 b, volatile void __iomem *addr) +#ifndef readb +#define readb readb +static inline u8 readb(const volatile void __iomem *addr) { - *(volatile u64 __force *) addr = b; + return __raw_readb(addr); } #endif -#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr) -#endif /* CONFIG_64BIT */ - -#ifndef PCI_IOBASE -#define PCI_IOBASE ((void __iomem *) 0) +#ifndef readw +#define readw readw +static inline u16 readw(const volatile void __iomem *addr) +{ + return __le16_to_cpu(__raw_readw(addr)); +} #endif -/*****************************************************************************/ -/* - * traditional input/output functions - */ - -static inline u8 inb(unsigned long addr) +#ifndef readl +#define readl readl +static inline u32 readl(const volatile void __iomem *addr) { - return readb(addr + PCI_IOBASE); + return __le32_to_cpu(__raw_readl(addr)); } +#endif -static inline u16 inw(unsigned long addr) +#ifdef CONFIG_64BIT +#ifndef readq +#define readq readq +static inline u64 readq(const volatile void __iomem *addr) { - return readw(addr + PCI_IOBASE); + return __le64_to_cpu(__raw_readq(addr)); } +#endif +#endif /* CONFIG_64BIT */ -static inline u32 inl(unsigned long addr) +#ifndef writeb +#define writeb writeb +static inline void writeb(u8 value, volatile void __iomem *addr) { - return readl(addr + PCI_IOBASE); + __raw_writeb(value, addr); } +#endif -static inline void outb(u8 b, unsigned long addr) +#ifndef writew +#define writew writew +static inline void writew(u16 value, volatile void __iomem *addr) { - writeb(b, addr + PCI_IOBASE); + __raw_writew(cpu_to_le16(value), addr); } +#endif -static inline void outw(u16 b, unsigned long addr) +#ifndef writel +#define writel writel +static inline void writel(u32 value, volatile void __iomem *addr) { - writew(b, addr + PCI_IOBASE); + __raw_writel(__cpu_to_le32(value), addr); } +#endif -static inline void outl(u32 b, unsigned long addr) +#ifdef CONFIG_64BIT +#ifndef writeq +#define writeq writeq +static inline void writeq(u64 value, volatile void __iomem *addr) { - writel(b, addr + PCI_IOBASE); + __raw_writeq(__cpu_to_le64(value), addr); } +#endif +#endif /* CONFIG_64BIT */ + +/* + * {read,write}{b,w,l,q}_relaxed() are like the regular version, but + * are not guaranteed to provide ordering against spinlocks or memory + * accesses. + */ +#ifndef readb_relaxed +#define readb_relaxed readb +#endif -#define inb_p(addr) inb(addr) -#define inw_p(addr) inw(addr) -#define inl_p(addr) inl(addr) -#define outb_p(x, addr) outb((x), (addr)) -#define outw_p(x, addr) outw((x), (addr)) -#define outl_p(x, addr) outl((x), (addr)) +#ifndef readw_relaxed +#define readw_relaxed readw +#endif -#ifndef insb -static inline void insb(unsigned long addr, void *buffer, int count) +#ifndef readl_relaxed +#define readl_relaxed readl +#endif + +#ifndef readq_relaxed +#define readq_relaxed readq +#endif + +#ifndef writeb_relaxed +#define writeb_relaxed writeb +#endif + +#ifndef writew_relaxed +#define writew_relaxed writew +#endif + +#ifndef writel_relaxed +#define writel_relaxed writel +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed writeq +#endif + +/* + * {read,write}s{b,w,l,q}() repeatedly access the same memory address in + * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times). 
+ */ +#ifndef readsb +#define readsb readsb +static inline void readsb(const volatile void __iomem *addr, void *buffer, + unsigned int count) { if (count) { u8 *buf = buffer; + do { - u8 x = __raw_readb(addr + PCI_IOBASE); + u8 x = __raw_readb(addr); *buf++ = x; } while (--count); } } #endif -#ifndef insw -static inline void insw(unsigned long addr, void *buffer, int count) +#ifndef readsw +#define readsw readsw +static inline void readsw(const volatile void __iomem *addr, void *buffer, + unsigned int count) { if (count) { u16 *buf = buffer; + do { - u16 x = __raw_readw(addr + PCI_IOBASE); + u16 x = __raw_readw(addr); *buf++ = x; } while (--count); } } #endif -#ifndef insl -static inline void insl(unsigned long addr, void *buffer, int count) +#ifndef readsl +#define readsl readsl +static inline void readsl(const volatile void __iomem *addr, void *buffer, + unsigned int count) { if (count) { u32 *buf = buffer; + do { - u32 x = __raw_readl(addr + PCI_IOBASE); + u32 x = __raw_readl(addr); *buf++ = x; } while (--count); } } #endif -#ifndef outsb -static inline void outsb(unsigned long addr, const void *buffer, int count) +#ifdef CONFIG_64BIT +#ifndef readsq +#define readsq readsq +static inline void readsq(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + if (count) { + u64 *buf = buffer; + + do { + u64 x = __raw_readq(addr); + *buf++ = x; + } while (--count); + } +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef writesb +#define writesb writesb +static inline void writesb(volatile void __iomem *addr, const void *buffer, + unsigned int count) { if (count) { const u8 *buf = buffer; + do { - __raw_writeb(*buf++, addr + PCI_IOBASE); + __raw_writeb(*buf++, addr); } while (--count); } } #endif -#ifndef outsw -static inline void outsw(unsigned long addr, const void *buffer, int count) +#ifndef writesw +#define writesw writesw +static inline void writesw(volatile void __iomem *addr, const void *buffer, + unsigned int count) { if (count) { const u16 *buf = buffer; + do { - __raw_writew(*buf++, addr + PCI_IOBASE); + __raw_writew(*buf++, addr); } while (--count); } } #endif -#ifndef outsl -static inline void outsl(unsigned long addr, const void *buffer, int count) +#ifndef writesl +#define writesl writesl +static inline void writesl(volatile void __iomem *addr, const void *buffer, + unsigned int count) { if (count) { const u32 *buf = buffer; + do { - __raw_writel(*buf++, addr + PCI_IOBASE); + __raw_writel(*buf++, addr); } while (--count); } } #endif -#ifndef CONFIG_GENERIC_IOMAP -#define ioread8(addr) readb(addr) -#define ioread16(addr) readw(addr) -#define ioread16be(addr) __be16_to_cpu(__raw_readw(addr)) -#define ioread32(addr) readl(addr) -#define ioread32be(addr) __be32_to_cpu(__raw_readl(addr)) - -#define iowrite8(v, addr) writeb((v), (addr)) -#define iowrite16(v, addr) writew((v), (addr)) -#define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr) -#define iowrite32(v, addr) writel((v), (addr)) -#define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr) - -#define ioread8_rep(p, dst, count) \ - insb((unsigned long) (p), (dst), (count)) -#define ioread16_rep(p, dst, count) \ - insw((unsigned long) (p), (dst), (count)) -#define ioread32_rep(p, dst, count) \ - insl((unsigned long) (p), (dst), (count)) - -#define iowrite8_rep(p, src, count) \ - outsb((unsigned long) (p), (src), (count)) -#define iowrite16_rep(p, src, count) \ - outsw((unsigned long) (p), (src), (count)) -#define iowrite32_rep(p, src, count) \ - outsl((unsigned long) (p), (src), (count)) -#endif 
/* CONFIG_GENERIC_IOMAP */ +#ifdef CONFIG_64BIT +#ifndef writesq +#define writesq writesq +static inline void writesq(volatile void __iomem *addr, const void *buffer, + unsigned int count) +{ + if (count) { + const u64 *buf = buffer; + + do { + __raw_writeq(*buf++, addr); + } while (--count); + } +} +#endif +#endif /* CONFIG_64BIT */ + +#ifndef PCI_IOBASE +#define PCI_IOBASE ((void __iomem *)0) +#endif #ifndef IO_SPACE_LIMIT #define IO_SPACE_LIMIT 0xffff #endif +/* + * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be + * implemented on hardware that needs an additional delay for I/O accesses to + * take effect. + */ + +#ifndef inb +#define inb inb +static inline u8 inb(unsigned long addr) +{ + return readb(PCI_IOBASE + addr); +} +#endif + +#ifndef inw +#define inw inw +static inline u16 inw(unsigned long addr) +{ + return readw(PCI_IOBASE + addr); +} +#endif + +#ifndef inl +#define inl inl +static inline u32 inl(unsigned long addr) +{ + return readl(PCI_IOBASE + addr); +} +#endif + +#ifndef outb +#define outb outb +static inline void outb(u8 value, unsigned long addr) +{ + writeb(value, PCI_IOBASE + addr); +} +#endif + +#ifndef outw +#define outw outw +static inline void outw(u16 value, unsigned long addr) +{ + writew(value, PCI_IOBASE + addr); +} +#endif + +#ifndef outl +#define outl outl +static inline void outl(u32 value, unsigned long addr) +{ + writel(value, PCI_IOBASE + addr); +} +#endif + +#ifndef inb_p +#define inb_p inb_p +static inline u8 inb_p(unsigned long addr) +{ + return inb(addr); +} +#endif + +#ifndef inw_p +#define inw_p inw_p +static inline u16 inw_p(unsigned long addr) +{ + return inw(addr); +} +#endif + +#ifndef inl_p +#define inl_p inl_p +static inline u32 inl_p(unsigned long addr) +{ + return inl(addr); +} +#endif + +#ifndef outb_p +#define outb_p outb_p +static inline void outb_p(u8 value, unsigned long addr) +{ + outb(value, addr); +} +#endif + +#ifndef outw_p +#define outw_p outw_p +static inline void outw_p(u16 value, unsigned long addr) +{ + outw(value, addr); +} +#endif + +#ifndef outl_p +#define outl_p outl_p +static inline void outl_p(u32 value, unsigned long addr) +{ + outl(value, addr); +} +#endif + +/* + * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a + * single I/O port multiple times. 
+ */ + +#ifndef insb +#define insb insb +static inline void insb(unsigned long addr, void *buffer, unsigned int count) +{ + readsb(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insw +#define insw insw +static inline void insw(unsigned long addr, void *buffer, unsigned int count) +{ + readsw(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insl +#define insl insl +static inline void insl(unsigned long addr, void *buffer, unsigned int count) +{ + readsl(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsb +#define outsb outsb +static inline void outsb(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesb(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsw +#define outsw outsw +static inline void outsw(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesw(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef outsl +#define outsl outsl +static inline void outsl(unsigned long addr, const void *buffer, + unsigned int count) +{ + writesl(PCI_IOBASE + addr, buffer, count); +} +#endif + +#ifndef insb_p +#define insb_p insb_p +static inline void insb_p(unsigned long addr, void *buffer, unsigned int count) +{ + insb(addr, buffer, count); +} +#endif + +#ifndef insw_p +#define insw_p insw_p +static inline void insw_p(unsigned long addr, void *buffer, unsigned int count) +{ + insw(addr, buffer, count); +} +#endif + +#ifndef insl_p +#define insl_p insl_p +static inline void insl_p(unsigned long addr, void *buffer, unsigned int count) +{ + insl(addr, buffer, count); +} +#endif + +#ifndef outsb_p +#define outsb_p outsb_p +static inline void outsb_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsb(addr, buffer, count); +} +#endif + +#ifndef outsw_p +#define outsw_p outsw_p +static inline void outsw_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsw(addr, buffer, count); +} +#endif + +#ifndef outsl_p +#define outsl_p outsl_p +static inline void outsl_p(unsigned long addr, const void *buffer, + unsigned int count) +{ + outsl(addr, buffer, count); +} +#endif + +#ifndef CONFIG_GENERIC_IOMAP +#ifndef ioread8 +#define ioread8 ioread8 +static inline u8 ioread8(const volatile void __iomem *addr) +{ + return readb(addr); +} +#endif + +#ifndef ioread16 +#define ioread16 ioread16 +static inline u16 ioread16(const volatile void __iomem *addr) +{ + return readw(addr); +} +#endif + +#ifndef ioread32 +#define ioread32 ioread32 +static inline u32 ioread32(const volatile void __iomem *addr) +{ + return readl(addr); +} +#endif + +#ifndef iowrite8 +#define iowrite8 iowrite8 +static inline void iowrite8(u8 value, volatile void __iomem *addr) +{ + writeb(value, addr); +} +#endif + +#ifndef iowrite16 +#define iowrite16 iowrite16 +static inline void iowrite16(u16 value, volatile void __iomem *addr) +{ + writew(value, addr); +} +#endif + +#ifndef iowrite32 +#define iowrite32 iowrite32 +static inline void iowrite32(u32 value, volatile void __iomem *addr) +{ + writel(value, addr); +} +#endif + +#ifndef ioread16be +#define ioread16be ioread16be +static inline u16 ioread16be(const volatile void __iomem *addr) +{ + return __be16_to_cpu(__raw_readw(addr)); +} +#endif + +#ifndef ioread32be +#define ioread32be ioread32be +static inline u32 ioread32be(const volatile void __iomem *addr) +{ + return __be32_to_cpu(__raw_readl(addr)); +} +#endif + +#ifndef iowrite16be +#define iowrite16be iowrite16be +static inline void iowrite16be(u16 value, void volatile __iomem *addr) +{ + __raw_writew(__cpu_to_be16(value), 
addr); +} +#endif + +#ifndef iowrite32be +#define iowrite32be iowrite32be +static inline void iowrite32be(u32 value, volatile void __iomem *addr) +{ + __raw_writel(__cpu_to_be32(value), addr); +} +#endif + +#ifndef ioread8_rep +#define ioread8_rep ioread8_rep +static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer, + unsigned int count) +{ + readsb(addr, buffer, count); +} +#endif + +#ifndef ioread16_rep +#define ioread16_rep ioread16_rep +static inline void ioread16_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsw(addr, buffer, count); +} +#endif + +#ifndef ioread32_rep +#define ioread32_rep ioread32_rep +static inline void ioread32_rep(const volatile void __iomem *addr, + void *buffer, unsigned int count) +{ + readsl(addr, buffer, count); +} +#endif + +#ifndef iowrite8_rep +#define iowrite8_rep iowrite8_rep +static inline void iowrite8_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesb(addr, buffer, count); +} +#endif + +#ifndef iowrite16_rep +#define iowrite16_rep iowrite16_rep +static inline void iowrite16_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesw(addr, buffer, count); +} +#endif + +#ifndef iowrite32_rep +#define iowrite32_rep iowrite32_rep +static inline void iowrite32_rep(volatile void __iomem *addr, + const void *buffer, + unsigned int count) +{ + writesl(addr, buffer, count); +} +#endif +#endif /* CONFIG_GENERIC_IOMAP */ + #ifdef __KERNEL__ #include <linux/vmalloc.h> -#define __io_virt(x) ((void __force *) (x)) +#define __io_virt(x) ((void __force *)(x)) #ifndef CONFIG_GENERIC_IOMAP struct pci_dev; extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); #ifndef pci_iounmap +#define pci_iounmap pci_iounmap static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) { } @@ -289,11 +721,15 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) * These are pretty trivial */ #ifndef virt_to_phys +#define virt_to_phys virt_to_phys static inline unsigned long virt_to_phys(volatile void *address) { return __pa((unsigned long)address); } +#endif +#ifndef phys_to_virt +#define phys_to_virt phys_to_virt static inline void *phys_to_virt(unsigned long address) { return __va(address); @@ -306,37 +742,65 @@ static inline void *phys_to_virt(unsigned long address) * This implementation is for the no-MMU case only... if you have an MMU * you'll need to provide your own definitions. 
*/ + #ifndef CONFIG_MMU -static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) +#ifndef ioremap +#define ioremap ioremap +static inline void __iomem *ioremap(phys_addr_t offset, size_t size) { - return (void __iomem*) (unsigned long)offset; + return (void __iomem *)(unsigned long)offset; } +#endif -#define __ioremap(offset, size, flags) ioremap(offset, size) +#ifndef __ioremap +#define __ioremap __ioremap +static inline void __iomem *__ioremap(phys_addr_t offset, size_t size, + unsigned long flags) +{ + return ioremap(offset, size); +} +#endif #ifndef ioremap_nocache -#define ioremap_nocache ioremap +#define ioremap_nocache ioremap_nocache +static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) +{ + return ioremap(offset, size); +} #endif #ifndef ioremap_wc -#define ioremap_wc ioremap_nocache +#define ioremap_wc ioremap_wc +static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) +{ + return ioremap_nocache(offset, size); +} #endif +#ifndef iounmap +#define iounmap iounmap static inline void iounmap(void __iomem *addr) { } +#endif #endif /* CONFIG_MMU */ #ifdef CONFIG_HAS_IOPORT_MAP #ifndef CONFIG_GENERIC_IOMAP +#ifndef ioport_map +#define ioport_map ioport_map static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { return PCI_IOBASE + (port & IO_SPACE_LIMIT); } +#endif +#ifndef ioport_unmap +#define ioport_unmap ioport_unmap static inline void ioport_unmap(void __iomem *p) { } +#endif #else /* CONFIG_GENERIC_IOMAP */ extern void __iomem *ioport_map(unsigned long port, unsigned int nr); extern void ioport_unmap(void __iomem *p); @@ -344,35 +808,68 @@ extern void ioport_unmap(void __iomem *p); #endif /* CONFIG_HAS_IOPORT_MAP */ #ifndef xlate_dev_kmem_ptr -#define xlate_dev_kmem_ptr(p) p +#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr +static inline void *xlate_dev_kmem_ptr(void *addr) +{ + return addr; +} #endif + #ifndef xlate_dev_mem_ptr -#define xlate_dev_mem_ptr(p) __va(p) +#define xlate_dev_mem_ptr xlate_dev_mem_ptr +static inline void *xlate_dev_mem_ptr(phys_addr_t addr) +{ + return __va(addr); +} +#endif + +#ifndef unxlate_dev_mem_ptr +#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr +static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) +{ +} #endif #ifdef CONFIG_VIRT_TO_BUS #ifndef virt_to_bus -static inline unsigned long virt_to_bus(volatile void *address) +static inline unsigned long virt_to_bus(void *address) { - return ((unsigned long) address); + return (unsigned long)address; } static inline void *bus_to_virt(unsigned long address) { - return (void *) address; + return (void *)address; } #endif #endif #ifndef memset_io -#define memset_io(a, b, c) memset(__io_virt(a), (b), (c)) +#define memset_io memset_io +static inline void memset_io(volatile void __iomem *addr, int value, + size_t size) +{ + memset(__io_virt(addr), value, size); +} #endif #ifndef memcpy_fromio -#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c)) +#define memcpy_fromio memcpy_fromio +static inline void memcpy_fromio(void *buffer, + const volatile void __iomem *addr, + size_t size) +{ + memcpy(buffer, __io_virt(addr), size); +} #endif + #ifndef memcpy_toio -#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c)) +#define memcpy_toio memcpy_toio +static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, + size_t size) +{ + memcpy(__io_virt(addr), buffer, size); +} #endif #endif /* __KERNEL__ */ diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h index 
67dea8123683..866aa461efa5 100644 --- a/include/asm-generic/mm_hooks.h +++ b/include/asm-generic/mm_hooks.h @@ -1,7 +1,7 @@ /* - * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap, to - * be included in asm-FOO/mmu_context.h for any arch FOO which doesn't - * need to hook these. + * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap + * and arch_unmap to be included in asm-FOO/mmu_context.h for any + * arch FOO which doesn't need to hook these. */ #ifndef _ASM_GENERIC_MM_HOOKS_H #define _ASM_GENERIC_MM_HOOKS_H @@ -15,4 +15,15 @@ static inline void arch_exit_mmap(struct mm_struct *mm) { } +static inline void arch_unmap(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + #endif /* _ASM_GENERIC_MM_HOOKS_H */ diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h new file mode 100644 index 000000000000..61c58d8878ce --- /dev/null +++ b/include/asm-generic/msi.h @@ -0,0 +1,32 @@ +#ifndef __ASM_GENERIC_MSI_H +#define __ASM_GENERIC_MSI_H + +#include <linux/types.h> + +#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS +# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2 +#endif + +struct msi_desc; + +/** + * struct msi_alloc_info - Default structure for MSI interrupt allocation. + * @desc: Pointer to msi descriptor + * @hwirq: Associated hw interrupt number in the domain + * @scratchpad: Storage for implementation specific scratch data + * + * Architectures can provide their own implementation by not including + * asm-generic/msi.h into their arch specific header file. + */ +typedef struct msi_alloc_info { + struct msi_desc *desc; + irq_hw_number_t hwirq; + union { + unsigned long ul; + void *ptr; + } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS]; +} msi_alloc_info_t; + +#define GENERIC_MSI_DOMAIN_OPS 1 + +#endif diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 752e30d63904..177d5973b132 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -103,6 +103,17 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif +#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp, + int full) +{ + return pmdp_get_and_clear(mm, address, pmdp); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h index 1cd3f5d767a8..eb6f9e6c3075 100644 --- a/include/asm-generic/preempt.h +++ b/include/asm-generic/preempt.h @@ -23,9 +23,6 @@ static __always_inline void preempt_count_set(int pc) /* * must be macros to avoid header recursion hell */ -#define task_preempt_count(p) \ - (task_thread_info(p)->preempt_count & ~PREEMPT_NEED_RESCHED) - #define init_task_preempt_count(p) do { \ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \ } while (0) diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h new file mode 100644 index 000000000000..9fa1f653ed3b --- /dev/null +++ b/include/asm-generic/seccomp.h @@ -0,0 +1,30 @@ +/* + * include/asm-generic/seccomp.h + * + * Copyright (C) 2014 Linaro Limited + * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + * + * This program is free software; 
you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_GENERIC_SECCOMP_H +#define _ASM_GENERIC_SECCOMP_H + +#include <linux/unistd.h> + +#if defined(CONFIG_COMPAT) && !defined(__NR_seccomp_read_32) +#define __NR_seccomp_read_32 __NR_read +#define __NR_seccomp_write_32 __NR_write +#define __NR_seccomp_exit_32 __NR_exit +#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn +#endif /* CONFIG_COMPAT && ! already defined */ + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#ifndef __NR_seccomp_sigreturn +#define __NR_seccomp_sigreturn __NR_rt_sigreturn +#endif + +#endif /* _ASM_GENERIC_SECCOMP_H */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 5672d7ea1fa0..08848050922e 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -96,10 +96,9 @@ struct mmu_gather { #endif unsigned long start; unsigned long end; - unsigned int need_flush : 1, /* Did free PTEs */ /* we are in the middle of an operation to clear * a full mm and can make some optimizations */ - fullmm : 1, + unsigned int fullmm : 1, /* we have performed an operation which * requires a complete flush of the tlb */ need_flush_all : 1; @@ -128,16 +127,54 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) tlb_flush_mmu(tlb); } +static inline void __tlb_adjust_range(struct mmu_gather *tlb, + unsigned long address) +{ + tlb->start = min(tlb->start, address); + tlb->end = max(tlb->end, address + PAGE_SIZE); +} + +static inline void __tlb_reset_range(struct mmu_gather *tlb) +{ + tlb->start = TASK_SIZE; + tlb->end = 0; +} + +/* + * In the case of tlb vma handling, we can optimise these away in the + * case where we're doing a full MM flush. When we're doing a munmap, + * the vmas are adjusted to only cover the region to be torn down. + */ +#ifndef tlb_start_vma +#define tlb_start_vma(tlb, vma) do { } while (0) +#endif + +#define __tlb_end_vma(tlb, vma) \ + do { \ + if (!tlb->fullmm && tlb->end) { \ + tlb_flush(tlb); \ + __tlb_reset_range(tlb); \ + } \ + } while (0) + +#ifndef tlb_end_vma +#define tlb_end_vma __tlb_end_vma +#endif + +#ifndef __tlb_remove_tlb_entry +#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) +#endif + /** * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. * - * Record the fact that pte's were really umapped in ->need_flush, so we can - * later optimise away the tlb invalidate. This helps when userspace is - * unmapping already-unmapped pages, which happens quite a lot. + * Record the fact that pte's were really unmapped by updating the range, + * so we can later optimise away the tlb invalidate. This helps when + * userspace is unmapping already-unmapped pages, which happens quite a lot. 
*/ #define tlb_remove_tlb_entry(tlb, ptep, address) \ do { \ - tlb->need_flush = 1; \ + __tlb_adjust_range(tlb, address); \ __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) @@ -151,27 +188,27 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \ do { \ - tlb->need_flush = 1; \ + __tlb_adjust_range(tlb, address); \ __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \ } while (0) #define pte_free_tlb(tlb, ptep, address) \ do { \ - tlb->need_flush = 1; \ + __tlb_adjust_range(tlb, address); \ __pte_free_tlb(tlb, ptep, address); \ } while (0) #ifndef __ARCH_HAS_4LEVEL_HACK #define pud_free_tlb(tlb, pudp, address) \ do { \ - tlb->need_flush = 1; \ + __tlb_adjust_range(tlb, address); \ __pud_free_tlb(tlb, pudp, address); \ } while (0) #endif #define pmd_free_tlb(tlb, pmdp, address) \ do { \ - tlb->need_flush = 1; \ + __tlb_adjust_range(tlb, address); \ __pmd_free_tlb(tlb, pmdp, address); \ } while (0) diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 74b13ec1ebd4..98abda9ed3aa 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -17,6 +17,32 @@ struct crypto_ahash; +/** + * DOC: Message Digest Algorithm Definitions + * + * These data structures define modular message digest algorithm + * implementations, managed via crypto_register_ahash(), + * crypto_register_shash(), crypto_unregister_ahash() and + * crypto_unregister_shash(). + */ + +/** + * struct hash_alg_common - define properties of message digest + * @digestsize: Size of the result of the transformation. A buffer of this size + * must be available to the @final and @finup calls, so they can + * store the resulting hash into it. For various predefined sizes, + * search include/crypto/ using + * git grep _DIGEST_SIZE include/crypto. + * @statesize: Size of the block for partial state of the transformation. A + * buffer of this size must be passed to the @export function as it + * will save the partial state of the transformation into it. On the + * other side, the @import function will load the state from a + * buffer of this size as well. + * @base: Start of data structure of cipher algorithm. The common data + * structure of crypto_alg contains information common to all ciphers. + * The hash_alg_common data structure now adds the hash-specific + * information. + */ struct hash_alg_common { unsigned int digestsize; unsigned int statesize; @@ -37,6 +63,63 @@ struct ahash_request { void *__ctx[] CRYPTO_MINALIGN_ATTR; }; +/** + * struct ahash_alg - asynchronous message digest definition + * @init: Initialize the transformation context. Intended only to initialize the + * state of the HASH transformation at the begining. This shall fill in + * the internal structures used during the entire duration of the whole + * transformation. No data processing happens at this point. + * @update: Push a chunk of data into the driver for transformation. This + * function actually pushes blocks of data from upper layers into the + * driver, which then passes those to the hardware as seen fit. This + * function must not finalize the HASH transformation by calculating the + * final message digest as this only adds more data into the + * transformation. This function shall not modify the transformation + * context, as this function may be called in parallel with the same + * transformation object. Data processing can happen synchronously + * [SHASH] or asynchronously [AHASH] at this point. + * @final: Retrieve result from the driver. 
This function finalizes the + * transformation and retrieves the resulting hash from the driver and + * pushes it back to upper layers. No data processing happens at this + * point. + * @finup: Combination of @update and @final. This function is effectively a + * combination of @update and @final calls issued in sequence. As some + * hardware cannot do @update and @final separately, this callback was + * added to allow such hardware to be used at least by IPsec. Data + * processing can happen synchronously [SHASH] or asynchronously [AHASH] + * at this point. + * @digest: Combination of @init and @update and @final. This function + * effectively behaves as the entire chain of operations, @init, + * @update and @final issued in sequence. Just like @finup, this was + * added for hardware which cannot do even the @finup, but can only do + * the whole transformation in one run. Data processing can happen + * synchronously [SHASH] or asynchronously [AHASH] at this point. + * @setkey: Set optional key used by the hashing algorithm. Intended to push + * optional key used by the hashing algorithm from upper layers into + * the driver. This function can store the key in the transformation + * context or can outright program it into the hardware. In the former + * case, one must be careful to program the key into the hardware at + * appropriate time and one must be careful that .setkey() can be + * called multiple times during the existence of the transformation + * object. Not all hashing algorithms do implement this function as it + * is only needed for keyed message digests. SHAx/MDx/CRCx do NOT + * implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement + * this function. This function must be called before any other of the + * @init, @update, @final, @finup, @digest is called. No data + * processing happens at this point. + * @export: Export partial state of the transformation. This function dumps the + * entire state of the ongoing transformation into a provided block of + * data so it can be @import 'ed back later on. This is useful in case + * you want to save partial result of the transformation after + * processing certain amount of data and reload this partial result + * multiple times later on for multiple re-use. No data processing + * happens at this point. + * @import: Import partial state of the transformation. This function loads the + * entire state of the ongoing transformation from a provided block of + * data so the transformation can continue from this point onward. No + * data processing happens at this point. + * @halg: see struct hash_alg_common + */ struct ahash_alg { int (*init)(struct ahash_request *req); int (*update)(struct ahash_request *req); @@ -63,6 +146,23 @@ struct shash_desc { crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc +/** + * struct shash_alg - synchronous message digest definition + * @init: see struct ahash_alg + * @update: see struct ahash_alg + * @final: see struct ahash_alg + * @finup: see struct ahash_alg + * @digest: see struct ahash_alg + * @export: see struct ahash_alg + * @import: see struct ahash_alg + * @setkey: see struct ahash_alg + * @digestsize: see struct ahash_alg + * @statesize: see struct ahash_alg + * @descsize: Size of the operational state for the message digest. 
This state + * size is the memory size that needs to be allocated for + * shash_desc.__ctx + * @base: internally used + */ struct shash_alg { int (*init)(struct shash_desc *desc); int (*update)(struct shash_desc *desc, const u8 *data, @@ -107,11 +207,35 @@ struct crypto_shash { struct crypto_tfm base; }; +/** + * DOC: Asynchronous Message Digest API + * + * The asynchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto) + * + * The asynchronous cipher operation discussion provided for the + * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well. + */ + static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) { return container_of(tfm, struct crypto_ahash, base); } +/** + * crypto_alloc_ahash() - allocate ahash cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ahash cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an ahash. The returned struct + * crypto_ahash is the cipher handle that is required for any subsequent + * API invocation for that ahash. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type, u32 mask); @@ -120,6 +244,10 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) return &tfm->base; } +/** + * crypto_free_ahash() - zeroize and free the ahash handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_ahash(struct crypto_ahash *tfm) { crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm)); @@ -143,6 +271,16 @@ static inline struct hash_alg_common *crypto_hash_alg_common( return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg); } +/** + * crypto_ahash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * + * Return: message digest size of cipher + */ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) { return crypto_hash_alg_common(tfm)->digestsize; @@ -168,12 +306,32 @@ static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags) crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags); } +/** + * crypto_ahash_reqtfm() - obtain cipher handle from request + * @req: asynchronous request handle that contains the reference to the ahash + * cipher handle + * + * Return the ahash cipher handle that is registered with the asynchronous + * request handle ahash_request. + * + * Return: ahash cipher handle + */ static inline struct crypto_ahash *crypto_ahash_reqtfm( struct ahash_request *req) { return __crypto_ahash_cast(req->base.tfm); } +/** + * crypto_ahash_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return the size of the ahash state size. With the crypto_ahash_export + * function, the caller can export the state into a buffer whose size is + * defined with this function. 
+ * + * Return: size of the ahash state + */ static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) { return tfm->reqsize; @@ -184,38 +342,166 @@ static inline void *ahash_request_ctx(struct ahash_request *req) return req->__ctx; } +/** + * crypto_ahash_setkey - set key for cipher handle + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the ahash cipher. The cipher + * handle must point to a keyed hash in order for this function to succeed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen); + +/** + * crypto_ahash_finup() - update and finalize message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of + * crypto_ahash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_ahash_finup(struct ahash_request *req); + +/** + * crypto_ahash_final() - calculate message digest + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer registered with the ahash_request handle. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_ahash_final(struct ahash_request *req); + +/** + * crypto_ahash_digest() - calculate message digest for a buffer + * @req: reference to the ahash_request handle that holds all information + * needed to perform the cipher operation + * + * This function is a "short-hand" for the function calls of crypto_ahash_init, + * crypto_ahash_update and crypto_ahash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_ahash_digest(struct ahash_request *req); +/** + * crypto_ahash_export() - extract current message digest state + * @req: reference to the ahash_request handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the ahash_request handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_ahash_reqsize). + * + * Return: 0 if the export was successful; < 0 if an error occurred + */ static inline int crypto_ahash_export(struct ahash_request *req, void *out) { return crypto_ahash_reqtfm(req)->export(req, out); } +/** + * crypto_ahash_import() - import message digest state + * @req: reference to ahash_request handle the state is imported into + * @in: buffer holding the state + * + * This function imports the hash state into the ahash_request handle from the + * input buffer. That buffer should have been generated with the + * crypto_ahash_export function. 
+ * + * Return: 0 if the import was successful; < 0 if an error occurred + */ static inline int crypto_ahash_import(struct ahash_request *req, const void *in) { return crypto_ahash_reqtfm(req)->import(req, in); } +/** + * crypto_ahash_init() - (re)initialize message digest handle + * @req: ahash_request handle that already is initialized with all necessary + * data using the ahash_request_* API functions + * + * The call (re-)initializes the message digest referenced by the ahash_request + * handle. Any potentially existing state created by previous operations is + * discarded. + * + * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred + */ static inline int crypto_ahash_init(struct ahash_request *req) { return crypto_ahash_reqtfm(req)->init(req); } +/** + * crypto_ahash_update() - add data to message digest for processing + * @req: ahash_request handle that was previously initialized with the + * crypto_ahash_init call. + * + * Updates the message digest state of the &ahash_request handle. The input data + * is pointed to by the scatter/gather list registered in the &ahash_request + * handle + * + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred + */ static inline int crypto_ahash_update(struct ahash_request *req) { return crypto_ahash_reqtfm(req)->update(req); } +/** + * DOC: Asynchronous Hash Request Handle + * + * The &ahash_request data structure contains all pointers to data + * required for the asynchronous cipher operation. This includes the cipher + * handle (which can be used by multiple &ahash_request instances), pointer + * to plaintext and the message digest output buffer, asynchronous callback + * function, etc. It acts as a handle to the ahash_request_* API calls in a + * similar way as ahash handle to the crypto_ahash_* API calls. + */ + +/** + * ahash_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing ahash handle in the request + * data structure with a different one. + */ static inline void ahash_request_set_tfm(struct ahash_request *req, struct crypto_ahash *tfm) { req->base.tfm = crypto_ahash_tfm(tfm); } +/** + * ahash_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the ahash + * message digest API calls. During + * the allocation, the provided ahash handle + * is registered in the request data structure. + * + * Return: allocated request handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
+ */ static inline struct ahash_request *ahash_request_alloc( struct crypto_ahash *tfm, gfp_t gfp) { @@ -230,6 +516,10 @@ static inline struct ahash_request *ahash_request_alloc( return req; } +/** + * ahash_request_free() - zeroize and free the request data structure + * @req: request data structure cipher handle to be freed + */ static inline void ahash_request_free(struct ahash_request *req) { kzfree(req); @@ -241,6 +531,31 @@ static inline struct ahash_request *ahash_request_cast( return container_of(req, struct ahash_request, base); } +/** + * ahash_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * &crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once + * the cipher operation completes. + * + * The callback function is registered with the &ahash_request handle and + * must comply with the following template + * + * void callback_function(struct crypto_async_request *req, int error) + */ static inline void ahash_request_set_callback(struct ahash_request *req, u32 flags, crypto_completion_t compl, @@ -251,6 +566,19 @@ static inline void ahash_request_set_callback(struct ahash_request *req, req->base.flags = flags; } +/** + * ahash_request_set_crypt() - set data buffers + * @req: ahash_request handle to be updated + * @src: source scatter/gather list + * @result: buffer that is filled with the message digest -- the caller must + * ensure that the buffer has sufficient space by, for example, calling + * crypto_ahash_digestsize() + * @nbytes: number of bytes to process from the source scatter/gather list + * + * By using this call, the caller references the source scatter/gather list. + * The source scatter/gather list points to the data the message digest is to + * be calculated for. + */ static inline void ahash_request_set_crypt(struct ahash_request *req, struct scatterlist *src, u8 *result, unsigned int nbytes) @@ -260,6 +588,33 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, req->result = result; } +/** + * DOC: Synchronous Message Digest API + * + * The synchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto) + * + * The message digest API is able to maintain state information for the + * caller. + * + * The synchronous message digest API can store user-related context in in its + * shash_desc request data structure. 
+ */ + +/** + * crypto_alloc_shash() - allocate message digest handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a message digest. The returned &struct + * crypto_shash is the cipher handle that is required for any subsequent + * API invocation for that message digest. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, u32 mask); @@ -268,6 +623,10 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) return &tfm->base; } +/** + * crypto_free_shash() - zeroize and free the message digest handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_shash(struct crypto_shash *tfm) { crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm)); @@ -279,6 +638,15 @@ static inline unsigned int crypto_shash_alignmask( return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm)); } +/** + * crypto_shash_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm) { return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm)); @@ -294,6 +662,15 @@ static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm) return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg); } +/** + * crypto_shash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * Return: digest size of cipher + */ static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm) { return crypto_shash_alg(tfm)->digestsize; @@ -319,6 +696,21 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags) crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags); } +/** + * crypto_shash_descsize() - obtain the operational state size + * @tfm: cipher handle + * + * The size of the operational state the cipher needs during operation is + * returned for the hash referenced with the cipher handle. This size is + * required to calculate the memory requirements to allow the caller allocating + * sufficient memory for operational state. + * + * The operational state is defined with struct shash_desc where the size of + * that data structure is to be calculated as + * sizeof(struct shash_desc) + crypto_shash_descsize(alg) + * + * Return: size of the operational state + */ static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) { return tfm->descsize; @@ -329,29 +721,129 @@ static inline void *shash_desc_ctx(struct shash_desc *desc) return desc->__ctx; } +/** + * crypto_shash_setkey() - set key for message digest + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the keyed message digest cipher. The + * cipher handle must point to a keyed message digest cipher in order for this + * function to succeed. 
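[Editorial aside, not part of the patch: a sketch of the allocation pattern the synchronous hash (shash) documentation above describes, i.e. sizing the operational state with crypto_shash_descsize() and doing a one-shot digest. The helper name is hypothetical.]

#include <crypto/hash.h>
#include <linux/slab.h>
#include <linux/err.h>

/* One-shot SHA-256 of a linear buffer via the shash interface. */
static int do_shash_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* operational state: sizeof(struct shash_desc) + crypto_shash_descsize() */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	/* out must hold crypto_shash_digestsize(tfm) bytes (32 for SHA-256) */
	ret = crypto_shash_digest(desc, data, len, out);

	kzfree(desc);
	crypto_free_shash(tfm);
	return ret;
}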
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen); + +/** + * crypto_shash_digest() - calculate message digest for buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of crypto_shash_init, + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out); +/** + * crypto_shash_export() - extract operational state for message digest + * @desc: reference to the operational state handle whose state is exported + * @out: output buffer of sufficient size that can hold the hash state + * + * This function exports the hash state of the operational state handle into the + * caller-allocated output buffer out which must have sufficient size (e.g. by + * calling crypto_shash_descsize). + * + * Return: 0 if the export creation was successful; < 0 if an error occurred + */ static inline int crypto_shash_export(struct shash_desc *desc, void *out) { return crypto_shash_alg(desc->tfm)->export(desc, out); } +/** + * crypto_shash_import() - import operational state + * @desc: reference to the operational state handle the state imported into + * @in: buffer holding the state + * + * This function imports the hash state into the operational state handle from + * the input buffer. That buffer should have been generated with the + * crypto_ahash_export function. + * + * Return: 0 if the import was successful; < 0 if an error occurred + */ static inline int crypto_shash_import(struct shash_desc *desc, const void *in) { return crypto_shash_alg(desc->tfm)->import(desc, in); } +/** + * crypto_shash_init() - (re)initialize message digest + * @desc: operational state handle that is already filled + * + * The call (re-)initializes the message digest referenced by the + * operational state handle. Any potentially existing state created by + * previous operations is discarded. + * + * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred + */ static inline int crypto_shash_init(struct shash_desc *desc) { return crypto_shash_alg(desc->tfm)->init(desc); } +/** + * crypto_shash_update() - add data to message digest for processing + * @desc: operational state handle that is already initialized + * @data: input data to be added to the message digest + * @len: length of the input data + * + * Updates the message digest state of the operational state handle. + * + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred + */ int crypto_shash_update(struct shash_desc *desc, const u8 *data, unsigned int len); + +/** + * crypto_shash_final() - calculate message digest + * @desc: operational state handle that is already filled with data + * @out: output buffer filled with the message digest + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer. The caller must ensure that the output buffer is + * large enough by using crypto_shash_digestsize. 
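[Editorial aside, not part of the patch: a sketch of the keyed, incremental flow described above (crypto_shash_setkey(), then init/update and a final step). The "hmac(sha256)" template name and the helper are illustrative assumptions, and crypto_shash_finup() is used for the last fragment.]

#include <crypto/hash.h>
#include <linux/slab.h>
#include <linux/err.h>

/* HMAC-SHA256 over two fragments, fed incrementally. */
static int do_hmac_sha256(const u8 *key, unsigned int keylen,
			  const u8 *p1, unsigned int l1,
			  const u8 *p2, unsigned int l2, u8 *mac)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, key, keylen);
	if (ret)
		goto free_tfm;

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto free_tfm;
	}
	desc->tfm = tfm;

	ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, p1, l1);
	if (!ret)
		ret = crypto_shash_finup(desc, p2, l2, mac);	/* update + final */

	kzfree(desc);
free_tfm:
	crypto_free_shash(tfm);
	return ret;
}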
+ * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_shash_final(struct shash_desc *desc, u8 *out); + +/** + * crypto_shash_finup() - calculate message digest of buffer + * @desc: see crypto_shash_final() + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This function is a "short-hand" for the function calls of + * crypto_shash_update and crypto_shash_final. The parameters have the same + * meaning as discussed for those separate functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ int crypto_shash_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out); diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index d61c11170213..cd62bf4289e9 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -42,6 +42,7 @@ struct af_alg_completion { struct af_alg_control { struct af_alg_iv *iv; int op; + unsigned int aead_assoclen; }; struct af_alg_type { diff --git a/include/crypto/rng.h b/include/crypto/rng.h index c93f9b917925..a16fb10142bf 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -20,11 +20,38 @@ extern struct crypto_rng *crypto_default_rng; int crypto_get_default_rng(void); void crypto_put_default_rng(void); +/** + * DOC: Random number generator API + * + * The random number generator API is used with the ciphers of type + * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto) + */ + static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm) { return (struct crypto_rng *)tfm; } +/** + * crypto_alloc_rng() -- allocate RNG handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a random number generator. The returned struct + * crypto_rng is the cipher handle that is required for any subsequent + * API invocation for that random number generator. + * + * For all random number generators, this call creates a new private copy of + * the random number generator that does not share a state with other + * instances. The only exception is the "krng" random number generator which + * is a kernel crypto API use case for the get_random_bytes() function of the + * /dev/random driver. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
+ */ static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask) { @@ -40,6 +67,14 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm) return &tfm->base; } +/** + * crypto_rng_alg - obtain name of RNG + * @tfm: cipher handle + * + * Return the generic name (cra_name) of the initialized random number generator + * + * Return: generic name string + */ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) { return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng; @@ -50,23 +85,68 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm) return &crypto_rng_tfm(tfm)->crt_rng; } +/** + * crypto_free_rng() - zeroize and free RNG handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_rng(struct crypto_rng *tfm) { crypto_free_tfm(crypto_rng_tfm(tfm)); } +/** + * crypto_rng_get_bytes() - get random number + * @tfm: cipher handle + * @rdata: output buffer holding the random numbers + * @dlen: length of the output buffer + * + * This function fills the caller-allocated buffer with random numbers using the + * random number generator referenced by the cipher handle. + * + * Return: > 0 function was successful and returns the number of generated + * bytes; < 0 if an error occurred + */ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen) { return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen); } +/** + * crypto_rng_reset() - re-initialize the RNG + * @tfm: cipher handle + * @seed: seed input data + * @slen: length of the seed input data + * + * The reset function completely re-initializes the random number generator + * referenced by the cipher handle by clearing the current state. The new state + * is initialized with the caller provided seed or automatically, depending + * on the random number generator type (the ANSI X9.31 RNG requires + * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding). + * The seed is provided as a parameter to this function call. The provided seed + * should have the length of the seed size defined for the random number + * generator as defined by crypto_rng_seedsize. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_rng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) { return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen); } +/** + * crypto_rng_seedsize() - obtain seed size of RNG + * @tfm: cipher handle + * + * The function returns the seed size for the random number generator + * referenced by the cipher handle. This value may be zero if the random + * number generator does not implement or require a reseeding. For example, + * the SP800-90A DRBGs implement an automated reseeding after reaching a + * pre-defined threshold. + * + * Return: seed size for the random number generator + */ static inline int crypto_rng_seedsize(struct crypto_rng *tfm) { return crypto_rng_alg(tfm)->seedsize; diff --git a/include/dt-bindings/arm/ux500_pm_domains.h b/include/dt-bindings/arm/ux500_pm_domains.h new file mode 100644 index 000000000000..398a6c0288d1 --- /dev/null +++ b/include/dt-bindings/arm/ux500_pm_domains.h @@ -0,0 +1,15 @@ +/* + * Copyright (C) 2014 Linaro Ltd. + * + * Author: Ulf Hansson <ulf.hansson@linaro.org> + * License terms: GNU General Public License (GPL) version 2 + */ +#ifndef _DT_BINDINGS_ARM_UX500_PM_DOMAINS_H +#define _DT_BINDINGS_ARM_UX500_PM_DOMAINS_H + +#define DOMAIN_VAPE 0 + +/* Number of PM domains. 
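[Editorial aside, not part of the patch: an illustrative consumer of the RNG interface documented above. The helper name is hypothetical and "stdrng" is assumed to resolve to a generator that seeds itself (seed size 0); generators that report a non-zero seed size need caller-provided seed material.]

#include <crypto/rng.h>
#include <linux/err.h>

/* Fill buf with dlen bytes from a kernel crypto API RNG instance. */
static int fill_random(u8 *buf, unsigned int dlen)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	if (crypto_rng_seedsize(rng)) {
		/*
		 * This generator wants a caller-provided seed; a real user
		 * would pass crypto_rng_seedsize() bytes of seed material
		 * to crypto_rng_reset() here instead of bailing out.
		 */
		ret = -EINVAL;
		goto out;
	}

	/* seed size 0: reset triggers automatic (re)seeding where needed */
	ret = crypto_rng_reset(rng, NULL, 0);
	if (ret)
		goto out;

	/* per the documentation above, a positive return is the byte count */
	ret = crypto_rng_get_bytes(rng, buf, dlen);
out:
	crypto_free_rng(rng);
	return ret;
}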
*/ +#define NR_DOMAINS (DOMAIN_VAPE + 1) + +#endif diff --git a/include/dt-bindings/clock/imx5-clock.h b/include/dt-bindings/clock/imx5-clock.h index 5f2667ecd98e..f4b7478e23c8 100644 --- a/include/dt-bindings/clock/imx5-clock.h +++ b/include/dt-bindings/clock/imx5-clock.h @@ -198,6 +198,9 @@ #define IMX5_CLK_OCRAM 186 #define IMX5_CLK_SAHARA_IPG_GATE 187 #define IMX5_CLK_SATA_REF 188 -#define IMX5_CLK_END 189 +#define IMX5_CLK_STEP_SEL 189 +#define IMX5_CLK_CPU_PODF_SEL 190 +#define IMX5_CLK_ARM 191 +#define IMX5_CLK_END 192 #endif /* __DT_BINDINGS_CLOCK_IMX5_H */ diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h index a929f86d0ddd..d72b5b35f15e 100644 --- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h +++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h @@ -60,7 +60,7 @@ #define ESC1_CLK_SRC 43 #define HDMI_CLK_SRC 44 #define VSYNC_CLK_SRC 45 -#define RBCPR_CLK_SRC 46 +#define MMSS_RBCPR_CLK_SRC 46 #define RBBMTIMER_CLK_SRC 47 #define MAPLE_CLK_SRC 48 #define VDP_CLK_SRC 49 diff --git a/include/dt-bindings/clock/r8a7740-clock.h b/include/dt-bindings/clock/r8a7740-clock.h index f6b4b0fe7a43..476135da0f23 100644 --- a/include/dt-bindings/clock/r8a7740-clock.h +++ b/include/dt-bindings/clock/r8a7740-clock.h @@ -40,6 +40,7 @@ /* MSTP2 */ #define R8A7740_CLK_SCIFA6 30 +#define R8A7740_CLK_INTCA 29 #define R8A7740_CLK_SCIFA7 22 #define R8A7740_CLK_DMAC1 18 #define R8A7740_CLK_DMAC2 17 diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h index 8ea7ab0346ad..c27b3b5133b9 100644 --- a/include/dt-bindings/clock/r8a7790-clock.h +++ b/include/dt-bindings/clock/r8a7790-clock.h @@ -26,8 +26,18 @@ #define R8A7790_CLK_MSIOF0 0 /* MSTP1 */ -#define R8A7790_CLK_JPU 6 +#define R8A7790_CLK_VCP1 0 +#define R8A7790_CLK_VCP0 1 +#define R8A7790_CLK_VPC1 2 +#define R8A7790_CLK_VPC0 3 +#define R8A7790_CLK_JPU 6 +#define R8A7790_CLK_SSP1 9 #define R8A7790_CLK_TMU1 11 +#define R8A7790_CLK_3DG 12 +#define R8A7790_CLK_2DDMAC 15 +#define R8A7790_CLK_FDP1_2 17 +#define R8A7790_CLK_FDP1_1 18 +#define R8A7790_CLK_FDP1_0 19 #define R8A7790_CLK_TMU3 21 #define R8A7790_CLK_TMU2 22 #define R8A7790_CLK_CMT0 24 @@ -68,6 +78,8 @@ #define R8A7790_CLK_USBDMAC1 31 /* MSTP5 */ +#define R8A7790_CLK_AUDIO_DMAC1 1 +#define R8A7790_CLK_AUDIO_DMAC0 2 #define R8A7790_CLK_THERMAL 22 #define R8A7790_CLK_PWM 23 diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h index 58c3f49d068c..3ea2bbc0da3f 100644 --- a/include/dt-bindings/clock/r8a7791-clock.h +++ b/include/dt-bindings/clock/r8a7791-clock.h @@ -25,8 +25,15 @@ #define R8A7791_CLK_MSIOF0 0 /* MSTP1 */ -#define R8A7791_CLK_JPU 6 +#define R8A7791_CLK_VCP0 1 +#define R8A7791_CLK_VPC0 3 +#define R8A7791_CLK_JPU 6 +#define R8A7791_CLK_SSP1 9 #define R8A7791_CLK_TMU1 11 +#define R8A7791_CLK_3DG 12 +#define R8A7791_CLK_2DDMAC 15 +#define R8A7791_CLK_FDP1_1 18 +#define R8A7791_CLK_FDP1_0 19 #define R8A7791_CLK_TMU3 21 #define R8A7791_CLK_TMU2 22 #define R8A7791_CLK_CMT0 24 @@ -62,6 +69,8 @@ #define R8A7791_CLK_USBDMAC1 31 /* MSTP5 */ +#define R8A7791_CLK_AUDIO_DMAC1 1 +#define R8A7791_CLK_AUDIO_DMAC0 2 #define R8A7791_CLK_THERMAL 22 #define R8A7791_CLK_PWM 23 diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h index 9ac1043e25bc..aa9c286e60c0 100644 --- a/include/dt-bindings/clock/r8a7794-clock.h +++ b/include/dt-bindings/clock/r8a7794-clock.h @@ -26,11 +26,18 @@ #define R8A7794_CLK_MSIOF0 0 /* MSTP1 
*/ +#define R8A7794_CLK_VCP0 1 +#define R8A7794_CLK_VPC0 3 #define R8A7794_CLK_TMU1 11 +#define R8A7794_CLK_3DG 12 +#define R8A7794_CLK_2DDMAC 15 +#define R8A7794_CLK_FDP1_0 19 #define R8A7794_CLK_TMU3 21 #define R8A7794_CLK_TMU2 22 #define R8A7794_CLK_CMT0 24 #define R8A7794_CLK_TMU0 25 +#define R8A7794_CLK_VSP1_DU0 28 +#define R8A7794_CLK_VSP1_S 31 /* MSTP2 */ #define R8A7794_CLK_SCIFA2 2 @@ -61,6 +68,8 @@ #define R8A7794_CLK_SCIF0 21 /* MSTP8 */ +#define R8A7794_CLK_VIN1 10 +#define R8A7794_CLK_VIN0 11 #define R8A7794_CLK_ETHER 13 /* MSTP9 */ diff --git a/include/dt-bindings/clock/stih407-clks.h b/include/dt-bindings/clock/stih407-clks.h new file mode 100644 index 000000000000..7af2b717b3b2 --- /dev/null +++ b/include/dt-bindings/clock/stih407-clks.h @@ -0,0 +1,86 @@ +/* + * This header provides constants clk index STMicroelectronics + * STiH407 SoC. + */ +#ifndef _DT_BINDINGS_CLK_STIH407 +#define _DT_BINDINGS_CLK_STIH407 + +/* CLOCKGEN C0 */ +#define CLK_ICN_GPU 0 +#define CLK_FDMA 1 +#define CLK_NAND 2 +#define CLK_HVA 3 +#define CLK_PROC_STFE 4 +#define CLK_PROC_TP 5 +#define CLK_RX_ICN_DMU 6 +#define CLK_RX_ICN_DISP_0 6 +#define CLK_RX_ICN_DISP_1 6 +#define CLK_RX_ICN_HVA 7 +#define CLK_RX_ICN_TS 7 +#define CLK_ICN_CPU 8 +#define CLK_TX_ICN_DMU 9 +#define CLK_TX_ICN_HVA 9 +#define CLK_TX_ICN_TS 9 +#define CLK_ICN_COMPO 9 +#define CLK_MMC_0 10 +#define CLK_MMC_1 11 +#define CLK_JPEGDEC 12 +#define CLK_ICN_REG 13 +#define CLK_TRACE_A9 13 +#define CLK_PTI_STM 13 +#define CLK_EXT2F_A9 13 +#define CLK_IC_BDISP_0 14 +#define CLK_IC_BDISP_1 15 +#define CLK_PP_DMU 16 +#define CLK_VID_DMU 17 +#define CLK_DSS_LPC 18 +#define CLK_ST231_AUD_0 19 +#define CLK_ST231_GP_0 19 +#define CLK_ST231_GP_1 20 +#define CLK_ST231_DMU 21 +#define CLK_ICN_LMI 22 +#define CLK_TX_ICN_DISP_0 23 +#define CLK_TX_ICN_DISP_1 23 +#define CLK_ICN_SBC 24 +#define CLK_STFE_FRC2 25 +#define CLK_ETH_PHY 26 +#define CLK_ETH_REF_PHYCLK 27 +#define CLK_FLASH_PROMIP 28 +#define CLK_MAIN_DISP 29 +#define CLK_AUX_DISP 30 +#define CLK_COMPO_DVP 31 + +/* CLOCKGEN D0 */ +#define CLK_PCM_0 0 +#define CLK_PCM_1 1 +#define CLK_PCM_2 2 +#define CLK_SPDIFF 3 + +/* CLOCKGEN D2 */ +#define CLK_PIX_MAIN_DISP 0 +#define CLK_PIX_PIP 1 +#define CLK_PIX_GDP1 2 +#define CLK_PIX_GDP2 3 +#define CLK_PIX_GDP3 4 +#define CLK_PIX_GDP4 5 +#define CLK_PIX_AUX_DISP 6 +#define CLK_DENC 7 +#define CLK_PIX_HDDAC 8 +#define CLK_HDDAC 9 +#define CLK_SDDAC 10 +#define CLK_PIX_DVO 11 +#define CLK_DVO 12 +#define CLK_PIX_HDMI 13 +#define CLK_TMDS_HDMI 14 +#define CLK_REF_HDMIPHY 15 + +/* CLOCKGEN D3 */ +#define CLK_STFE_FRC1 0 +#define CLK_TSOUT_0 1 +#define CLK_TSOUT_1 2 +#define CLK_MCHI 3 +#define CLK_VSENS_COMPO 4 +#define CLK_FRC1_REMOTE 5 +#define CLK_LPC_0 6 +#define CLK_LPC_1 7 +#endif diff --git a/include/dt-bindings/clock/stih410-clks.h b/include/dt-bindings/clock/stih410-clks.h new file mode 100644 index 000000000000..2097a4bbe155 --- /dev/null +++ b/include/dt-bindings/clock/stih410-clks.h @@ -0,0 +1,25 @@ +/* + * This header provides constants clk index STMicroelectronics + * STiH410 SoC. 
+ */ +#ifndef _DT_BINDINGS_CLK_STIH410 +#define _DT_BINDINGS_CLK_STIH410 + +#include "stih407-clks.h" + +/* STiH410 introduces new clock outputs compared to STiH407 */ + +/* CLOCKGEN C0 */ +#define CLK_TX_ICN_HADES 32 +#define CLK_RX_ICN_HADES 33 +#define CLK_ICN_REG_16 34 +#define CLK_PP_HADES 35 +#define CLK_CLUST_HADES 36 +#define CLK_HWPE_HADES 37 +#define CLK_FC_HADES 38 + +/* CLOCKGEN D0 */ +#define CLK_PCMR10_MASTER 4 +#define CLK_USB2_PHY 5 + +#endif diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h index fc12621fb432..534c03f8ad72 100644 --- a/include/dt-bindings/clock/tegra114-car.h +++ b/include/dt-bindings/clock/tegra114-car.h @@ -49,7 +49,7 @@ #define TEGRA114_CLK_I2S0 30 /* 31 */ -/* 32 */ +#define TEGRA114_CLK_MC 32 /* 33 */ #define TEGRA114_CLK_APBDMA 34 /* 35 */ diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h index 6bac637fd635..af9bc9a3ddbc 100644 --- a/include/dt-bindings/clock/tegra124-car.h +++ b/include/dt-bindings/clock/tegra124-car.h @@ -48,7 +48,7 @@ #define TEGRA124_CLK_I2S0 30 /* 31 */ -/* 32 */ +#define TEGRA124_CLK_MC 32 /* 33 */ #define TEGRA124_CLK_APBDMA 34 /* 35 */ diff --git a/include/dt-bindings/clock/tegra20-car.h b/include/dt-bindings/clock/tegra20-car.h index 9406207cfac8..04500b243a4d 100644 --- a/include/dt-bindings/clock/tegra20-car.h +++ b/include/dt-bindings/clock/tegra20-car.h @@ -49,7 +49,7 @@ /* 30 */ #define TEGRA20_CLK_CACHE2 31 -#define TEGRA20_CLK_MEM 32 +#define TEGRA20_CLK_MC 32 #define TEGRA20_CLK_AHBDMA 33 #define TEGRA20_CLK_APBDMA 34 /* 35 */ diff --git a/include/dt-bindings/dma/at91.h b/include/dt-bindings/dma/at91.h index e835037a77b4..ab6cbba45401 100644 --- a/include/dt-bindings/dma/at91.h +++ b/include/dt-bindings/dma/at91.h @@ -9,6 +9,8 @@ #ifndef __DT_BINDINGS_AT91_DMA_H__ #define __DT_BINDINGS_AT91_DMA_H__ +/* ---------- HDMAC ---------- */ + /* * Source and/or destination peripheral ID */ @@ -24,4 +26,27 @@ #define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */ #define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */ + +/* ---------- XDMAC ---------- */ +#define AT91_XDMAC_DT_MEM_IF_MASK (0x1) +#define AT91_XDMAC_DT_MEM_IF_OFFSET (13) +#define AT91_XDMAC_DT_MEM_IF(mem_if) (((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \ + << AT91_XDMAC_DT_MEM_IF_OFFSET) +#define AT91_XDMAC_DT_GET_MEM_IF(cfg) (((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \ + & AT91_XDMAC_DT_MEM_IF_MASK) + +#define AT91_XDMAC_DT_PER_IF_MASK (0x1) +#define AT91_XDMAC_DT_PER_IF_OFFSET (14) +#define AT91_XDMAC_DT_PER_IF(per_if) (((per_if) & AT91_XDMAC_DT_PER_IF_MASK) \ + << AT91_XDMAC_DT_PER_IF_OFFSET) +#define AT91_XDMAC_DT_GET_PER_IF(cfg) (((cfg) >> AT91_XDMAC_DT_PER_IF_OFFSET) \ + & AT91_XDMAC_DT_PER_IF_MASK) + +#define AT91_XDMAC_DT_PERID_MASK (0x7f) +#define AT91_XDMAC_DT_PERID_OFFSET (24) +#define AT91_XDMAC_DT_PERID(perid) (((perid) & AT91_XDMAC_DT_PERID_MASK) \ + << AT91_XDMAC_DT_PERID_OFFSET) +#define AT91_XDMAC_DT_GET_PERID(cfg) (((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \ + & AT91_XDMAC_DT_PERID_MASK) + #endif /* __DT_BINDINGS_AT91_DMA_H__ */ diff --git a/include/dt-bindings/interrupt-controller/mips-gic.h b/include/dt-bindings/interrupt-controller/mips-gic.h new file mode 100644 index 000000000000..cf35a577e371 --- /dev/null +++ b/include/dt-bindings/interrupt-controller/mips-gic.h @@ -0,0 +1,9 @@ +#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H +#define 
_DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H + +#include <dt-bindings/interrupt-controller/irq.h> + +#define GIC_SHARED 0 +#define GIC_LOCAL 1 + +#endif diff --git a/include/dt-bindings/memory/tegra114-mc.h b/include/dt-bindings/memory/tegra114-mc.h new file mode 100644 index 000000000000..8f48985a3139 --- /dev/null +++ b/include/dt-bindings/memory/tegra114-mc.h @@ -0,0 +1,25 @@ +#ifndef DT_BINDINGS_MEMORY_TEGRA114_MC_H +#define DT_BINDINGS_MEMORY_TEGRA114_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_EPP 3 +#define TEGRA_SWGROUP_G2 4 +#define TEGRA_SWGROUP_AVPC 5 +#define TEGRA_SWGROUP_NV 6 +#define TEGRA_SWGROUP_HDA 7 +#define TEGRA_SWGROUP_HC 8 +#define TEGRA_SWGROUP_MSENC 9 +#define TEGRA_SWGROUP_PPCS 10 +#define TEGRA_SWGROUP_VDE 11 +#define TEGRA_SWGROUP_MPCORELP 12 +#define TEGRA_SWGROUP_MPCORE 13 +#define TEGRA_SWGROUP_VI 14 +#define TEGRA_SWGROUP_ISP 15 +#define TEGRA_SWGROUP_XUSB_HOST 16 +#define TEGRA_SWGROUP_XUSB_DEV 17 +#define TEGRA_SWGROUP_EMUCIF 18 +#define TEGRA_SWGROUP_TSEC 19 + +#endif diff --git a/include/dt-bindings/memory/tegra124-mc.h b/include/dt-bindings/memory/tegra124-mc.h new file mode 100644 index 000000000000..7d8ee798f34e --- /dev/null +++ b/include/dt-bindings/memory/tegra124-mc.h @@ -0,0 +1,31 @@ +#ifndef DT_BINDINGS_MEMORY_TEGRA124_MC_H +#define DT_BINDINGS_MEMORY_TEGRA124_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_AFI 3 +#define TEGRA_SWGROUP_AVPC 4 +#define TEGRA_SWGROUP_HDA 5 +#define TEGRA_SWGROUP_HC 6 +#define TEGRA_SWGROUP_MSENC 7 +#define TEGRA_SWGROUP_PPCS 8 +#define TEGRA_SWGROUP_SATA 9 +#define TEGRA_SWGROUP_VDE 10 +#define TEGRA_SWGROUP_MPCORELP 11 +#define TEGRA_SWGROUP_MPCORE 12 +#define TEGRA_SWGROUP_ISP2 13 +#define TEGRA_SWGROUP_XUSB_HOST 14 +#define TEGRA_SWGROUP_XUSB_DEV 15 +#define TEGRA_SWGROUP_ISP2B 16 +#define TEGRA_SWGROUP_TSEC 17 +#define TEGRA_SWGROUP_A9AVP 18 +#define TEGRA_SWGROUP_GPU 19 +#define TEGRA_SWGROUP_SDMMC1A 20 +#define TEGRA_SWGROUP_SDMMC2A 21 +#define TEGRA_SWGROUP_SDMMC3A 22 +#define TEGRA_SWGROUP_SDMMC4A 23 +#define TEGRA_SWGROUP_VIC 24 +#define TEGRA_SWGROUP_VI 25 + +#endif diff --git a/include/dt-bindings/memory/tegra30-mc.h b/include/dt-bindings/memory/tegra30-mc.h new file mode 100644 index 000000000000..502beb03d777 --- /dev/null +++ b/include/dt-bindings/memory/tegra30-mc.h @@ -0,0 +1,24 @@ +#ifndef DT_BINDINGS_MEMORY_TEGRA30_MC_H +#define DT_BINDINGS_MEMORY_TEGRA30_MC_H + +#define TEGRA_SWGROUP_PTC 0 +#define TEGRA_SWGROUP_DC 1 +#define TEGRA_SWGROUP_DCB 2 +#define TEGRA_SWGROUP_EPP 3 +#define TEGRA_SWGROUP_G2 4 +#define TEGRA_SWGROUP_MPE 5 +#define TEGRA_SWGROUP_VI 6 +#define TEGRA_SWGROUP_AFI 7 +#define TEGRA_SWGROUP_AVPC 8 +#define TEGRA_SWGROUP_NV 9 +#define TEGRA_SWGROUP_NV2 10 +#define TEGRA_SWGROUP_HDA 11 +#define TEGRA_SWGROUP_HC 12 +#define TEGRA_SWGROUP_PPCS 13 +#define TEGRA_SWGROUP_SATA 14 +#define TEGRA_SWGROUP_VDE 15 +#define TEGRA_SWGROUP_MPCORELP 16 +#define TEGRA_SWGROUP_MPCORE 17 +#define TEGRA_SWGROUP_ISP 18 + +#endif diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h index 3d33794e4f3e..7448edff4723 100644 --- a/include/dt-bindings/pinctrl/dra.h +++ b/include/dt-bindings/pinctrl/dra.h @@ -40,8 +40,8 @@ /* Active pin states */ #define PIN_OUTPUT (0 | PULL_DIS) -#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP) -#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA) +#define PIN_OUTPUT_PULLUP (PULL_UP) +#define 
PIN_OUTPUT_PULLDOWN (0) #define PIN_INPUT (INPUT_EN | PULL_DIS) #define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL) #define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h new file mode 100644 index 000000000000..fa74d7cc960c --- /dev/null +++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h @@ -0,0 +1,142 @@ +/* + * This header provides constants for the Qualcomm PMIC GPIO binding. + */ + +#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H +#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_GPIO_H + +#define PMIC_GPIO_PULL_UP_30 0 +#define PMIC_GPIO_PULL_UP_1P5 1 +#define PMIC_GPIO_PULL_UP_31P5 2 +#define PMIC_GPIO_PULL_UP_1P5_30 3 + +#define PMIC_GPIO_STRENGTH_NO 0 +#define PMIC_GPIO_STRENGTH_HIGH 1 +#define PMIC_GPIO_STRENGTH_MED 2 +#define PMIC_GPIO_STRENGTH_LOW 3 + +/* + * Note: PM8018 GPIO3 and GPIO4 are supporting + * only S3 and L2 options (1.8V) + */ +#define PM8018_GPIO_L6 0 +#define PM8018_GPIO_L5 1 +#define PM8018_GPIO_S3 2 +#define PM8018_GPIO_L14 3 +#define PM8018_GPIO_L2 4 +#define PM8018_GPIO_L4 5 +#define PM8018_GPIO_VDD 6 + +/* + * Note: PM8038 GPIO7 and GPIO8 are supporting + * only L11 and L4 options (1.8V) + */ +#define PM8038_GPIO_VPH 0 +#define PM8038_GPIO_BB 1 +#define PM8038_GPIO_L11 2 +#define PM8038_GPIO_L15 3 +#define PM8038_GPIO_L4 4 +#define PM8038_GPIO_L3 5 +#define PM8038_GPIO_L17 6 + +#define PM8058_GPIO_VPH 0 +#define PM8058_GPIO_BB 1 +#define PM8058_GPIO_S3 2 +#define PM8058_GPIO_L3 3 +#define PM8058_GPIO_L7 4 +#define PM8058_GPIO_L6 5 +#define PM8058_GPIO_L5 6 +#define PM8058_GPIO_L2 7 + +#define PM8917_GPIO_VPH 0 +#define PM8917_GPIO_S4 2 +#define PM8917_GPIO_L15 3 +#define PM8917_GPIO_L4 4 +#define PM8917_GPIO_L3 5 +#define PM8917_GPIO_L17 6 + +#define PM8921_GPIO_VPH 0 +#define PM8921_GPIO_BB 1 +#define PM8921_GPIO_S4 2 +#define PM8921_GPIO_L15 3 +#define PM8921_GPIO_L4 4 +#define PM8921_GPIO_L3 5 +#define PM8921_GPIO_L17 6 + +/* + * Note: PM8941 gpios from 15 to 18 are supporting + * only S3 and L6 options (1.8V) + */ +#define PM8941_GPIO_VPH 0 +#define PM8941_GPIO_L1 1 +#define PM8941_GPIO_S3 2 +#define PM8941_GPIO_L6 3 + +/* + * Note: PMA8084 gpios from 15 to 18 are supporting + * only S4 and L6 options (1.8V) + */ +#define PMA8084_GPIO_VPH 0 +#define PMA8084_GPIO_L1 1 +#define PMA8084_GPIO_S4 2 +#define PMA8084_GPIO_L6 3 + +/* To be used with "function" */ +#define PMIC_GPIO_FUNC_NORMAL "normal" +#define PMIC_GPIO_FUNC_PAIRED "paired" +#define PMIC_GPIO_FUNC_FUNC1 "func1" +#define PMIC_GPIO_FUNC_FUNC2 "func2" +#define PMIC_GPIO_FUNC_DTEST1 "dtest1" +#define PMIC_GPIO_FUNC_DTEST2 "dtest2" +#define PMIC_GPIO_FUNC_DTEST3 "dtest3" +#define PMIC_GPIO_FUNC_DTEST4 "dtest4" + +#define PM8038_GPIO1_2_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO3_5V_BOOST_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO4_SSBI_ALT_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO5_6_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO10_11_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO6_7_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO9_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8038_GPIO6_12_KYPD_DRV PMIC_GPIO_FUNC_FUNC2 + +#define PM8058_GPIO7_8_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO7_8_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO9_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO24_26_LPG_DRV PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO33_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO34_35_MP3_CLK 
PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO36_BCLK_19P2MHZ PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO37_UPL_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO37_UART_M_RX PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO38_39_CLK_32KHZ PMIC_GPIO_FUNC_FUNC2 +#define PM8058_GPIO39_MP3_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8058_GPIO40_EXT_BB_EN PMIC_GPIO_FUNC_FUNC1 + +#define PM8917_GPIO9_18_KEYP_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO20_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO21_23_UART_TX PMIC_GPIO_FUNC_FUNC2 +#define PM8917_GPIO25_26_EXT_REG_EN PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO37_38_XO_SLEEP_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8917_GPIO37_38_MP3_CLK PMIC_GPIO_FUNC_FUNC2 + +#define PM8941_GPIO9_14_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2 +#define PM8941_GPIO23_26_KYPD_DRV PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO23_26_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2 +#define PM8941_GPIO31_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO33_36_LPG_DRV_3D PMIC_GPIO_FUNC_FUNC1 +#define PM8941_GPIO33_36_LPG_DRV_HI PMIC_GPIO_FUNC_FUNC2 + +#define PMA8084_GPIO4_5_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO7_10_LPG_DRV PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO5_14_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO19_21_KEYP_DRV PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO15_18_DIV_CLK PMIC_GPIO_FUNC_FUNC1 +#define PMA8084_GPIO15_18_SLEEP_CLK PMIC_GPIO_FUNC_FUNC2 +#define PMA8084_GPIO22_BAT_ALRM_OUT PMIC_GPIO_FUNC_FUNC1 + +#endif diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h new file mode 100644 index 000000000000..d2c7dabe3223 --- /dev/null +++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h @@ -0,0 +1,44 @@ +/* + * This header provides constants for the Qualcomm PMIC's + * Multi-Purpose Pin binding. + */ + +#ifndef _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H +#define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H + +/* power-source */ +#define PM8841_MPP_VPH 0 +#define PM8841_MPP_S3 2 + +#define PM8941_MPP_VPH 0 +#define PM8941_MPP_L1 1 +#define PM8941_MPP_S3 2 +#define PM8941_MPP_L6 3 + +#define PMA8084_MPP_VPH 0 +#define PMA8084_MPP_L1 1 +#define PMA8084_MPP_S4 2 +#define PMA8084_MPP_L6 3 + +/* + * Analog Input - Set the source for analog input. + * To be used with "qcom,amux-route" property + */ +#define PMIC_MPP_AMUX_ROUTE_CH5 0 +#define PMIC_MPP_AMUX_ROUTE_CH6 1 +#define PMIC_MPP_AMUX_ROUTE_CH7 2 +#define PMIC_MPP_AMUX_ROUTE_CH8 3 +#define PMIC_MPP_AMUX_ROUTE_ABUS1 4 +#define PMIC_MPP_AMUX_ROUTE_ABUS2 5 +#define PMIC_MPP_AMUX_ROUTE_ABUS3 6 +#define PMIC_MPP_AMUX_ROUTE_ABUS4 7 + +/* To be used with "function" */ +#define PMIC_MPP_FUNC_NORMAL "normal" +#define PMIC_MPP_FUNC_PAIRED "paired" +#define PMIC_MPP_FUNC_DTEST1 "dtest1" +#define PMIC_MPP_FUNC_DTEST2 "dtest2" +#define PMIC_MPP_FUNC_DTEST3 "dtest3" +#define PMIC_MPP_FUNC_DTEST4 "dtest4" + +#endif diff --git a/include/dt-bindings/regulator/maxim,max77802.h b/include/dt-bindings/regulator/maxim,max77802.h new file mode 100644 index 000000000000..cf28631d7109 --- /dev/null +++ b/include/dt-bindings/regulator/maxim,max77802.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2014 Google, Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Device Tree binding constants for the Maxim 77802 PMIC regulators + */ + +#ifndef _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H +#define _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H + +/* Regulator operating modes */ +#define MAX77802_OPMODE_LP 1 +#define MAX77802_OPMODE_NORMAL 3 + +#endif /* _DT_BINDINGS_REGULATOR_MAXIM_MAX77802_H */ diff --git a/include/dt-bindings/reset-controller/stih407-resets.h b/include/dt-bindings/reset-controller/stih407-resets.h new file mode 100644 index 000000000000..02d4328fe479 --- /dev/null +++ b/include/dt-bindings/reset-controller/stih407-resets.h @@ -0,0 +1,61 @@ +/* + * This header provides constants for the reset controller + * based peripheral powerdown requests on the STMicroelectronics + * STiH407 SoC. + */ +#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH407 +#define _DT_BINDINGS_RESET_CONTROLLER_STIH407 + +/* Powerdown requests control 0 */ +#define STIH407_EMISS_POWERDOWN 0 +#define STIH407_NAND_POWERDOWN 1 + +/* Synp GMAC PowerDown */ +#define STIH407_ETH1_POWERDOWN 2 + +/* Powerdown requests control 1 */ +#define STIH407_USB3_POWERDOWN 3 +#define STIH407_USB2_PORT1_POWERDOWN 4 +#define STIH407_USB2_PORT0_POWERDOWN 5 +#define STIH407_PCIE1_POWERDOWN 6 +#define STIH407_PCIE0_POWERDOWN 7 +#define STIH407_SATA1_POWERDOWN 8 +#define STIH407_SATA0_POWERDOWN 9 + +/* Reset defines */ +#define STIH407_ETH1_SOFTRESET 0 +#define STIH407_MMC1_SOFTRESET 1 +#define STIH407_PICOPHY_SOFTRESET 2 +#define STIH407_IRB_SOFTRESET 3 +#define STIH407_PCIE0_SOFTRESET 4 +#define STIH407_PCIE1_SOFTRESET 5 +#define STIH407_SATA0_SOFTRESET 6 +#define STIH407_SATA1_SOFTRESET 7 +#define STIH407_MIPHY0_SOFTRESET 8 +#define STIH407_MIPHY1_SOFTRESET 9 +#define STIH407_MIPHY2_SOFTRESET 10 +#define STIH407_SATA0_PWR_SOFTRESET 11 +#define STIH407_SATA1_PWR_SOFTRESET 12 +#define STIH407_DELTA_SOFTRESET 13 +#define STIH407_BLITTER_SOFTRESET 14 +#define STIH407_HDTVOUT_SOFTRESET 15 +#define STIH407_HDQVDP_SOFTRESET 16 +#define STIH407_VDP_AUX_SOFTRESET 17 +#define STIH407_COMPO_SOFTRESET 18 +#define STIH407_HDMI_TX_PHY_SOFTRESET 19 +#define STIH407_JPEG_DEC_SOFTRESET 20 +#define STIH407_VP8_DEC_SOFTRESET 21 +#define STIH407_GPU_SOFTRESET 22 +#define STIH407_HVA_SOFTRESET 23 +#define STIH407_ERAM_HVA_SOFTRESET 24 +#define STIH407_LPM_SOFTRESET 25 +#define STIH407_KEYSCAN_SOFTRESET 26 +#define STIH407_USB2_PORT0_SOFTRESET 27 +#define STIH407_USB2_PORT1_SOFTRESET 28 + +/* Picophy reset defines */ +#define STIH407_PICOPHY0_RESET 0 +#define STIH407_PICOPHY1_RESET 1 +#define STIH407_PICOPHY2_RESET 2 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH407 */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 407a12f663eb..6bff83b1f298 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -28,6 +28,7 @@ #include <linux/errno.h> #include <linux/ioport.h> /* for struct resource */ #include <linux/device.h> +#include <linux/property.h> #ifndef _LINUX #define _LINUX @@ -123,6 +124,10 @@ int acpi_numa_init (void); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); +int __init acpi_parse_entries(char *id, unsigned long table_size, + acpi_tbl_entry_handler handler, + struct acpi_table_header *table_header, + int entry_id, unsigned int max_entries); int __init acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, acpi_tbl_entry_handler handler, @@ -423,14 +428,11 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, const struct 
device *dev); -static inline bool acpi_driver_match_device(struct device *dev, - const struct device_driver *drv) -{ - return !!acpi_match_device(drv->acpi_match_table, dev); -} - +extern bool acpi_driver_match_device(struct device *dev, + const struct device_driver *drv); int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); +void acpi_walk_dep_device_list(acpi_handle handle); struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_PTR(_ptr) (_ptr) @@ -443,6 +445,23 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define ACPI_HANDLE(dev) (NULL) +struct fwnode_handle; + +static inline bool is_acpi_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + +static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) +{ + return NULL; +} + static inline const char *acpi_dev_name(struct acpi_device *adev) { return NULL; @@ -553,16 +572,26 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr, #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) #endif -#if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) +#if defined(CONFIG_ACPI) && defined(CONFIG_PM) int acpi_dev_runtime_suspend(struct device *dev); int acpi_dev_runtime_resume(struct device *dev); int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); +struct acpi_device *acpi_dev_pm_get_node(struct device *dev); +int acpi_dev_pm_attach(struct device *dev, bool power_on); #else static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } +static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) +{ + return NULL; +} +static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) +{ + return -ENODEV; +} #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) @@ -585,20 +614,6 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } #endif -#if defined(CONFIG_ACPI) && defined(CONFIG_PM) -struct acpi_device *acpi_dev_pm_get_node(struct device *dev); -int acpi_dev_pm_attach(struct device *dev, bool power_on); -#else -static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev) -{ - return NULL; -} -static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) -{ - return -ENODEV; -} -#endif - #ifdef CONFIG_ACPI __printf(3, 4) void acpi_handle_printk(const char *level, acpi_handle handle, @@ -659,4 +674,114 @@ do { \ #endif #endif +struct acpi_gpio_params { + unsigned int crs_entry_index; + unsigned int line_index; + bool active_low; +}; + +struct acpi_gpio_mapping { + const char *name; + const struct acpi_gpio_params *data; + unsigned int size; +}; + +#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) +int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios); + +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) +{ + if (adev) + adev->driver_gpios = NULL; +} +#else +static inline int 
acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios) +{ + return -ENXIO; +} +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} +#endif + +/* Device properties */ + +#define MAX_ACPI_REFERENCE_ARGS 8 +struct acpi_reference_args { + struct acpi_device *adev; + size_t nargs; + u64 args[MAX_ACPI_REFERENCE_ARGS]; +}; + +#ifdef CONFIG_ACPI +int acpi_dev_get_property(struct acpi_device *adev, const char *name, + acpi_object_type type, const union acpi_object **obj); +int acpi_dev_get_property_array(struct acpi_device *adev, const char *name, + acpi_object_type type, + const union acpi_object **obj); +int acpi_dev_get_property_reference(struct acpi_device *adev, + const char *name, size_t index, + struct acpi_reference_args *args); + +int acpi_dev_prop_get(struct acpi_device *adev, const char *propname, + void **valptr); +int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname, + enum dev_prop_type proptype, void *val); +int acpi_dev_prop_read(struct acpi_device *adev, const char *propname, + enum dev_prop_type proptype, void *val, size_t nval); + +struct acpi_device *acpi_get_next_child(struct device *dev, + struct acpi_device *child); +#else +static inline int acpi_dev_get_property(struct acpi_device *adev, + const char *name, acpi_object_type type, + const union acpi_object **obj) +{ + return -ENXIO; +} +static inline int acpi_dev_get_property_array(struct acpi_device *adev, + const char *name, + acpi_object_type type, + const union acpi_object **obj) +{ + return -ENXIO; +} +static inline int acpi_dev_get_property_reference(struct acpi_device *adev, + const char *name, const char *cells_name, + size_t index, struct acpi_reference_args *args) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_get(struct acpi_device *adev, + const char *propname, + void **valptr) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read_single(struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val) +{ + return -ENXIO; +} + +static inline int acpi_dev_prop_read(struct acpi_device *adev, + const char *propname, + enum dev_prop_type proptype, + void *val, size_t nval) +{ + return -ENXIO; +} + +static inline struct acpi_device *acpi_get_next_child(struct device *dev, + struct acpi_device *child) +{ + return NULL; +} + +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index c324f5700d1a..ac02f9bd63dc 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -97,6 +97,16 @@ void amba_release_regions(struct amba_device *); #define amba_pclk_disable(d) \ do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) +static inline int amba_pclk_prepare(struct amba_device *dev) +{ + return clk_prepare(dev->pclk); +} + +static inline void amba_pclk_unprepare(struct amba_device *dev) +{ + clk_unprepare(dev->pclk); +} + /* Some drivers don't use the struct amba_device */ #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h index a495a959e8a7..33eb274cd0e6 100644 --- a/include/linux/ath9k_platform.h +++ b/include/linux/ath9k_platform.h @@ -31,8 +31,11 @@ struct ath9k_platform_data { u32 gpio_mask; u32 gpio_val; + bool endian_check; bool is_clk_25mhz; bool tx_gain_buffalo; + bool disable_2ghz; + bool disable_5ghz; int (*get_mac_revision)(void); int (*external_reset)(void); diff --git 
a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 91b77f8d495d..9177947bf032 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h @@ -11,6 +11,7 @@ * @detect_pin: GPIO pin wired to the card detect switch * @wp_pin: GPIO pin wired to the write protect sensor * @detect_is_active_high: The state of the detect pin when it is active + * @non_removable: The slot is not removable, only detect once * * If a given slot is not present on the board, @bus_width should be * set to 0. The other fields are ignored in this case. @@ -26,6 +27,7 @@ struct mci_slot_pdata { int detect_pin; int wp_pin; bool detect_is_active_high; + bool non_removable; }; /** diff --git a/include/linux/audit.h b/include/linux/audit.h index e58fe7df8b9c..0c04917c2f12 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -130,6 +130,7 @@ extern void audit_putname(struct filename *name); #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); +extern void __audit_file(const struct file *); extern void __audit_inode_child(const struct inode *parent, const struct dentry *dentry, const unsigned char type); @@ -183,6 +184,11 @@ static inline void audit_inode(struct filename *name, __audit_inode(name, dentry, flags); } } +static inline void audit_file(struct file *file) +{ + if (unlikely(!audit_dummy_context())) + __audit_file(file); +} static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { @@ -357,6 +363,9 @@ static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int parent) { } +static inline void audit_file(struct file *file) +{ +} static inline void audit_inode_parent_hidden(struct filename *name, const struct dentry *dentry) { } diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 729f48e6b20b..eb1c6a47b67f 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -447,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset); #define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ extern u32 bcma_core_dma_translation(struct bcma_device *core); +extern unsigned int bcma_core_irq(struct bcma_device *core, int num); + #endif /* LINUX_BCMA_H_ */ diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h index fb61f3fb4ddb..0b3b32aeeb8a 100644 --- a/include/linux/bcma/bcma_driver_mips.h +++ b/include/linux/bcma/bcma_driver_mips.h @@ -43,12 +43,12 @@ struct bcma_drv_mips { extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); -extern unsigned int bcma_core_irq(struct bcma_device *core); +extern unsigned int bcma_core_mips_irq(struct bcma_device *dev); #else static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } -static inline unsigned int bcma_core_irq(struct bcma_device *core) +static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev) { return 0; } diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 61f29e5ea840..576e4639ca60 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -53,6 +53,10 @@ struct linux_binprm { #define BINPRM_FLAGS_EXECFD_BIT 1 #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) +/* filename of the binary will be inaccessible after exec */ 
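[Editorial aside, not part of the patch: a sketch of how the acpi_gpio_params / acpi_gpio_mapping structures and acpi_dev_add_driver_gpios() added in the linux/acpi.h hunk above are meant to be used from a driver. The "reset-gpios" name, foo_probe()/foo_remove() and the _CRS indices are illustrative assumptions.]

#include <linux/acpi.h>
#include <linux/device.h>

/* Map _CRS GpioIo entry 0, line 0 to the connection ID "reset-gpios". */
static const struct acpi_gpio_params reset_gpio = { 0, 0, false };

static const struct acpi_gpio_mapping foo_acpi_gpios[] = {
	{ "reset-gpios", &reset_gpio, 1 },
	{ }					/* sentinel */
};

static int foo_probe(struct device *dev)
{
	int ret;

	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), foo_acpi_gpios);
	if (ret)
		return ret;
	/* gpiod lookups for the "reset" connection ID can now resolve via ACPI */
	return 0;
}

static void foo_remove(struct device *dev)
{
	acpi_dev_remove_driver_gpios(ACPI_COMPANION(dev));
}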
+#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 +#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) + /* Function parameter for binfmt->coredump */ struct coredump_params { const siginfo_t *siginfo; diff --git a/include/linux/bio.h b/include/linux/bio.h index 7347f486ceca..efead0b532c4 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -443,6 +443,11 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); +void generic_start_io_acct(int rw, unsigned long sectors, + struct hd_struct *part); +void generic_end_io_acct(int rw, struct hd_struct *part, + unsigned long start_time); + #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" #endif diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index e1c8d080c427..34e020c23644 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -45,6 +45,7 @@ * bitmap_set(dst, pos, nbits) Set specified bit area * bitmap_clear(dst, pos, nbits) Clear specified bit area * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area + * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) @@ -114,11 +115,36 @@ extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); extern void bitmap_set(unsigned long *map, unsigned int start, int len); extern void bitmap_clear(unsigned long *map, unsigned int start, int len); -extern unsigned long bitmap_find_next_zero_area(unsigned long *map, - unsigned long size, - unsigned long start, - unsigned int nr, - unsigned long align_mask); + +extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask, + unsigned long align_offset); + +/** + * bitmap_find_next_zero_area - find a contiguous aligned zero area + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @align_mask: Alignment mask for zero area + * + * The @align_mask should be one less than a power of 2; the effect is that + * the bit offset of all zero areas this function finds is multiples of that + * power of 2. A @align_mask of 0 means no alignment is required. + */ +static inline unsigned long +bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr, + unsigned long align_mask) +{ + return bitmap_find_next_zero_area_off(map, size, start, nr, + align_mask, 0); +} extern int bitmap_scnprintf(char *buf, unsigned int len, const unsigned long *src, int nbits); diff --git a/include/linux/bitops.h b/include/linux/bitops.h index be5fd38bd5a0..5d858e02997f 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -18,8 +18,11 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 
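[Editorial aside, not part of the patch: a short sketch of the bitmap_find_next_zero_area() semantics documented in the bitmap.h hunk above. The reserve_bits() helper and the map size are illustrative.]

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Reserve 8 contiguous bits, aligned to an 8-bit boundary, in a 128-bit map. */
static int reserve_bits(unsigned long *map)
{
	unsigned long pos;

	/* align_mask is one less than a power of two: 7 means 8-bit alignment */
	pos = bitmap_find_next_zero_area(map, 128, 0, 8, 7);
	if (pos >= 128)
		return -ENOSPC;		/* no suitable free area */

	bitmap_set(map, pos, 8);
	return pos;
}

[The new bitmap_find_next_zero_area_off() variant additionally takes an align_offset, for bitmaps whose bit 0 does not itself sit on an aligned boundary.]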
*/ -#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) -#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index c9be1589415a..8aded9ab2e4e 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -79,7 +79,13 @@ struct blk_mq_tag_set { struct list_head tag_list; }; -typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool); +struct blk_mq_queue_data { + struct request *rq; + struct list_head *list; + bool last; +}; + +typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); @@ -140,6 +146,7 @@ enum { BLK_MQ_F_TAG_SHARED = 1 << 1, BLK_MQ_F_SG_MERGE = 1 << 2, BLK_MQ_F_SYSFS_UP = 1 << 3, + BLK_MQ_F_DEFER_ISSUE = 1 << 4, BLK_MQ_S_STOPPED = 0, BLK_MQ_S_TAG_ACTIVE = 1, @@ -162,11 +169,29 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); void blk_mq_insert_request(struct request *, bool, bool, bool); void blk_mq_run_queues(struct request_queue *q, bool async); void blk_mq_free_request(struct request *rq); +void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); +enum { + BLK_MQ_UNIQUE_TAG_BITS = 16, + BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1, +}; + +u32 blk_mq_unique_tag(struct request *rq); + +static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag) +{ + return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS; +} + +static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) +{ + return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; +} + struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index aac0f9ea952a..92f4b4b288dd 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -398,7 +398,7 @@ struct request_queue { */ struct kobject mq_kobj; -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM struct device *dev; int rpm_status; unsigned int nr_pending; @@ -1057,7 +1057,7 @@ extern void blk_put_queue(struct request_queue *); /* * block layer runtime pm functions */ -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); extern int blk_pre_runtime_suspend(struct request_queue *q); extern void blk_post_runtime_suspend(struct request_queue *q, int err); @@ -1136,7 +1136,6 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) /* * tag stuff */ -#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) extern int blk_queue_start_tag(struct request_queue *, struct request *); extern struct request *blk_queue_find_tag(struct request_queue *, int); extern void blk_queue_end_tag(struct request_queue *, struct request *); @@ -1185,7 
+1184,6 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); enum blk_default_limits { BLK_MAX_SEGMENTS = 128, BLK_SAFE_MAX_SECTORS = 255, - BLK_DEF_MAX_SECTORS = 1024, BLK_MAX_SEGMENT_SIZE = 65536, BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, }; diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 4e2bd4c95b66..0995c2de8162 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat, extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); extern unsigned long free_all_bootmem(void); +extern void reset_node_managed_pages(pg_data_t *pgdat); extern void reset_all_zones_managed_pages(void); extern void free_bootmem_node(pg_data_t *pgdat, diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3cf91754a957..bbfceb756452 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -22,7 +22,7 @@ struct bpf_map_ops { /* funcs callable from userspace and from eBPF programs */ void *(*map_lookup_elem)(struct bpf_map *map, void *key); - int (*map_update_elem)(struct bpf_map *map, void *key, void *value); + int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); int (*map_delete_elem)(struct bpf_map *map, void *key); }; @@ -128,9 +128,18 @@ struct bpf_prog_aux { struct work_struct work; }; +#ifdef CONFIG_BPF_SYSCALL void bpf_prog_put(struct bpf_prog *prog); +#else +static inline void bpf_prog_put(struct bpf_prog *prog) {} +#endif struct bpf_prog *bpf_prog_get(u32 ufd); /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog *fp, union bpf_attr *attr); +/* verifier prototypes for helper functions called from eBPF programs */ +extern struct bpf_func_proto bpf_map_lookup_elem_proto; +extern struct bpf_func_proto bpf_map_update_elem_proto; +extern struct bpf_func_proto bpf_map_delete_elem_proto; + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 6992afc6ba7f..c05ff0f9f9a5 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -99,6 +99,12 @@ inval_skb: return 1; } +static inline bool can_is_canfd_skb(const struct sk_buff *skb) +{ + /* the CAN specific type of skb is identified by its data length */ + return skb->len == CANFD_MTU; +} + /* get data length from can_dlc with sanitized can_dlc */ u8 can_dlc2len(u8 can_dlc); @@ -121,6 +127,9 @@ void unregister_candev(struct net_device *dev); int can_restart_now(struct net_device *dev); void can_bus_off(struct net_device *dev); +void can_change_state(struct net_device *dev, struct can_frame *cf, + enum can_state tx_state, enum can_state rx_state); + void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int idx); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1d5196889048..da0dae0600e6 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css) } /** + * css_get_many - obtain references on the specified css + * @css: target css + * @n: number of references to get + * + * The caller must already have a reference. 
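[Editorial aside, not part of the patch: a brief illustration of the GENMASK()/GENMASK_ULL() macros whose definitions are rewritten in the bitops.h hunk above. The register layout and field names are hypothetical.]

#include <linux/bitops.h>
#include <linux/types.h>

/* Extract bits 15..8 of a control register value using GENMASK(). */
#define CTRL_SPEED_MASK		GENMASK(15, 8)
#define CTRL_SPEED_SHIFT	8

static inline unsigned int ctrl_get_speed(u32 reg)
{
	return (reg & CTRL_SPEED_MASK) >> CTRL_SPEED_SHIFT;
}

/* Matching the comment in the hunk: GENMASK_ULL(39, 21) == 0x000000ffffe00000ULL */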
+ */ +static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_get_many(&css->refcnt, n); +} + +/** * css_tryget - try to obtain a reference on the specified css * @css: target css * @@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css) percpu_ref_put(&css->refcnt); } +/** + * css_put_many - put css references + * @css: target css + * @n: number of references to put + * + * Put references obtained via css_get() and css_tryget_online(). + */ +static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_put_many(&css->refcnt, n); +} + /* bits in struct cgroup flags field */ enum { /* Control Group requires release notifications to userspace */ @@ -367,8 +393,8 @@ struct css_set { * struct cftype: handler definitions for cgroup control files * * When reading/writing to a file: - * - the cgroup to use is file->f_dentry->d_parent->d_fsdata - * - the 'cftype' of the file is file->f_dentry->d_fsdata + * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata + * - the 'cftype' of the file is file->f_path.dentry->d_fsdata */ /* cftype->flags */ @@ -612,8 +638,10 @@ struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); int (*css_online)(struct cgroup_subsys_state *css); void (*css_offline)(struct cgroup_subsys_state *css); + void (*css_released)(struct cgroup_subsys_state *css); void (*css_free)(struct cgroup_subsys_state *css); void (*css_reset)(struct cgroup_subsys_state *css); + void (*css_e_css_changed)(struct cgroup_subsys_state *css); int (*can_attach)(struct cgroup_subsys_state *css, struct cgroup_taskset *tset); @@ -908,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it); int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); +struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, + struct cgroup_subsys *ss); struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, struct cgroup_subsys *ss); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index be21af149f11..2839c639f092 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -352,7 +352,6 @@ struct clk_divider { #define CLK_DIVIDER_READ_ONLY BIT(5) extern const struct clk_ops clk_divider_ops; -extern const struct clk_ops clk_divider_ro_ops; struct clk *clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index f75acbf70e96..74e5341463c9 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -254,13 +254,26 @@ extern const struct clk_ops ti_clk_mux_ops; void omap2_init_clk_hw_omap_clocks(struct clk *clk); int omap3_noncore_dpll_enable(struct clk_hw *hw); void omap3_noncore_dpll_disable(struct clk_hw *hw); +int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index); int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate); +int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate, + u8 index); +long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *best_parent_rate, + struct clk **best_parent_clk); unsigned long 
omap4_dpll_regm4xen_recalc(struct clk_hw *hw, unsigned long parent_rate); long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, unsigned long target_rate, unsigned long *parent_rate); +long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *best_parent_rate, + struct clk **best_parent_clk); u8 omap2_init_dpll_parent(struct clk_hw *hw); unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, @@ -278,6 +291,8 @@ int omap2_clk_disable_autoidle_all(void); void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate, unsigned long parent_rate); +int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate, u8 index); int omap2_dflt_clk_enable(struct clk_hw *hw); void omap2_dflt_clk_disable(struct clk_hw *hw); int omap2_dflt_clk_is_enabled(struct clk_hw *hw); diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 60bdf8dc02a3..3238ffa33f68 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -33,10 +33,11 @@ extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, enum migrate_mode mode, int *contended, - struct zone **candidate_zone); + int alloc_flags, int classzone_idx); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); -extern unsigned long compaction_suitable(struct zone *zone, int order); +extern unsigned long compaction_suitable(struct zone *zone, int order, + int alloc_flags, int classzone_idx); /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 @@ -103,7 +104,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, enum migrate_mode mode, int *contended, - struct zone **candidate_zone) + int alloc_flags, int classzone_idx) { return COMPACT_CONTINUE; } @@ -116,7 +117,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) { } -static inline unsigned long compaction_suitable(struct zone *zone, int order) +static inline unsigned long compaction_suitable(struct zone *zone, int order, + int alloc_flags, int classzone_idx) { return COMPACT_SKIPPED; } diff --git a/include/linux/compat.h b/include/linux/compat.h index e6494261eaff..7450ca2ac1fc 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -357,6 +357,9 @@ asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int); asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, const compat_uptr_t __user *envp); +asmlinkage long compat_sys_execveat(int dfd, const char __user *filename, + const compat_uptr_t __user *argv, + const compat_uptr_t __user *envp, int flags); asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, compat_ulong_t __user *outp, compat_ulong_t __user *exp, diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 503b085b7832..4d078cebafd2 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -217,26 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name) struct cpufreq_driver { - char name[CPUFREQ_NAME_LEN]; - u8 flags; - void *driver_data; + 
char name[CPUFREQ_NAME_LEN]; + u8 flags; + void *driver_data; /* needed by all drivers */ - int (*init) (struct cpufreq_policy *policy); - int (*verify) (struct cpufreq_policy *policy); + int (*init)(struct cpufreq_policy *policy); + int (*verify)(struct cpufreq_policy *policy); /* define one out of two */ - int (*setpolicy) (struct cpufreq_policy *policy); + int (*setpolicy)(struct cpufreq_policy *policy); /* * On failure, should always restore frequency to policy->restore_freq * (i.e. old freq). */ - int (*target) (struct cpufreq_policy *policy, /* Deprecated */ - unsigned int target_freq, - unsigned int relation); - int (*target_index) (struct cpufreq_policy *policy, - unsigned int index); + int (*target)(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation); /* Deprecated */ + int (*target_index)(struct cpufreq_policy *policy, + unsigned int index); /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset. @@ -252,27 +252,31 @@ struct cpufreq_driver { * wish to switch to intermediate frequency for some target frequency. * In that case core will directly call ->target_index(). */ - unsigned int (*get_intermediate)(struct cpufreq_policy *policy, - unsigned int index); - int (*target_intermediate)(struct cpufreq_policy *policy, - unsigned int index); + unsigned int (*get_intermediate)(struct cpufreq_policy *policy, + unsigned int index); + int (*target_intermediate)(struct cpufreq_policy *policy, + unsigned int index); /* should be defined, if possible */ - unsigned int (*get) (unsigned int cpu); + unsigned int (*get)(unsigned int cpu); /* optional */ - int (*bios_limit) (int cpu, unsigned int *limit); + int (*bios_limit)(int cpu, unsigned int *limit); + + int (*exit)(struct cpufreq_policy *policy); + void (*stop_cpu)(struct cpufreq_policy *policy); + int (*suspend)(struct cpufreq_policy *policy); + int (*resume)(struct cpufreq_policy *policy); + + /* Will be called after the driver is fully initialized */ + void (*ready)(struct cpufreq_policy *policy); - int (*exit) (struct cpufreq_policy *policy); - void (*stop_cpu) (struct cpufreq_policy *policy); - int (*suspend) (struct cpufreq_policy *policy); - int (*resume) (struct cpufreq_policy *policy); - struct freq_attr **attr; + struct freq_attr **attr; /* platform specific boost support code */ - bool boost_supported; - bool boost_enabled; - int (*set_boost) (int state); + bool boost_supported; + bool boost_enabled; + int (*set_boost)(int state); }; /* flags */ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 25e0df6155a4..a07e087f54b2 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -53,7 +53,7 @@ struct cpuidle_state { }; /* Idle State Flags */ -#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ +#define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? 
*/ #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ @@ -90,7 +90,7 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); * cpuidle_get_last_residency - retrieves the last state's residency time * @dev: the target CPU * - * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set + * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set */ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) { diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 2f073db7392e..1b357997cac5 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p); void cpuset_init_current_mems_allowed(void); int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); -extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); -extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); +extern int __cpuset_node_allowed(int node, gfp_t gfp_mask); -static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) +static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) { - return nr_cpusets() <= 1 || - __cpuset_node_allowed_softwall(node, gfp_mask); + return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask); } -static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) +static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { - return nr_cpusets() <= 1 || - __cpuset_node_allowed_hardwall(node, gfp_mask); -} - -static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) -{ - return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); -} - -static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) -{ - return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); + return cpuset_node_allowed(zone_to_nid(z), gfp_mask); } extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, @@ -179,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) return 1; } -static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) -{ - return 1; -} - -static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) -{ - return 1; -} - -static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) +static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) { return 1; } -static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) +static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { return 1; } diff --git a/include/linux/crypto.h b/include/linux/crypto.h index d45e949699ea..9c8776d0ada8 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -26,6 +26,19 @@ #include <linux/uaccess.h> /* + * Autoloaded crypto modules should only use a prefixed name to avoid allowing + * arbitrary modules to be loaded. Loading from userspace may still need the + * unprefixed names, so retains those aliases as well. + * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3 + * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro + * expands twice on the same line. Instead, use a separate base name for the + * alias. + */ +#define MODULE_ALIAS_CRYPTO(name) \ + __MODULE_INFO(alias, alias_userspace, name); \ + __MODULE_INFO(alias, alias_crypto, "crypto-" name) + +/* * Algorithm masks and types. 
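To illustrate the MODULE_ALIAS_CRYPTO() macro introduced above: an algorithm module declares the alias once and thereby exposes both the bare name (for explicit loading from userspace) and the "crypto-" prefixed name (for autoloading triggered from within the crypto API). The module body is omitted here; the pattern below is how in-tree cipher modules such as the generic AES implementation are expected to use it.

#include <linux/module.h>
#include <linux/crypto.h>

/* ... crypto_register_alg()/crypto_unregister_alg() in module init/exit ... */

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AES cipher algorithm (example)");
/* Emits both "alias=aes" and "alias=crypto-aes" in the module info. */
MODULE_ALIAS_CRYPTO("aes");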
*/ #define CRYPTO_ALG_TYPE_MASK 0x0000000f @@ -127,6 +140,13 @@ struct skcipher_givcrypt_request; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); +/** + * DOC: Block Cipher Context Data Structures + * + * These data structures define the operating context for each block cipher + * type. + */ + struct crypto_async_request { struct list_head list; crypto_completion_t complete; @@ -194,9 +214,63 @@ struct hash_desc { u32 flags; }; -/* - * Algorithms: modular crypto algorithm implementations, managed - * via crypto_register_alg() and crypto_unregister_alg(). +/** + * DOC: Block Cipher Algorithm Definitions + * + * These data structures define modular crypto algorithm implementations, + * managed via crypto_register_alg() and crypto_unregister_alg(). + */ + +/** + * struct ablkcipher_alg - asynchronous block cipher definition + * @min_keysize: Minimum key size supported by the transformation. This is the + * smallest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MIN_KEY_SIZE" include/crypto/ + * @max_keysize: Maximum key size supported by the transformation. This is the + * largest key length supported by this transformation algorithm. + * This must be set to one of the pre-defined values as this is + * not hardware specific. Possible values for this field can be + * found via git grep "_MAX_KEY_SIZE" include/crypto/ + * @setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. This function can + * be called multiple times during the existence of the transformation + * object, so one must make sure the key is properly reprogrammed into + * the hardware. This function is also responsible for checking the key + * length for validity. In case a software fallback was put in place in + * the @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. + * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt + * the supplied scatterlist containing the blocks of data. The crypto + * API consumer is responsible for aligning the entries of the + * scatterlist properly and making sure the chunks are correctly + * sized. In case a software fallback was put in place in the + * @cra_init call, this function might need to use the fallback if + * the algorithm doesn't support all of the key sizes. In case the + * key was stored in transformation context, the key might need to be + * re-programmed into the hardware in this function. This function + * shall not modify the transformation context, as this function may + * be called in parallel with the same transformation object. + * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt + * and the conditions are exactly the same. + * @givencrypt: Update the IV for encryption. With this function, a cipher + * implementation may provide the function on how to update the IV + * for encryption. + * @givdecrypt: Update the IV for decryption. This is the reverse of + * @givencrypt . + * @geniv: The transformation implementation may use an "IV generator" provided + * by the kernel crypto API. Several use cases have a predefined + * approach how IVs are to be updated. 
For such use cases, the kernel + * crypto API provides ready-to-use implementations that can be + * referenced with this variable. + * @ivsize: IV size applicable for transformation. The consumer must provide an + * IV of exactly that size to perform the encrypt or decrypt operation. + * + * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are + * mandatory and must be filled. */ struct ablkcipher_alg { int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, @@ -213,6 +287,32 @@ struct ablkcipher_alg { unsigned int ivsize; }; +/** + * struct aead_alg - AEAD cipher definition + * @maxauthsize: Set the maximum authentication tag size supported by the + * transformation. A transformation may support smaller tag sizes. + * As the authentication tag is a message digest to ensure the + * integrity of the encrypted data, a consumer typically wants the + * largest authentication tag possible as defined by this + * variable. + * @setauthsize: Set authentication size for the AEAD transformation. This + * function is used to specify the consumer requested size of the + * authentication tag to be either generated by the transformation + * during encryption or the size of the authentication tag to be + * supplied during the decryption operation. This function is also + * responsible for checking the authentication tag size for + * validity. + * @setkey: see struct ablkcipher_alg + * @encrypt: see struct ablkcipher_alg + * @decrypt: see struct ablkcipher_alg + * @givencrypt: see struct ablkcipher_alg + * @givdecrypt: see struct ablkcipher_alg + * @geniv: see struct ablkcipher_alg + * @ivsize: see struct ablkcipher_alg + * + * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are + * mandatory and must be filled. + */ struct aead_alg { int (*setkey)(struct crypto_aead *tfm, const u8 *key, unsigned int keylen); @@ -228,6 +328,18 @@ struct aead_alg { unsigned int maxauthsize; }; +/** + * struct blkcipher_alg - synchronous block cipher definition + * @min_keysize: see struct ablkcipher_alg + * @max_keysize: see struct ablkcipher_alg + * @setkey: see struct ablkcipher_alg + * @encrypt: see struct ablkcipher_alg + * @decrypt: see struct ablkcipher_alg + * @geniv: see struct ablkcipher_alg + * @ivsize: see struct ablkcipher_alg + * + * All fields except @geniv and @ivsize are mandatory and must be filled. + */ struct blkcipher_alg { int (*setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen); @@ -245,6 +357,53 @@ struct blkcipher_alg { unsigned int ivsize; }; +/** + * struct cipher_alg - single-block symmetric ciphers definition + * @cia_min_keysize: Minimum key size supported by the transformation. This is + * the smallest key length supported by this transformation + * algorithm. This must be set to one of the pre-defined + * values as this is not hardware specific. Possible values + * for this field can be found via git grep "_MIN_KEY_SIZE" + * include/crypto/ + * @cia_max_keysize: Maximum key size supported by the transformation. This is + * the largest key length supported by this transformation + * algorithm. This must be set to one of the pre-defined values + * as this is not hardware specific. Possible values for this + * field can be found via git grep "_MAX_KEY_SIZE" + * include/crypto/ + * @cia_setkey: Set key for the transformation. This function is used to either + * program a supplied key into the hardware or store the key in the + * transformation context for programming it later. Note that this + * function does modify the transformation context. 
This function + * can be called multiple times during the existence of the + * transformation object, so one must make sure the key is properly + * reprogrammed into the hardware. This function is also + * responsible for checking the key length for validity. + * @cia_encrypt: Encrypt a single block. This function is used to encrypt a + * single block of data, which must be @cra_blocksize big. This + * always operates on a full @cra_blocksize and it is not possible + * to encrypt a block of smaller size. The supplied buffers must + * therefore also be at least of @cra_blocksize size. Both the + * input and output buffers are always aligned to @cra_alignmask. + * In case either of the input or output buffer supplied by user + * of the crypto API is not aligned to @cra_alignmask, the crypto + * API will re-align the buffers. The re-alignment means that a + * new buffer will be allocated, the data will be copied into the + * new buffer, then the processing will happen on the new buffer, + * then the data will be copied back into the original buffer and + * finally the new buffer will be freed. In case a software + * fallback was put in place in the @cra_init call, this function + * might need to use the fallback if the algorithm doesn't support + * all of the key sizes. In case the key was stored in + * transformation context, the key might need to be re-programmed + * into the hardware in this function. This function shall not + * modify the transformation context, as this function may be + * called in parallel with the same transformation object. + * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to + * @cia_encrypt, and the conditions are exactly the same. + * + * All fields are mandatory and must be filled. + */ struct cipher_alg { unsigned int cia_min_keysize; unsigned int cia_max_keysize; @@ -261,6 +420,25 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; +/** + * struct rng_alg - random number generator definition + * @rng_make_random: The function defined by this variable obtains a random + * number. The random number generator transform must generate + * the random number out of the context provided with this + * call. + * @rng_reset: Reset of the random number generator by clearing the entire state. + * With the invocation of this function call, the random number + * generator shall completely reinitialize its state. If the random + * number generator requires a seed for setting up a new state, + * the seed must be provided by the consumer while invoking this + * function. The required size of the seed is defined with + * @seedsize . + * @seedsize: The seed size required for a random number generator + * initialization defined with this variable. Some random number + * generators like the SP800-90A DRBG does not require a seed as the + * seeding is implemented internally without the need of support by + * the consumer. In this case, the seed size is set to zero. + */ struct rng_alg { int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, unsigned int dlen); @@ -277,6 +455,81 @@ struct rng_alg { #define cra_compress cra_u.compress #define cra_rng cra_u.rng +/** + * struct crypto_alg - definition of a cryptograpic cipher algorithm + * @cra_flags: Flags describing this transformation. See include/linux/crypto.h + * CRYPTO_ALG_* flags for the flags which go in here. Those are + * used for fine-tuning the description of the transformation + * algorithm. + * @cra_blocksize: Minimum block size of this transformation. 
The size in bytes + * of the smallest possible unit which can be transformed with + * this algorithm. The users must respect this value. + * In case of HASH transformation, it is possible for a smaller + * block than @cra_blocksize to be passed to the crypto API for + * transformation, in case of any other transformation type, an + * error will be returned upon any attempt to transform smaller + * than @cra_blocksize chunks. + * @cra_ctxsize: Size of the operational context of the transformation. This + * value informs the kernel crypto API about the memory size + * needed to be allocated for the transformation context. + * @cra_alignmask: Alignment mask for the input and output data buffer. The data + * buffer containing the input data for the algorithm must be + * aligned to this alignment mask. The data buffer for the + * output data must be aligned to this alignment mask. Note that + * the Crypto API will do the re-alignment in software, but + * only under special conditions and there is a performance hit. + * The re-alignment happens at these occasions for different + * @cra_u types: cipher -- For both input data and output data + * buffer; ahash -- For output hash destination buf; shash -- + * For output hash destination buf. + * This is needed on hardware which is flawed by design and + * cannot pick data from arbitrary addresses. + * @cra_priority: Priority of this transformation implementation. In case + * multiple transformations with same @cra_name are available to + * the Crypto API, the kernel will use the one with highest + * @cra_priority. + * @cra_name: Generic name (usable by multiple implementations) of the + * transformation algorithm. This is the name of the transformation + * itself. This field is used by the kernel when looking up the + * providers of particular transformation. + * @cra_driver_name: Unique name of the transformation provider. This is the + * name of the provider of the transformation. This can be any + * arbitrary value, but in the usual case, this contains the + * name of the chip or provider and the name of the + * transformation algorithm. + * @cra_type: Type of the cryptographic transformation. This is a pointer to + * struct crypto_type, which implements callbacks common for all + * trasnformation types. There are multiple options: + * &crypto_blkcipher_type, &crypto_ablkcipher_type, + * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type. + * This field might be empty. In that case, there are no common + * callbacks. This is the case for: cipher, compress, shash. + * @cra_u: Callbacks implementing the transformation. This is a union of + * multiple structures. Depending on the type of transformation selected + * by @cra_type and @cra_flags above, the associated structure must be + * filled with callbacks. This field might be empty. This is the case + * for ahash, shash. + * @cra_init: Initialize the cryptographic transformation object. This function + * is used to initialize the cryptographic transformation object. + * This function is called only once at the instantiation time, right + * after the transformation context was allocated. In case the + * cryptographic hardware has some special requirements which need to + * be handled by software, this function shall check for the precise + * requirement of the transformation and put any software fallbacks + * in place. + * @cra_exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @cra_init, used to remove various changes set in + * @cra_init. 
+ * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE + * @cra_list: internally used + * @cra_users: internally used + * @cra_refcnt: internally used + * @cra_destroy: internally used + * + * The struct crypto_alg describes a generic Crypto API algorithm and is common + * for all of the transformations. Any variable not documented here shall not + * be used by a cipher implementation as it is internal to the Crypto API. + */ struct crypto_alg { struct list_head cra_list; struct list_head cra_users; @@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask) return mask; } +/** + * DOC: Asynchronous Block Cipher API + * + * Asynchronous block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto). + * + * Asynchronous cipher operations imply that the function invocation for a + * cipher request returns immediately before the completion of the operation. + * The cipher request is scheduled as a separate kernel thread and therefore + * load-balanced on the different CPUs via the process scheduler. To allow + * the kernel crypto API to inform the caller about the completion of a cipher + * request, the caller must provide a callback function. That function is + * invoked with the cipher handle when the request completes. + * + * To support the asynchronous operation, additional information than just the + * cipher handle must be supplied to the kernel crypto API. That additional + * information is given by filling in the ablkcipher_request data structure. + * + * For the asynchronous block cipher API, the state is maintained with the tfm + * cipher handle. A single tfm can be used across multiple calls and in + * parallel. For asynchronous block cipher calls, context data supplied and + * only used by the caller can be referenced the request data structure in + * addition to the IV used for the cipher request. The maintenance of such + * state information would be important for a crypto driver implementer to + * have, because when calling the callback function upon completion of the + * cipher operation, that callback function may need some information about + * which operation just finished if it invoked multiple in parallel. This + * state information is unused by the kernel crypto API. + */ + +/** + * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ablkcipher cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an ablkcipher. The returned struct + * crypto_ablkcipher is the cipher handle that is required for any subsequent + * API invocation for that ablkcipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask); @@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm( return &tfm->base; } +/** + * crypto_free_ablkcipher() - zeroize and free cipher handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) { crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); } +/** + * crypto_has_ablkcipher() - Search for the availability of an ablkcipher. 
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * ablkcipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the ablkcipher is known to the kernel crypto API; false + * otherwise + */ static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, u32 mask) { @@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt( return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; } +/** + * crypto_ablkcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the ablkcipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ static inline unsigned int crypto_ablkcipher_ivsize( struct crypto_ablkcipher *tfm) { return crypto_ablkcipher_crt(tfm)->ivsize; } +/** + * crypto_ablkcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the ablkcipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ static inline unsigned int crypto_ablkcipher_blocksize( struct crypto_ablkcipher *tfm) { @@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm, crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); } +/** + * crypto_ablkcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the ablkcipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { @@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, return crt->setkey(crt->base, key, keylen); } +/** + * crypto_ablkcipher_reqtfm() - obtain cipher handle from request + * @req: ablkcipher_request out of which the cipher handle is to be obtained + * + * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request + * data structure. + * + * Return: crypto_ablkcipher handle + */ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( struct ablkcipher_request *req) { return __crypto_ablkcipher_cast(req->base.tfm); } +/** + * crypto_ablkcipher_encrypt() - encrypt plaintext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the ablkcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * ablkcipher_request_* functions. 
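A condensed sketch of how the ablkcipher calls documented here combine for a single encryption pass, assuming AES-128 in CBC mode, caller-initialised scatterlists and a completion-based wait for truly asynchronous drivers; the "cbc(aes)" name, the 16-byte key length and the my_* helpers are illustrative assumptions rather than anything mandated by this header.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/err.h>

struct my_result {
        struct completion done;
        int err;
};

static void my_complete(struct crypto_async_request *req, int err)
{
        struct my_result *res = req->data;

        if (err == -EINPROGRESS)        /* request was only backlogged */
                return;
        res->err = err;
        complete(&res->done);
}

static int my_cbc_aes_encrypt(struct scatterlist *src, struct scatterlist *dst,
                              unsigned int nbytes, const u8 *key, u8 *iv)
{
        struct crypto_ablkcipher *tfm;
        struct ablkcipher_request *req;
        struct my_result res;
        int ret;

        tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_ablkcipher_setkey(tfm, key, 16);   /* AES-128 */
        if (ret)
                goto out_tfm;

        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_tfm;
        }

        init_completion(&res.done);
        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        my_complete, &res);
        ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);

        ret = crypto_ablkcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                /* asynchronous path: wait for the callback to fire */
                wait_for_completion(&res.done);
                ret = res.err;
        }

        ablkcipher_request_free(req);
out_tfm:
        crypto_free_ablkcipher(tfm);
        return ret;
}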
+ * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = @@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) return crt->encrypt(req); } +/** + * crypto_ablkcipher_decrypt() - decrypt ciphertext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the ablkcipher_request handle. That data + * structure and how it is filled with data is discussed with the + * ablkcipher_request_* functions. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = @@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) return crt->decrypt(req); } +/** + * DOC: Asynchronous Cipher Request Handle + * + * The ablkcipher_request data structure contains all pointers to data + * required for the asynchronous cipher operation. This includes the cipher + * handle (which can be used by multiple ablkcipher_request instances), pointer + * to plaintext and ciphertext, asynchronous callback function, etc. It acts + * as a handle to the ablkcipher_request_* API calls in a similar way as + * ablkcipher handle to the crypto_ablkcipher_* API calls. + */ + +/** + * crypto_ablkcipher_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ static inline unsigned int crypto_ablkcipher_reqsize( struct crypto_ablkcipher *tfm) { return crypto_ablkcipher_crt(tfm)->reqsize; } +/** + * ablkcipher_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing ablkcipher handle in the request + * data structure with a different one. + */ static inline void ablkcipher_request_set_tfm( struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) { @@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast( return container_of(req, struct ablkcipher_request, base); } +/** + * ablkcipher_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the ablkcipher + * encrypt and decrypt API calls. During the allocation, the provided ablkcipher + * handle is registered in the request data structure. + * + * Return: allocated request handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
+ */ static inline struct ablkcipher_request *ablkcipher_request_alloc( struct crypto_ablkcipher *tfm, gfp_t gfp) { @@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc( return req; } +/** + * ablkcipher_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ static inline void ablkcipher_request_free(struct ablkcipher_request *req) { kzfree(req); } +/** + * ablkcipher_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * This function allows setting the callback function that is triggered once the + * cipher operation completes. + * + * The callback function is registered with the ablkcipher_request handle and + * must comply with the following template: + * + * void callback_function(struct crypto_async_request *req, int error) + */ static inline void ablkcipher_request_set_callback( struct ablkcipher_request *req, u32 flags, crypto_completion_t compl, void *data) @@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback( req->base.flags = flags; } +/** + * ablkcipher_request_set_crypt() - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @nbytes: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_ablkcipher_ivsize + * + * This function allows setting of the source data and destination data + * scatter / gather lists. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed: the source is the ciphertext and the destination is the plaintext. + */ static inline void ablkcipher_request_set_crypt( struct ablkcipher_request *req, struct scatterlist *src, struct scatterlist *dst, @@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt( req->info = iv; } +/** + * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API + * + * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD + * (listed as type "aead" in /proc/crypto) + * + * The most prominent examples for this type of encryption is GCM and CCM. + * However, the kernel supports other types of AEAD ciphers which are defined + * with the following cipher string: + * + * authenc(keyed message digest, block cipher) + * + * For example: authenc(hmac(sha256), cbc(aes)) + * + * The example code provided for the asynchronous block cipher operation + * applies here as well. 
Naturally all *ablkcipher* symbols must be exchanged + * the *aead* pendants discussed in the following. In addtion, for the AEAD + * operation, the aead_request_set_assoc function must be used to set the + * pointer to the associated data memory location before performing the + * encryption or decryption operation. In case of an encryption, the associated + * data memory is filled during the encryption operation. For decryption, the + * associated data memory must contain data that is used to verify the integrity + * of the decrypted data. Another deviation from the asynchronous block cipher + * operation is that the caller should explicitly check for -EBADMSG of the + * crypto_aead_decrypt. That error indicates an authentication error, i.e. + * a breach in the integrity of the message. In essence, that -EBADMSG error + * code is the key bonus an AEAD cipher has over "standard" block chaining + * modes. + */ + static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) { return (struct crypto_aead *)tfm; } +/** + * crypto_alloc_aead() - allocate AEAD cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * AEAD cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for an AEAD. The returned struct + * crypto_aead is the cipher handle that is required for any subsequent + * API invocation for that AEAD. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) @@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) return &tfm->base; } +/** + * crypto_free_aead() - zeroize and free aead handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_aead(struct crypto_aead *tfm) { crypto_free_tfm(crypto_aead_tfm(tfm)); @@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm) return &crypto_aead_tfm(tfm)->crt_aead; } +/** + * crypto_aead_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the aead referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) { return crypto_aead_crt(tfm)->ivsize; } +/** + * crypto_aead_authsize() - obtain maximum authentication data size + * @tfm: cipher handle + * + * The maximum size of the authentication data for the AEAD cipher referenced + * by the AEAD cipher handle is returned. The authentication data size may be + * zero if the cipher implements a hard-coded maximum. + * + * The authentication data may also be known as "tag value". + * + * Return: authentication data size / tag size in bytes + */ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) { return crypto_aead_crt(tfm)->authsize; } +/** + * crypto_aead_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the AEAD referenced with the cipher handle is returned. 
+ * The caller may use that information to allocate appropriate memory for the + * data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) { return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); @@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags) crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); } +/** + * crypto_aead_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the AEAD referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { @@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, return crt->setkey(crt->base, key, keylen); } +/** + * crypto_aead_setauthsize() - set authentication data size + * @tfm: cipher handle + * @authsize: size of the authentication data / tag in bytes + * + * Set the authentication data size / tag size. AEAD requires an authentication + * tag (or MAC) in addition to the associated data. + * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) @@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) return __crypto_aead_cast(req->base.tfm); } +/** + * crypto_aead_encrypt() - encrypt plaintext + * @req: reference to the aead_request handle that holds all information + * needed to perform the cipher operation + * + * Encrypt plaintext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The encryption operation creates the authentication data / + * tag. That data is concatenated with the created ciphertext. + * The ciphertext memory size is therefore the given number of + * block cipher blocks + the size defined by the + * crypto_aead_setauthsize invocation. The caller must ensure + * that sufficient memory is available for the ciphertext and + * the authentication tag. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_aead_encrypt(struct aead_request *req) { return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); } +/** + * crypto_aead_decrypt() - decrypt ciphertext + * @req: reference to the ablkcipher_request handle that holds all information + * needed to perform the cipher operation + * + * Decrypt ciphertext data using the aead_request handle. That data structure + * and how it is filled with data is discussed with the aead_request_* + * functions. + * + * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the + * authentication data / tag. That authentication data / tag + * must have the size defined by the crypto_aead_setauthsize + * invocation. 
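A sketch of one AEAD encryption pass tying the calls above together, assuming a GCM mode cipher ("gcm(aes)"), a destination scatterlist that already reserves room for the authentication tag, and the same completion-based wait as in the ablkcipher sketch earlier; the my_* names and the chosen key/tag sizes are illustrative assumptions.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/err.h>

struct my_aead_result {
        struct completion done;
        int err;
};

static void my_aead_done(struct crypto_async_request *req, int err)
{
        struct my_aead_result *res = req->data;

        if (err == -EINPROGRESS)
                return;
        res->err = err;
        complete(&res->done);
}

/*
 * @dst must hold the ciphertext plus the 16-byte tag configured below;
 * @iv must be crypto_aead_ivsize(tfm) bytes long.
 */
static int my_gcm_encrypt(struct scatterlist *src, struct scatterlist *dst,
                          unsigned int ptlen, struct scatterlist *assoc,
                          unsigned int assoclen, const u8 *key, u8 *iv)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        struct my_aead_result res;
        int ret;

        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_aead_setkey(tfm, key, 16);         /* AES-128 */
        if (!ret)
                ret = crypto_aead_setauthsize(tfm, 16); /* full GCM tag */
        if (ret)
                goto out_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_tfm;
        }

        init_completion(&res.done);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  my_aead_done, &res);
        aead_request_set_crypt(req, src, dst, ptlen, iv);
        aead_request_set_assoc(req, assoc, assoclen);

        ret = crypto_aead_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&res.done);
                ret = res.err;
        }

        /* A crypto_aead_decrypt() call would report -EBADMSG here on a bad tag. */
        aead_request_free(req);
out_tfm:
        crypto_free_aead(tfm);
        return ret;
}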
+ * + * + * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD + * cipher operation performs the authentication of the data during the + * decryption operation. Therefore, the function returns this error if + * the authentication of the ciphertext was unsuccessful (i.e. the + * integrity of the ciphertext or the associated data was violated); + * < 0 if an error occurred. + */ static inline int crypto_aead_decrypt(struct aead_request *req) { return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); } +/** + * DOC: Asynchronous AEAD Request Handle + * + * The aead_request data structure contains all pointers to data required for + * the AEAD cipher operation. This includes the cipher handle (which can be + * used by multiple aead_request instances), pointer to plaintext and + * ciphertext, asynchronous callback function, etc. It acts as a handle to the + * aead_request_* API calls in a similar way as AEAD handle to the + * crypto_aead_* API calls. + */ + +/** + * crypto_aead_reqsize() - obtain size of the request data structure + * @tfm: cipher handle + * + * Return: number of bytes + */ static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) { return crypto_aead_crt(tfm)->reqsize; } +/** + * aead_request_set_tfm() - update cipher handle reference in request + * @req: request handle to be modified + * @tfm: cipher handle that shall be added to the request handle + * + * Allow the caller to replace the existing aead handle in the request + * data structure with a different one. + */ static inline void aead_request_set_tfm(struct aead_request *req, struct crypto_aead *tfm) { req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); } +/** + * aead_request_alloc() - allocate request data structure + * @tfm: cipher handle to be registered with the request + * @gfp: memory allocation flag that is handed to kmalloc by the API call. + * + * Allocate the request data structure that must be used with the AEAD + * encrypt and decrypt API calls. During the allocation, the provided aead + * handle is registered in the request data structure. + * + * Return: allocated request handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, gfp_t gfp) { @@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, return req; } +/** + * aead_request_free() - zeroize and free request data structure + * @req: request data structure cipher handle to be freed + */ static inline void aead_request_free(struct aead_request *req) { kzfree(req); } +/** + * aead_request_set_callback() - set asynchronous callback function + * @req: request handle + * @flags: specify zero or an ORing of the flags + * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and + * increase the wait queue beyond the initial maximum size; + * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep + * @compl: callback function pointer to be registered with the request handle + * @data: The data pointer refers to memory that is not used by the kernel + * crypto API, but provided to the callback function for it to use. Here, + * the caller can provide a reference to memory the callback function can + * operate on. As the callback function is invoked asynchronously to the + * related functionality, it may need to access data structures of the + * related functionality which can be referenced using this pointer. 
The + * callback function can access the memory via the "data" field in the + * crypto_async_request data structure provided to the callback function. + * + * Setting the callback function that is triggered once the cipher operation + * completes + * + * The callback function is registered with the aead_request handle and + * must comply with the following template: + * + * void callback_function(struct crypto_async_request *req, int error) + */ static inline void aead_request_set_callback(struct aead_request *req, u32 flags, crypto_completion_t compl, @@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req, req->base.flags = flags; } +/** + * aead_request_set_crypt - set data buffers + * @req: request handle + * @src: source scatter / gather list + * @dst: destination scatter / gather list + * @cryptlen: number of bytes to process from @src + * @iv: IV for the cipher operation which must comply with the IV size defined + * by crypto_aead_ivsize() + * + * Setting the source data and destination data scatter / gather lists. + * + * For encryption, the source is treated as the plaintext and the + * destination is the ciphertext. For a decryption operation, the use is + * reversed: the source is the ciphertext and the destination is the plaintext. + * + * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption, + * the caller must concatenate the ciphertext followed by the + * authentication tag and provide the entire data stream to the + * decryption operation (i.e. the data length used for the + * initialization of the scatterlist and the data length for the + * decryption operation is identical). For encryption, however, + * the authentication tag is created while encrypting the data. + * The destination buffer must hold sufficient space for the + * ciphertext and the authentication tag while the encryption + * invocation must only point to the plaintext data size. The + * following code snippet illustrates the memory usage + * buffer = kmalloc(ptbuflen + (enc ? authsize : 0)); + * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0)); + * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv); + */ static inline void aead_request_set_crypt(struct aead_request *req, struct scatterlist *src, struct scatterlist *dst, @@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req, req->iv = iv; } +/** + * aead_request_set_assoc() - set the associated data scatter / gather list + * @req: request handle + * @assoc: associated data scatter / gather list + * @assoclen: number of bytes to process from @assoc + * + * For encryption, the memory is filled with the associated data. For + * decryption, the memory must point to the associated data. + */ static inline void aead_request_set_assoc(struct aead_request *req, struct scatterlist *assoc, unsigned int assoclen) @@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req, req->assoclen = assoclen; } +/** + * DOC: Synchronous Block Cipher API + * + * The synchronous block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto) + * + * Synchronous calls, have a context in the tfm. But since a single tfm can be + * used in multiple calls and in parallel, this info should not be changeable + * (unless a lock is used). This applies, for example, to the symmetric key. 
+ * However, the IV is changeable, so there is an iv field in blkcipher_tfm + * structure for synchronous blkcipher api. So, its the only state info that can + * be kept for synchronous calls without using a big lock across a tfm. + * + * The block cipher API allows the use of a complete cipher, i.e. a cipher + * consisting of a template (a block chaining mode) and a single block cipher + * primitive (e.g. AES). + * + * The plaintext data buffer and the ciphertext data buffer are pointed to + * by using scatter/gather lists. The cipher operation is performed + * on all segments of the provided scatter/gather lists. + * + * The kernel crypto API supports a cipher operation "in-place" which means that + * the caller may provide the same scatter/gather list for the plaintext and + * cipher text. After the completion of the cipher operation, the plaintext + * data is replaced with the ciphertext data in case of an encryption and vice + * versa for a decryption. The caller must ensure that the scatter/gather lists + * for the output data point to sufficiently large buffers, i.e. multiples of + * the block size of the cipher. + */ + static inline struct crypto_blkcipher *__crypto_blkcipher_cast( struct crypto_tfm *tfm) { @@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast( return __crypto_blkcipher_cast(tfm); } +/** + * crypto_alloc_blkcipher() - allocate synchronous block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * blkcipher cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a block cipher. The returned struct + * crypto_blkcipher is the cipher handle that is required for any subsequent + * API invocation for that block cipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
+ */ static inline struct crypto_blkcipher *crypto_alloc_blkcipher( const char *alg_name, u32 type, u32 mask) { @@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm( return &tfm->base; } +/** + * crypto_free_blkcipher() - zeroize and free the block cipher handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) { crypto_free_tfm(crypto_blkcipher_tfm(tfm)); } +/** + * crypto_has_blkcipher() - Search for the availability of a block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the block cipher is known to the kernel crypto API; false + * otherwise + */ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; @@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) return crypto_has_alg(alg_name, type, mask); } +/** + * crypto_blkcipher_name() - return the name / cra_name from the cipher handle + * @tfm: cipher handle + * + * Return: The character string holding the name of the cipher + */ static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) { return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); @@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg( return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; } +/** + * crypto_blkcipher_ivsize() - obtain IV size + * @tfm: cipher handle + * + * The size of the IV for the block cipher referenced by the cipher handle is + * returned. This IV size may be zero if the cipher does not need an IV. + * + * Return: IV size in bytes + */ static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) { return crypto_blkcipher_alg(tfm)->ivsize; } +/** + * crypto_blkcipher_blocksize() - obtain block size of cipher + * @tfm: cipher handle + * + * The block size for the block cipher referenced with the cipher handle is + * returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation. + * + * Return: block size of cipher + */ static inline unsigned int crypto_blkcipher_blocksize( struct crypto_blkcipher *tfm) { @@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); } +/** + * crypto_blkcipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the block cipher referenced by the cipher + * handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. 
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, const u8 *key, unsigned int keylen) { @@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, key, keylen); } +/** + * crypto_blkcipher_encrypt() - encrypt plaintext + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * ciphertext + * @src: scatter/gather list that holds the plaintext + * @nbytes: number of bytes of the plaintext to encrypt. + * + * Encrypt plaintext data using the IV set by the caller with a preceding + * call of crypto_blkcipher_set_iv. + * + * The blkcipher_desc data structure must be filled by the caller and can + * reside on the stack. The caller must fill desc as follows: desc.tfm is filled + * with the block cipher handle; desc.flags is filled with either + * CRYPTO_TFM_REQ_MAY_SLEEP or 0. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, @@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); } +/** + * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * ciphertext + * @src: scatter/gather list that holds the plaintext + * @nbytes: number of bytes of the plaintext to encrypt. + * + * Encrypt plaintext data with the use of an IV that is solely used for this + * cipher operation. Any previously set IV is not used. + * + * The blkcipher_desc data structure must be filled by the caller and can + * reside on the stack. The caller must fill desc as follows: desc.tfm is filled + * with the block cipher handle; desc.info is filled with the IV to be used for + * the current operation; desc.flags is filled with either + * CRYPTO_TFM_REQ_MAY_SLEEP or 0. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, @@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); } +/** + * crypto_blkcipher_decrypt() - decrypt ciphertext + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * plaintext + * @src: scatter/gather list that holds the ciphertext + * @nbytes: number of bytes of the ciphertext to decrypt. + * + * Decrypt ciphertext data using the IV set by the caller with a preceding + * call of crypto_blkcipher_set_iv. + * + * The blkcipher_desc data structure must be filled by the caller as documented + * for the crypto_blkcipher_encrypt call above. 
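Building on the calls documented above, a hedged sketch of one in-place encryption pass with a per-call IV might look like this; the function name is hypothetical, the handle is assumed to have been allocated as sketched earlier, len is assumed to be a multiple of the cipher block size, and buf must be a linear (e.g. kmalloc'ed) buffer usable in a scatterlist.

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_cbc_encrypt(struct crypto_blkcipher *tfm,
                               const u8 *key, unsigned int keylen,
                               u8 *iv, u8 *buf, unsigned int len)
{
        struct blkcipher_desc desc;
        struct scatterlist sg;
        int ret;

        ret = crypto_blkcipher_setkey(tfm, key, keylen);
        if (ret)
                return ret;

        desc.tfm = tfm;
        desc.info = iv;         /* IV of crypto_blkcipher_ivsize(tfm) bytes, used only for this call */
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        /* in-place operation: the same scatterlist is source and destination */
        sg_init_one(&sg, buf, len);
        return crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
}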
+ * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + * + */ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, @@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); } +/** + * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV + * @desc: reference to the block cipher handle with meta data + * @dst: scatter/gather list that is filled by the cipher operation with the + * plaintext + * @src: scatter/gather list that holds the ciphertext + * @nbytes: number of bytes of the ciphertext to decrypt. + * + * Decrypt ciphertext data with the use of an IV that is solely used for this + * cipher operation. Any previously set IV is not used. + * + * The blkcipher_desc data structure must be filled by the caller as documented + * for the crypto_blkcipher_encrypt_iv call above. + * + * Return: 0 if the cipher operation was successful; < 0 if an error occurred + */ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, @@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); } +/** + * crypto_blkcipher_set_iv() - set IV for cipher + * @tfm: cipher handle + * @src: buffer holding the IV + * @len: length of the IV in bytes + * + * The caller provided IV is set for the block cipher referenced by the cipher + * handle. + */ static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, const u8 *src, unsigned int len) { memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); } +/** + * crypto_blkcipher_get_iv() - obtain IV from cipher + * @tfm: cipher handle + * @dst: buffer filled with the IV + * @len: length of the buffer dst + * + * The caller can obtain the IV set for the block cipher referenced by the + * cipher handle and store it into the user-provided buffer. If the buffer + * has an insufficient space, the IV is truncated to fit the buffer. + */ static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, u8 *dst, unsigned int len) { memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); } +/** + * DOC: Single Block Cipher API + * + * The single block cipher API is used with the ciphers of type + * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto). + * + * Using the single block cipher API calls, operations with the basic cipher + * primitive can be implemented. These cipher primitives exclude any block + * chaining operations including IV handling. + * + * The purpose of this single block cipher API is to support the implementation + * of templates or other concepts that only need to perform the cipher operation + * on one block at a time. Templates invoke the underlying cipher primitive + * block-wise and process either the input or the output data of these cipher + * operations. 
+ */ + static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) { return (struct crypto_cipher *)tfm; @@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) return __crypto_cipher_cast(tfm); } +/** + * crypto_alloc_cipher() - allocate single block cipher handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a single block cipher. The returned struct + * crypto_cipher is the cipher handle that is required for any subsequent API + * invocation for that single block cipher. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. + */ static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, u32 type, u32 mask) { @@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) return &tfm->base; } +/** + * crypto_free_cipher() - zeroize and free the single block cipher handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_cipher(struct crypto_cipher *tfm) { crypto_free_tfm(crypto_cipher_tfm(tfm)); } +/** + * crypto_has_cipher() - Search for the availability of a single block cipher + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * single block cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the single block cipher is known to the kernel crypto API; + * false otherwise + */ static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; @@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) return &crypto_cipher_tfm(tfm)->crt_cipher; } +/** + * crypto_cipher_blocksize() - obtain block size for cipher + * @tfm: cipher handle + * + * The block size for the single block cipher referenced with the cipher handle + * tfm is returned. The caller may use that information to allocate appropriate + * memory for the data returned by the encryption or decryption operation + * + * Return: block size of cipher + */ static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) { return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); @@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); } +/** + * crypto_cipher_setkey() - set key for cipher + * @tfm: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the single block cipher referenced by the + * cipher handle. + * + * Note, the key length determines the cipher type. Many block ciphers implement + * different cipher modes depending on the key size, such as AES-128 vs AES-192 + * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 + * is performed. 
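As an illustration of the single block cipher calls described in this section, a minimal sketch that keys a handle and transforms exactly one block might read as follows; names are illustrative and crypto_cipher_encrypt_one()/crypto_cipher_decrypt_one() are documented just below.

#include <linux/crypto.h>
#include <linux/err.h>

static int example_encrypt_one_block(const u8 *key, unsigned int keylen,
                                     u8 *dst, const u8 *src)
{
        struct crypto_cipher *tfm;
        int ret;

        tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_cipher_setkey(tfm, key, keylen);
        if (!ret)
                /* src and dst must each hold one full cipher block */
                crypto_cipher_encrypt_one(tfm, dst, src);

        crypto_free_cipher(tfm);
        return ret;
}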
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, const u8 *key, unsigned int keylen) { @@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, key, keylen); } +/** + * crypto_cipher_encrypt_one() - encrypt one block of plaintext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the ciphertext + * @src: buffer holding the plaintext to be encrypted + * + * Invoke the encryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. + */ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src) { @@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, dst, src); } +/** + * crypto_cipher_decrypt_one() - decrypt one block of ciphertext + * @tfm: cipher handle + * @dst: points to the buffer that will be filled with the plaintext + * @src: buffer holding the ciphertext to be decrypted + * + * Invoke the decryption operation of one block. The caller must ensure that + * the plaintext and ciphertext buffers are at least one block in size. + */ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, u8 *dst, const u8 *src) { @@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, dst, src); } +/** + * DOC: Synchronous Message Digest API + * + * The synchronous message digest API is used with the ciphers of type + * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto) + */ + static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) { return (struct crypto_hash *)tfm; @@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm) return __crypto_hash_cast(tfm); } +/** + * crypto_alloc_hash() - allocate synchronous message digest handle + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Allocate a cipher handle for a message digest. The returned struct + * crypto_hash is the cipher handle that is required for any subsequent + * API invocation for that message digest. + * + * Return: allocated cipher handle in case of success; IS_ERR() is true in case + * of an error, PTR_ERR() returns the error code. 
+ */ static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, u32 type, u32 mask) { @@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm) return &tfm->base; } +/** + * crypto_free_hash() - zeroize and free message digest handle + * @tfm: cipher handle to be freed + */ static inline void crypto_free_hash(struct crypto_hash *tfm) { crypto_free_tfm(crypto_hash_tfm(tfm)); } +/** + * crypto_has_hash() - Search for the availability of a message digest + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the + * message digest cipher + * @type: specifies the type of the cipher + * @mask: specifies the mask for the cipher + * + * Return: true when the message digest cipher is known to the kernel crypto + * API; false otherwise + */ static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; @@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm) return &crypto_hash_tfm(tfm)->crt_hash; } +/** + * crypto_hash_blocksize() - obtain block size for message digest + * @tfm: cipher handle + * + * The block size for the message digest cipher referenced with the cipher + * handle is returned. + * + * Return: block size of cipher + */ static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) { return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); @@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm) return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); } +/** + * crypto_hash_digestsize() - obtain message digest size + * @tfm: cipher handle + * + * The size for the message digest created by the message digest cipher + * referenced with the cipher handle is returned. + * + * Return: message digest size + */ static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) { return crypto_hash_crt(tfm)->digestsize; @@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags) crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); } +/** + * crypto_hash_init() - (re)initialize message digest handle + * @desc: cipher request handle that to be filled by caller -- + * desc.tfm is filled with the hash cipher handle; + * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0. + * + * The call (re-)initializes the message digest referenced by the hash cipher + * request handle. Any potentially existing state created by previous + * operations is discarded. + * + * Return: 0 if the message digest initialization was successful; < 0 if an + * error occurred + */ static inline int crypto_hash_init(struct hash_desc *desc) { return crypto_hash_crt(desc->tfm)->init(desc); } +/** + * crypto_hash_update() - add data to message digest for processing + * @desc: cipher request handle + * @sg: scatter / gather list pointing to the data to be added to the message + * digest + * @nbytes: number of bytes to be processed from @sg + * + * Updates the message digest state of the cipher handle pointed to by the + * hash cipher request handle with the input data pointed to by the + * scatter/gather list. 
+ * + * Return: 0 if the message digest update was successful; < 0 if an error + * occurred + */ static inline int crypto_hash_update(struct hash_desc *desc, struct scatterlist *sg, unsigned int nbytes) @@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc, return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); } +/** + * crypto_hash_final() - calculate message digest + * @desc: cipher request handle + * @out: message digest output buffer -- The caller must ensure that the out + * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize + * function). + * + * Finalize the message digest operation and create the message digest + * based on all data added to the cipher handle. The message digest is placed + * into the output buffer. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) { return crypto_hash_crt(desc->tfm)->final(desc, out); } +/** + * crypto_hash_digest() - calculate message digest for a buffer + * @desc: see crypto_hash_final() + * @sg: see crypto_hash_update() + * @nbytes: see crypto_hash_update() + * @out: see crypto_hash_final() + * + * This function is a "short-hand" for the function calls of crypto_hash_init, + * crypto_hash_update and crypto_hash_final. The parameters have the same + * meaning as discussed for those separate three functions. + * + * Return: 0 if the message digest creation was successful; < 0 if an error + * occurred + */ static inline int crypto_hash_digest(struct hash_desc *desc, struct scatterlist *sg, unsigned int nbytes, u8 *out) @@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc, return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); } +/** + * crypto_hash_setkey() - set key for message digest + * @hash: cipher handle + * @key: buffer holding the key + * @keylen: length of the key in bytes + * + * The caller provided key is set for the message digest cipher. The cipher + * handle must point to a keyed hash in order for this function to succeed. 
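Tying the synchronous hash calls together, a hedged sketch of a keyed digest over one linear buffer could look like this; "hmac(sha256)" and the helper name are assumptions of the example.

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_hmac(const u8 *key, unsigned int keylen,
                        const void *data, unsigned int len, u8 *out)
{
        struct crypto_hash *tfm;
        struct hash_desc desc;
        struct scatterlist sg;
        int ret;

        tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_hash_setkey(tfm, key, keylen);
        if (ret)
                goto out;

        desc.tfm = tfm;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        sg_init_one(&sg, data, len);
        /* out must provide crypto_hash_digestsize(tfm) bytes */
        ret = crypto_hash_digest(&desc, &sg, len, out);
out:
        crypto_free_hash(tfm);
        return ret;
}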
+ * + * Return: 0 if the setting of the key was successful; < 0 if an error occurred + */ static inline int crypto_hash_setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen) { diff --git a/include/linux/dcache.h b/include/linux/dcache.h index b2a2a08523bf..5a813988e6d4 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -124,15 +124,15 @@ struct dentry { void *d_fsdata; /* fs-specific data */ struct list_head d_lru; /* LRU list */ + struct list_head d_child; /* child of parent list */ + struct list_head d_subdirs; /* our children */ /* - * d_child and d_rcu can share memory + * d_alias and d_rcu can share memory */ union { - struct list_head d_child; /* child of parent list */ + struct hlist_node d_alias; /* inode alias list */ struct rcu_head d_rcu; } d_u; - struct list_head d_subdirs; /* our children */ - struct hlist_node d_alias; /* inode alias list */ }; /* @@ -230,7 +230,6 @@ extern seqlock_t rename_lock; */ extern void d_instantiate(struct dentry *, struct inode *); extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); -extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); extern int d_instantiate_no_diralias(struct dentry *, struct inode *); extern void __d_drop(struct dentry *dentry); extern void d_drop(struct dentry *dentry); diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 4d0b4d1aa132..d84f8c254a87 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -92,8 +92,8 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode, struct dentry *parent, struct debugfs_regset32 *regset); -int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, - int nregs, void __iomem *base, char *prefix); +void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, + int nregs, void __iomem *base, char *prefix); struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, struct dentry *parent, @@ -233,10 +233,9 @@ static inline struct dentry *debugfs_create_regset32(const char *name, return ERR_PTR(-ENODEV); } -static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, +static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix) { - return 0; } static inline bool debugfs_initialized(void) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index e1707de043ae..ca6d2acc5eb7 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -64,6 +64,7 @@ typedef int (*dm_request_endio_fn) (struct dm_target *ti, union map_info *map_context); typedef void (*dm_presuspend_fn) (struct dm_target *ti); +typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti); typedef void (*dm_postsuspend_fn) (struct dm_target *ti); typedef int (*dm_preresume_fn) (struct dm_target *ti); typedef void (*dm_resume_fn) (struct dm_target *ti); @@ -145,6 +146,7 @@ struct target_type { dm_endio_fn end_io; dm_request_endio_fn rq_end_io; dm_presuspend_fn presuspend; + dm_presuspend_undo_fn presuspend_undo; dm_postsuspend_fn postsuspend; dm_preresume_fn preresume; dm_resume_fn resume; diff --git a/include/linux/device.h b/include/linux/device.h index ce1f21608b16..41d6a7555c6b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -911,6 +911,11 @@ static inline void device_unlock(struct device *dev) mutex_unlock(&dev->mutex); } +static inline void device_lock_assert(struct device *dev) +{ 
+ lockdep_assert_held(&dev->mutex); +} + void driver_init(void); /* diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 653a1fd07ae8..40cd75e21ea2 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -447,7 +447,8 @@ struct dmaengine_unmap_data { * communicate status * @phys: physical address of the descriptor * @chan: target channel for this operation - * @tx_submit: set the prepared descriptor(s) to be executed by the engine + * @tx_submit: accept the descriptor, assign ordered cookie and mark the + * descriptor pending. To be pushed on .issue_pending() call * @callback: routine to call after this operation is complete * @callback_param: general parameter to pass to the callback routine * ---async_tx api specific fields--- diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 593fff99e6bf..30624954dec5 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -30,6 +30,12 @@ struct acpi_dmar_header; +#ifdef CONFIG_X86 +# define DMAR_UNITS_SUPPORTED MAX_IO_APICS +#else +# define DMAR_UNITS_SUPPORTED 64 +#endif + /* DMAR Flags */ #define DMAR_INTR_REMAP 0x1 #define DMAR_X2APIC_OPT_OUT 0x2 @@ -120,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, /* Intel IOMMU detection */ extern int detect_intel_iommu(void); extern int enable_drhd_fault_handling(void); +extern int dmar_device_add(acpi_handle handle); +extern int dmar_device_remove(acpi_handle handle); + +static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg) +{ + return 0; +} #ifdef CONFIG_INTEL_IOMMU extern int iommu_detected, no_iommu; extern int intel_iommu_init(void); -extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); -extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); +extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg); +extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg); +extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); +extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); +extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); #else /* !CONFIG_INTEL_IOMMU: */ static inline int intel_iommu_init(void) { return -ENODEV; } -static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) + +#define dmar_parse_one_rmrr dmar_res_noop +#define dmar_parse_one_atsr dmar_res_noop +#define dmar_check_one_atsr dmar_res_noop +#define dmar_release_one_atsr dmar_res_noop + +static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { return 0; } -static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) + +static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) { return 0; } -static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) +#endif /* CONFIG_INTEL_IOMMU */ + +#ifdef CONFIG_IRQ_REMAP +extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert); +#else /* CONFIG_IRQ_REMAP */ +static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert) +{ return 0; } +#endif /* CONFIG_IRQ_REMAP */ + +#else /* CONFIG_DMAR_TABLE */ + +static inline int dmar_device_add(void *handle) +{ + return 0; +} + +static inline int dmar_device_remove(void *handle) { return 0; } -#endif /* CONFIG_INTEL_IOMMU */ #endif /* CONFIG_DMAR_TABLE */ diff --git a/include/linux/drbd.h b/include/linux/drbd.h index debb70d40547..8723f2a99e15 100644 --- 
a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -172,7 +172,7 @@ enum drbd_ret_code { ERR_RES_NOT_KNOWN = 158, ERR_RES_IN_USE = 159, ERR_MINOR_CONFIGURED = 160, - ERR_MINOR_EXISTS = 161, + ERR_MINOR_OR_VOLUME_EXISTS = 161, ERR_INVALID_REQUEST = 162, ERR_NEED_APV_100 = 163, ERR_NEED_ALLOW_TWO_PRI = 164, diff --git a/include/linux/edac.h b/include/linux/edac.h index e1e68da6f35c..da3b72e95db3 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -194,7 +194,8 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_DDR3: DDR3 RAM * @MEM_RDDR3: Registered DDR3 RAM * This is a variant of the DDR3 memories. - * @MEM_DDR4: DDR4 RAM + * @MEM_LRDDR3 Load-Reduced DDR3 memory. + * @MEM_DDR4: Unbuffered DDR4 RAM * @MEM_RDDR4: Registered DDR4 RAM * This is a variant of the DDR4 memories. */ @@ -216,6 +217,7 @@ enum mem_type { MEM_XDR, MEM_DDR3, MEM_RDDR3, + MEM_LRDDR3, MEM_DDR4, MEM_RDDR4, }; diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index e50f98b0297a..eb0b1988050a 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -75,6 +75,10 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word, u16 *data); extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word, __le16 *data, const u16 words); +extern void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, + const u8 byte, u8 *data); +extern void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom, + const u8 byte, u8 *data, const u16 bytes); extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); diff --git a/include/linux/efi.h b/include/linux/efi.h index 0949f9c7e872..0238d612750e 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -547,6 +547,9 @@ void efi_native_runtime_setup(void); #define SMBIOS_TABLE_GUID \ EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) +#define SMBIOS3_TABLE_GUID \ + EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 ) + #define SAL_SYSTEM_TABLE_GUID \ EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) @@ -810,7 +813,8 @@ extern struct efi { unsigned long mps; /* MPS table */ unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ unsigned long acpi20; /* ACPI table (ACPI 2.0) */ - unsigned long smbios; /* SM BIOS table */ + unsigned long smbios; /* SMBIOS table (32 bit entry point) */ + unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ unsigned long sal_systab; /* SAL system table */ unsigned long boot_info; /* boot info table */ unsigned long hcdp; /* HCDP table */ diff --git a/include/linux/elf.h b/include/linux/elf.h index 67a5fa7830c4..20fa8d8ae313 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -15,6 +15,11 @@ set_personality(PER_LINUX | (current->personality & (~PER_MASK))) #endif +#ifndef SET_PERSONALITY2 +#define SET_PERSONALITY2(ex, state) \ + SET_PERSONALITY(ex) +#endif + #if ELF_CLASS == ELFCLASS32 extern Elf32_Dyn _DYNAMIC []; diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 733980fce8e3..41c891d05f04 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -392,4 +392,16 @@ static inline unsigned long compare_ether_header(const void *a, const void *b) #endif } +/** + * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame + * @skb: Buffer to pad + * + * An Ethernet frame should have a minimum size of 60 bytes. 
This function + * takes short frames and pads them with zeros up to the 60 byte limit. + */ +static inline int eth_skb_pad(struct sk_buff *skb) +{ + return skb_put_padto(skb, ETH_ZLEN); +} + #endif /* _LINUX_ETHERDEVICE_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c1a2d60dfb82..653dc9c4ebac 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -59,6 +59,26 @@ enum ethtool_phys_id_state { ETHTOOL_ID_OFF }; +enum { + ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ + ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ + + /* + * Add your fresh new hash function bits above and remember to update + * rss_hash_func_strings[] in ethtool.c + */ + ETH_RSS_HASH_FUNCS_COUNT +}; + +#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) +#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT) + +#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) +#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) + +#define ETH_RSS_HASH_UNKNOWN 0 +#define ETH_RSS_HASH_NO_CHANGE 0 + struct net_device; /* Some generic methods drivers may use in their ethtool_ops */ @@ -158,17 +178,14 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) * Returns zero if not supported for this specific device. * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. * Returns zero if not supported for this specific device. - * @get_rxfh: Get the contents of the RX flow hash indirection table and hash - * key. - * Will only be called if one or both of @get_rxfh_indir_size and - * @get_rxfh_key_size are implemented and return non-zero. - * Returns a negative error code or zero. - * @set_rxfh: Set the contents of the RX flow hash indirection table and/or - * hash key. In case only the indirection table or hash key is to be - * changed, the other argument will be %NULL. - * Will only be called if one or both of @get_rxfh_indir_size and - * @get_rxfh_key_size are implemented and return non-zero. + * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key + * and/or hash function. * Returns a negative error code or zero. + * @set_rxfh: Set the contents of the RX flow hash indirection table, hash + * key, and/or hash function. Arguments which are set to %NULL or zero + * will remain unchanged. + * Returns a negative error code or zero. An error code must be returned + * if at least one unsupported change was requested. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or * zero. 
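As a sketch of how a driver might implement the reworked rxfh callbacks (the foo_* names, table sizes and private structure are hypothetical; only the NULL/zero handling follows the semantics described above):

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>

/* hypothetical per-device state, sized for illustration only */
struct foo_priv {
        u32 rss_indir[128];
        u8 rss_key[40];
};

static int foo_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
{
        struct foo_priv *priv = netdev_priv(dev);

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;      /* hardware uses Toeplitz */
        if (indir)
                memcpy(indir, priv->rss_indir, sizeof(priv->rss_indir));
        if (key)
                memcpy(key, priv->rss_key, sizeof(priv->rss_key));
        return 0;
}

static int foo_set_rxfh(struct net_device *dev, const u32 *indir,
                        const u8 *key, const u8 hfunc)
{
        struct foo_priv *priv = netdev_priv(dev);

        /* an unsupported change must be rejected, per the comment above */
        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;
        if (indir)
                memcpy(priv->rss_indir, indir, sizeof(priv->rss_indir));
        if (key)
                memcpy(priv->rss_key, key, sizeof(priv->rss_key));
        return 0;
}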
@@ -241,9 +258,10 @@ struct ethtool_ops { int (*reset)(struct net_device *, u32 *); u32 (*get_rxfh_key_size)(struct net_device *); u32 (*get_rxfh_indir_size)(struct net_device *); - int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key); + int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key, + u8 *hfunc); int (*set_rxfh)(struct net_device *, const u32 *indir, - const u8 *key); + const u8 *key, const u8 hfunc); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 860313a33a43..87f14e90e984 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -33,7 +33,8 @@ #define F2FS_META_INO(sbi) (sbi->meta_ino_num) /* This flag is used by node and meta inodes, and by recovery */ -#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) +#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) +#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) /* * For further optimization on multi-head logs, on-disk layout supports maximum @@ -170,14 +171,12 @@ struct f2fs_extent { #define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ +#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ +#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ #define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ F2FS_INLINE_XATTR_ADDRS - 1)) -#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\ - sizeof(__le32) * (DEF_ADDRS_PER_INODE + \ - DEF_NIDS_PER_INODE - 1)) - struct f2fs_inode { __le16 i_mode; /* file mode */ __u8 i_advise; /* file hints */ @@ -435,6 +434,24 @@ struct f2fs_dentry_block { __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; } __packed; +/* for inline dir */ +#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \ + ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ + BITS_PER_BYTE + 1)) +#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \ + BITS_PER_BYTE - 1) / BITS_PER_BYTE) +#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \ + ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ + NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE)) + +/* inline directory entry structure */ +struct f2fs_inline_dentry { + __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE]; + __u8 reserved[INLINE_RESERVED_SIZE]; + struct f2fs_dir_entry dentry[NR_INLINE_DENTRY]; + __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN]; +} __packed; + /* file types used in inode_info->flags */ enum { F2FS_FT_UNKNOWN, diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index c6f996f2abb6..798fad9e420d 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -5,6 +5,7 @@ #include <linux/types.h> #include <linux/debugfs.h> +#include <linux/ratelimit.h> #include <linux/atomic.h> /* @@ -25,14 +26,18 @@ struct fault_attr { unsigned long reject_end; unsigned long count; + struct ratelimit_state ratelimit_state; + struct dentry *dname; }; -#define FAULT_ATTR_INITIALIZER { \ - .interval = 1, \ - .times = ATOMIC_INIT(1), \ - .require_end = ULONG_MAX, \ - .stacktrace_depth = 32, \ - .verbose = 2, \ +#define FAULT_ATTR_INITIALIZER { \ + .interval = 1, \ + .times = ATOMIC_INIT(1), \ + .require_end = ULONG_MAX, \ + .stacktrace_depth = 32, \ + .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \ + .verbose = 2, \ + .dname = NULL, \ } #define DECLARE_FAULT_ATTR(name) struct fault_attr name = 
FAULT_ATTR_INITIALIZER diff --git a/include/linux/fence.h b/include/linux/fence.h index d174585b874b..39efee130d2b 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -128,8 +128,8 @@ struct fence_cb { * from irq context, so normal spinlocks can be used. * * A return value of false indicates the fence already passed, - * or some failure occured that made it impossible to enable - * signaling. True indicates succesful enabling. + * or some failure occurred that made it impossible to enable + * signaling. True indicates successful enabling. * * fence->status may be set in enable_signaling, but only when false is * returned. diff --git a/include/linux/file.h b/include/linux/file.h index 4d69123377a2..f87d30882a24 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -66,7 +66,6 @@ extern void set_close_on_exec(unsigned int fd, int flag); extern bool get_close_on_exec(unsigned int fd); extern void put_filp(struct file *); extern int get_unused_fd_flags(unsigned flags); -#define get_unused_fd() get_unused_fd_flags(0) extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); diff --git a/include/linux/filter.h b/include/linux/filter.h index ca95abd2bed1..caac2087a4d5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -381,6 +381,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); +int sk_attach_bpf(u32 ufd, struct sock *sk); int sk_detach_filter(struct sock *sk); int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 7fd81b8c4897..6b7fd9cf5ea2 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -246,15 +246,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, * defined in <linux/wait.h> */ -#define wait_event_freezekillable(wq, condition) \ -({ \ - int __retval; \ - freezer_do_not_count(); \ - __retval = wait_event_killable(wq, (condition)); \ - freezer_count(); \ - __retval; \ -}) - /* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ #define wait_event_freezekillable_unsafe(wq, condition) \ ({ \ @@ -265,35 +256,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires, __retval; \ }) -#define wait_event_freezable(wq, condition) \ -({ \ - int __retval; \ - freezer_do_not_count(); \ - __retval = wait_event_interruptible(wq, (condition)); \ - freezer_count(); \ - __retval; \ -}) - -#define wait_event_freezable_timeout(wq, condition, timeout) \ -({ \ - long __retval = timeout; \ - freezer_do_not_count(); \ - __retval = wait_event_interruptible_timeout(wq, (condition), \ - __retval); \ - freezer_count(); \ - __retval; \ -}) - -#define wait_event_freezable_exclusive(wq, condition) \ -({ \ - int __retval; \ - freezer_do_not_count(); \ - __retval = wait_event_interruptible_exclusive(wq, condition); \ - freezer_count(); \ - __retval; \ -}) - - #else /* !CONFIG_FREEZER */ static inline bool frozen(struct task_struct *p) { return false; } static inline bool freezing(struct task_struct *p) { return false; } @@ -331,18 +293,6 @@ static inline void set_freezable(void) {} #define freezable_schedule_hrtimeout_range(expires, delta, mode) \ schedule_hrtimeout_range(expires, delta, mode) -#define wait_event_freezable(wq, condition) \ - wait_event_interruptible(wq, condition) - -#define wait_event_freezable_timeout(wq, condition, timeout) \ - 
wait_event_interruptible_timeout(wq, condition, timeout) - -#define wait_event_freezable_exclusive(wq, condition) \ - wait_event_interruptible_exclusive(wq, condition) - -#define wait_event_freezekillable(wq, condition) \ - wait_event_killable(wq, condition) - #define wait_event_freezekillable_unsafe(wq, condition) \ wait_event_killable(wq, condition) diff --git a/include/linux/fs.h b/include/linux/fs.h index 9ab779e8a63c..4193a0bd99b0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -18,6 +18,7 @@ #include <linux/pid.h> #include <linux/bug.h> #include <linux/mutex.h> +#include <linux/rwsem.h> #include <linux/capability.h> #include <linux/semaphore.h> #include <linux/fiemap.h> @@ -401,7 +402,7 @@ struct address_space { atomic_t i_mmap_writable;/* count VM_SHARED mappings */ struct rb_root i_mmap; /* tree of private and shared mappings */ struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ - struct mutex i_mmap_mutex; /* protect tree, count, list */ + struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ /* Protected by tree_lock together with the radix tree */ unsigned long nrpages; /* number of total pages */ unsigned long nrshadows; /* number of shadow entries */ @@ -467,6 +468,26 @@ struct block_device { int mapping_tagged(struct address_space *mapping, int tag); +static inline void i_mmap_lock_write(struct address_space *mapping) +{ + down_write(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_unlock_write(struct address_space *mapping) +{ + up_write(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_lock_read(struct address_space *mapping) +{ + down_read(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_unlock_read(struct address_space *mapping) +{ + up_read(&mapping->i_mmap_rwsem); +} + /* * Might pages of this file be mapped into userspace? */ @@ -606,9 +627,6 @@ struct inode { const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ struct file_lock *i_flock; struct address_space i_data; -#ifdef CONFIG_QUOTA - struct dquot *i_dquot[MAXQUOTAS]; -#endif struct list_head i_devices; union { struct pipe_inode_info *i_pipe; @@ -789,7 +807,6 @@ struct file { struct rcu_head fu_rcuhead; } f_u; struct path f_path; -#define f_dentry f_path.dentry struct inode *f_inode; /* cached value */ const struct file_operations *f_op; @@ -1224,6 +1241,7 @@ struct super_block { struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; struct hlist_node s_instances; + unsigned int s_quota_types; /* Bitmask of supported quota types */ struct quota_info s_dquot; /* Diskquota specific options */ struct sb_writers s_writers; @@ -1467,7 +1485,10 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); * This allows the kernel to read directories into kernel space or * to have different dirent layouts depending on the binary type. 
*/ -typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); +struct dir_context; +typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, + unsigned); + struct dir_context { const filldir_t actor; loff_t pos; @@ -1513,7 +1534,7 @@ struct file_operations { int (*setlease)(struct file *, long, struct file_lock **, void **); long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len); - int (*show_fdinfo)(struct seq_file *m, struct file *f); + void (*show_fdinfo)(struct seq_file *m, struct file *f); }; struct inode_operations { @@ -1577,7 +1598,9 @@ struct super_operations { void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); + int (*freeze_super) (struct super_block *); int (*freeze_fs) (struct super_block *); + int (*thaw_super) (struct super_block *); int (*unfreeze_fs) (struct super_block *); int (*statfs) (struct dentry *, struct kstatfs *); int (*remount_fs) (struct super_block *, int *, char *); @@ -1590,6 +1613,7 @@ struct super_operations { #ifdef CONFIG_QUOTA ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); + struct dquot **(*get_dquots)(struct inode *); #endif int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); long (*nr_cached_objects)(struct super_block *, int); @@ -2072,6 +2096,7 @@ extern int vfs_open(const struct path *, struct file *, const struct cred *); extern struct file * dentry_open(const struct path *, int, const struct cred *); extern int filp_close(struct file *, fl_owner_t id); +extern struct filename *getname_flags(const char __user *, int, int *); extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); @@ -2786,6 +2811,11 @@ static inline void inode_has_no_xattr(struct inode *inode) inode->i_flags |= S_NOSEC; } +static inline bool is_root_inode(struct inode *inode) +{ + return inode == inode->i_sb->s_root->d_inode; +} + static inline bool dir_emit(struct dir_context *ctx, const char *name, int namelen, u64 ino, unsigned type) diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index ca060d7c4fa6..0f313f93c586 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -197,24 +197,6 @@ struct fsnotify_group { #define FSNOTIFY_EVENT_INODE 2 /* - * Inode specific fields in an fsnotify_mark - */ -struct fsnotify_inode_mark { - struct inode *inode; /* inode this mark is associated with */ - struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */ - struct list_head free_i_list; /* tmp list used when freeing this mark */ -}; - -/* - * Mount point specific fields in an fsnotify_mark - */ -struct fsnotify_vfsmount_mark { - struct vfsmount *mnt; /* vfsmount this mark is associated with */ - struct hlist_node m_list; /* list of marks by inode->i_fsnotify_marks */ - struct list_head free_m_list; /* tmp list used when freeing this mark */ -}; - -/* * a mark is simply an object attached to an in core inode which allows an * fsnotify listener to indicate they are either no longer interested in events * of a type matching mask or only interested in those events. @@ -230,11 +212,17 @@ struct fsnotify_mark { * in kernel that found and may be using this mark. 
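Since the actor now receives the struct dir_context itself instead of an opaque pointer, callers typically embed the context in a private structure and recover it with container_of(); a hedged sketch with hypothetical foo_* names:

#include <linux/fs.h>
#include <linux/kernel.h>

struct foo_readdir_ctx {
        struct dir_context ctx;         /* cursor handed to iterate_dir() */
        unsigned int entries;
};

static int foo_fill(struct dir_context *ctx, const char *name, int namelen,
                    loff_t offset, u64 ino, unsigned int d_type)
{
        struct foo_readdir_ctx *fctx =
                container_of(ctx, struct foo_readdir_ctx, ctx);

        fctx->entries++;
        return 0;       /* return 0 to keep iterating */
}

static int foo_count_entries(struct file *dir)
{
        struct foo_readdir_ctx fctx = {
                .ctx.actor = foo_fill,
        };
        int ret;

        ret = iterate_dir(dir, &fctx.ctx);
        return ret ? ret : fctx.entries;
}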
*/ atomic_t refcnt; /* active things looking at this mark */ struct fsnotify_group *group; /* group this mark is for */ - struct list_head g_list; /* list of marks by group->i_fsnotify_marks */ + struct list_head g_list; /* list of marks by group->i_fsnotify_marks + * Also reused for queueing mark into + * destroy_list when it's waiting for + * the end of SRCU period before it can + * be freed */ spinlock_t lock; /* protect group and inode */ + struct hlist_node obj_list; /* list of marks for inode / vfsmount */ + struct list_head free_list; /* tmp list used when freeing this mark */ union { - struct fsnotify_inode_mark i; - struct fsnotify_vfsmount_mark m; + struct inode *inode; /* inode this mark is associated with */ + struct vfsmount *mnt; /* vfsmount this mark is associated with */ }; __u32 ignored_mask; /* events types to ignore */ #define FSNOTIFY_MARK_FLAG_INODE 0x01 @@ -243,7 +231,6 @@ struct fsnotify_mark { #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 #define FSNOTIFY_MARK_FLAG_ALIVE 0x10 unsigned int flags; /* vfsmount or inode mark? */ - struct list_head destroy_list; void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ }; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 662697babd48..ed501953f0b2 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -61,6 +61,11 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); /* * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are * set in the flags member. + * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and + * IPMODIFY are attribute flags which can be set only before + * registering the ftrace_ops, and cannot be modified while registered. + * Changing those attribute flags after registering ftrace_ops will + * cause unexpected results. + * * ENABLED - set/unset when ftrace_ops is registered/unregistered * DYNAMIC - set when ftrace_ops is registered to denote dynamically @@ -94,6 +99,17 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * ADDING - The ops is in the process of being added. * REMOVING - The ops is in the process of being removed. * MODIFYING - The ops is in the process of changing its filter functions. + * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code. + * The arch specific code sets this flag when it allocated a + * trampoline. This lets the arch know that it can update the + * trampoline in case the callback function changes. + * The ftrace_ops trampoline can be set by the ftrace users, and + * in such cases the arch must not modify it. Only the arch ftrace + * core code should set this flag. + * IPMODIFY - The ops can modify the IP register. This can only be set with + * SAVE_REGS. If another ops with this flag set is already registered + * for any of the functions that this ops will be registered for, then + * this ops will fail to register or set_filter_ip. 
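To make the register-time nature of these attribute flags concrete, a hedged sketch of attaching a callback might look as follows; the callback, the ops structure and the traced symbol are assumptions of the example, and FTRACE_OPS_FL_IPMODIFY would additionally require SAVE_REGS.

#include <linux/ftrace.h>
#include <linux/kallsyms.h>

static void notrace example_trace_func(unsigned long ip, unsigned long parent_ip,
                                       struct ftrace_ops *op, struct pt_regs *regs)
{
        /* with FTRACE_OPS_FL_SAVE_REGS set, regs is valid here */
}

static struct ftrace_ops example_ops = {
        .func   = example_trace_func,
        /* attribute flags: decide before registering, never change afterwards */
        .flags  = FTRACE_OPS_FL_SAVE_REGS,
};

static int example_attach(void)
{
        unsigned long ip = kallsyms_lookup_name("do_fork");    /* example symbol */
        int ret;

        if (!ip)
                return -ENOENT;

        ret = ftrace_set_filter_ip(&example_ops, ip, 0, 0);
        if (ret)
                return ret;

        return register_ftrace_function(&example_ops);
}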
*/ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, @@ -108,6 +124,8 @@ enum { FTRACE_OPS_FL_ADDING = 1 << 9, FTRACE_OPS_FL_REMOVING = 1 << 10, FTRACE_OPS_FL_MODIFYING = 1 << 11, + FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, + FTRACE_OPS_FL_IPMODIFY = 1 << 13, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -142,6 +160,7 @@ struct ftrace_ops { struct ftrace_ops_hash *func_hash; struct ftrace_ops_hash old_hash; unsigned long trampoline; + unsigned long trampoline_size; #endif }; @@ -255,7 +274,9 @@ struct ftrace_func_command { int ftrace_arch_code_modify_prepare(void); int ftrace_arch_code_modify_post_process(void); -void ftrace_bug(int err, unsigned long ip); +struct dyn_ftrace; + +void ftrace_bug(int err, struct dyn_ftrace *rec); struct seq_file; @@ -287,6 +308,8 @@ extern int ftrace_text_reserved(const void *start, const void *end); extern int ftrace_nr_registered_ops(void); +bool is_ftrace_trampoline(unsigned long addr); + /* * The dyn_ftrace record's flags field is split into two parts. * the first part which is '0-FTRACE_REF_MAX' is a counter of @@ -297,6 +320,7 @@ extern int ftrace_nr_registered_ops(void); * ENABLED - the function is being traced * REGS - the record wants the function to save regs * REGS_EN - the function is set up to save regs. + * IPMODIFY - the record allows for the IP address to be changed. * * When a new ftrace_ops is registered and wants a function to save * pt_regs, the rec->flag REGS is set. When the function has been @@ -310,10 +334,11 @@ enum { FTRACE_FL_REGS_EN = (1UL << 29), FTRACE_FL_TRAMP = (1UL << 28), FTRACE_FL_TRAMP_EN = (1UL << 27), + FTRACE_FL_IPMODIFY = (1UL << 26), }; -#define FTRACE_REF_MAX_SHIFT 27 -#define FTRACE_FL_BITS 5 +#define FTRACE_REF_MAX_SHIFT 26 +#define FTRACE_FL_BITS 6 #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) @@ -586,6 +611,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user size_t cnt, loff_t *ppos) { return -ENODEV; } static inline int ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } + +static inline bool is_ftrace_trampoline(unsigned long addr) +{ + return false; +} #endif /* CONFIG_DYNAMIC_FTRACE */ /* totally disable ftrace - can not re-enable after this */ diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 28672e87e910..0bebb5c348b8 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -138,6 +138,17 @@ enum print_line_t { TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ }; +/* + * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq + * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function + * simplifies those functions and keeps them in sync. + */ +static inline enum print_line_t trace_handle_return(struct trace_seq *s) +{ + return trace_seq_has_overflowed(s) ? 
+ TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; +} + void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 41b30fd4d041..b840e3b2770d 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -110,11 +110,8 @@ struct vm_area_struct; #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ __GFP_RECLAIMABLE) #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) -#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ - __GFP_HIGHMEM) -#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ - __GFP_HARDWALL | __GFP_HIGHMEM | \ - __GFP_MOVABLE) +#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) +#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) #define GFP_IOFS (__GFP_IO | __GFP_FS) #define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ @@ -381,8 +378,8 @@ extern void free_kmem_pages(unsigned long addr, unsigned int order); void page_alloc_init(void); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); -void drain_all_pages(void); -void drain_local_pages(void *dummy); +void drain_all_pages(struct zone *zone); +void drain_local_pages(struct zone *zone); /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 12f146fa6604..00b1b70d68ba 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -94,6 +94,13 @@ int gpiod_to_irq(const struct gpio_desc *desc); struct gpio_desc *gpio_to_desc(unsigned gpio); int desc_to_gpio(const struct gpio_desc *desc); +/* Child properties interface */ +struct fwnode_handle; + +struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, + const char *propname); +struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, + struct fwnode_handle *child); #else /* CONFIG_GPIOLIB */ static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index 8b622468952c..ee2d8c6f9130 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h @@ -2,6 +2,7 @@ #define _GPIO_KEYS_H struct device; +struct gpio_desc; /** * struct gpio_keys_button - configuration parameters @@ -17,6 +18,7 @@ struct device; * disable button via sysfs * @value: axis value for %EV_ABS * @irq: Irq number in case of interrupt keys + * @gpiod: GPIO descriptor */ struct gpio_keys_button { unsigned int code; @@ -29,6 +31,7 @@ struct gpio_keys_button { bool can_disable; int value; unsigned int irq; + struct gpio_desc *gpiod; }; /** diff --git a/include/linux/hash.h b/include/linux/hash.h index d0494c399392..1afde47e1528 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -15,7 +15,6 @@ */ #include <asm/types.h> -#include <asm/hash.h> #include <linux/compiler.h> /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ @@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr) return (u32)val; } -struct fast_hash_ops { - u32 (*hash)(const void *data, u32 len, u32 seed); - u32 (*hash2)(const u32 *data, u32 len, u32 seed); -}; - -/** - * arch_fast_hash - Caclulates a hash over a given buffer that can have - * arbitrary size. This function will eventually use an - * architecture-optimized hashing implementation if - * available, and trades off distribution for speed. 
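Referring back to the trace_handle_return() helper added in ftrace_event.h above, a hedged sketch of an event output callback using it (all foo_* names are hypothetical):

#include <linux/ftrace_event.h>

static enum print_line_t
foo_trace_output(struct trace_iterator *iter, int flags,
                 struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;

        trace_seq_puts(s, "foo event\n");

        /* collapses the overflow check that callers used to open-code */
        return trace_handle_return(s);
}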
- * - * @data: buffer to hash - * @len: length of buffer in bytes - * @seed: start seed - * - * Returns 32bit hash. - */ -extern u32 arch_fast_hash(const void *data, u32 len, u32 seed); - -/** - * arch_fast_hash2 - Caclulates a hash over a given buffer that has a - * size that is of a multiple of 32bit words. This - * function will eventually use an architecture- - * optimized hashing implementation if available, - * and trades off distribution for speed. - * - * @data: buffer to hash (must be 32bit padded) - * @len: number of 32bit words - * @seed: start seed - * - * Returns 32bit hash. - */ -extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed); - #endif /* _LINUX_HASH_H */ diff --git a/include/linux/hid.h b/include/linux/hid.h index 78ea9bf941cd..06c4607744f6 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -234,6 +234,33 @@ struct hid_item { #define HID_DG_BARRELSWITCH 0x000d0044 #define HID_DG_ERASER 0x000d0045 #define HID_DG_TABLETPICK 0x000d0046 + +#define HID_CP_CONSUMERCONTROL 0x000c0001 +#define HID_CP_NUMERICKEYPAD 0x000c0002 +#define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003 +#define HID_CP_MICROPHONE 0x000c0004 +#define HID_CP_HEADPHONE 0x000c0005 +#define HID_CP_GRAPHICEQUALIZER 0x000c0006 +#define HID_CP_FUNCTIONBUTTONS 0x000c0036 +#define HID_CP_SELECTION 0x000c0080 +#define HID_CP_MEDIASELECTION 0x000c0087 +#define HID_CP_SELECTDISC 0x000c00ba +#define HID_CP_PLAYBACKSPEED 0x000c00f1 +#define HID_CP_PROXIMITY 0x000c0109 +#define HID_CP_SPEAKERSYSTEM 0x000c0160 +#define HID_CP_CHANNELLEFT 0x000c0161 +#define HID_CP_CHANNELRIGHT 0x000c0162 +#define HID_CP_CHANNELCENTER 0x000c0163 +#define HID_CP_CHANNELFRONT 0x000c0164 +#define HID_CP_CHANNELCENTERFRONT 0x000c0165 +#define HID_CP_CHANNELSIDE 0x000c0166 +#define HID_CP_CHANNELSURROUND 0x000c0167 +#define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168 +#define HID_CP_CHANNELTOP 0x000c0169 +#define HID_CP_CHANNELUNKNOWN 0x000c016a +#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180 +#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200 + #define HID_DG_CONFIDENCE 0x000d0047 #define HID_DG_WIDTH 0x000d0048 #define HID_DG_HEIGHT 0x000d0049 @@ -312,11 +339,8 @@ struct hid_item { * Vendor specific HID device groups */ #define HID_GROUP_RMI 0x0100 - -/* - * Vendor specific HID device groups - */ #define HID_GROUP_WACOM 0x0101 +#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102 /* * This is the global environment of the parser. This information is @@ -1063,6 +1087,17 @@ static inline void hid_hw_wait(struct hid_device *hdev) hdev->ll_driver->wait(hdev); } +/** + * hid_report_len - calculate the report length + * + * @report: the report we want to know the length + */ +static inline int hid_report_len(struct hid_report *report) +{ + /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ + return ((report->size - 1) >> 3) + 1 + (report->id > 0); +} + int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, int interrupt); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 6e6d338641fe..431b7fc605c9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -175,6 +175,52 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, } #endif /* !CONFIG_HUGETLB_PAGE */ +/* + * hugepages at page global directory. If arch support + * hugepages at pgd level, they need to define this. 
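As a usage note for the hid_report_len() helper added above, a driver might size a raw report request with it roughly as follows; the function name and the feature-report context are assumptions of the example.

#include <linux/hid.h>
#include <linux/slab.h>

static int foo_get_feature(struct hid_device *hdev, struct hid_report *report)
{
        int len = hid_report_len(report);
        u8 *buf;
        int ret;

        buf = kzalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = hid_hw_raw_request(hdev, report->id, buf, len,
                                 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);

        kfree(buf);
        return ret < 0 ? ret : 0;
}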
+ */ +#ifndef pgd_huge +#define pgd_huge(x) 0 +#endif + +#ifndef pgd_write +static inline int pgd_write(pgd_t pgd) +{ + BUG(); + return 0; +} +#endif + +#ifndef pud_write +static inline int pud_write(pud_t pud) +{ + BUG(); + return 0; +} +#endif + +#ifndef is_hugepd +/* + * Some architectures requires a hugepage directory format that is + * required to support multiple hugepage sizes. For example + * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables" + * introduced the same on powerpc. This allows for a more flexible hugepage + * pagetable layout. + */ +typedef struct { unsigned long pd; } hugepd_t; +#define is_hugepd(hugepd) (0) +#define __hugepd(x) ((hugepd_t) { (x) }) +static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr, + unsigned pdshift, unsigned long end, + int write, struct page **pages, int *nr) +{ + return 0; +} +#else +extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr, + unsigned pdshift, unsigned long end, + int write, struct page **pages, int *nr); +#endif #define HUGETLB_ANON_FILE "anon_hugepage" @@ -311,7 +357,8 @@ static inline struct hstate *hstate_sizelog(int page_size_log) { if (!page_size_log) return &default_hstate; - return size_to_hstate(1 << page_size_log); + + return size_to_hstate(1UL << page_size_log); } static inline struct hstate *hstate_vma(struct vm_area_struct *vma) diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 0129f89cf98d..bcc853eccc85 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -16,7 +16,6 @@ #define _LINUX_HUGETLB_CGROUP_H #include <linux/mmdebug.h> -#include <linux/res_counter.h> struct hugetlb_cgroup; /* diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 08cfaff8a072..476c685ca6f9 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -650,6 +650,8 @@ struct vmbus_channel { u8 monitor_grp; u8 monitor_bit; + bool rescind; /* got rescind msg */ + u32 ringbuffer_gpadlhandle; /* Allocated memory for ring buffer */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index a720d9921b47..e3a1721c8354 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -385,7 +385,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * to name two of the most common. * * The return codes from the @master_xfer field should indicate the type of - * error code that occured during the transfer, as documented in the kernel + * error code that occurred during the transfer, as documented in the kernel * Documentation file Documentation/i2c/fault-codes. 
*/ struct i2c_algorithm { diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h index 69280db02c41..ee3c2aba2a8e 100644 --- a/include/linux/i2c/pmbus.h +++ b/include/linux/i2c/pmbus.h @@ -40,6 +40,10 @@ struct pmbus_platform_data { u32 flags; /* Device specific flags */ + + /* regulator support */ + int num_regulators; + struct regulator_init_data *reg_init_data; }; #endif /* _PMBUS_H_ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b1be39c76931..4f4eea8a6288 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -19,6 +19,7 @@ #include <linux/types.h> #include <linux/if_ether.h> #include <asm/byteorder.h> +#include <asm/unaligned.h> /* * DS bit usage @@ -1066,6 +1067,12 @@ struct ieee80211_pspoll { /* TDLS */ +/* Channel switch timing */ +struct ieee80211_ch_switch_timing { + __le16 switch_time; + __le16 switch_timeout; +} __packed; + /* Link-id information element */ struct ieee80211_tdls_lnkie { u8 ie_type; /* Link Identifier IE */ @@ -1107,6 +1114,15 @@ struct ieee80211_tdls_data { u8 dialog_token; u8 variable[0]; } __packed discover_req; + struct { + u8 target_channel; + u8 oper_class; + u8 variable[0]; + } __packed chan_switch_req; + struct { + __le16 status_code; + u8 variable[0]; + } __packed chan_switch_resp; } u; } __packed; @@ -1274,7 +1290,7 @@ struct ieee80211_ht_cap { #define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 /* - * Maximum length of AMPDU that the STA can receive. + * Maximum length of AMPDU that the STA can receive in high-throughput (HT). * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) */ enum ieee80211_max_ampdu_length_exp { @@ -1284,6 +1300,21 @@ enum ieee80211_max_ampdu_length_exp { IEEE80211_HT_MAX_AMPDU_64K = 3 }; +/* + * Maximum length of AMPDU that the STA can receive in VHT. 
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) + */ +enum ieee80211_vht_max_ampdu_length_exp { + IEEE80211_VHT_MAX_AMPDU_8K = 0, + IEEE80211_VHT_MAX_AMPDU_16K = 1, + IEEE80211_VHT_MAX_AMPDU_32K = 2, + IEEE80211_VHT_MAX_AMPDU_64K = 3, + IEEE80211_VHT_MAX_AMPDU_128K = 4, + IEEE80211_VHT_MAX_AMPDU_256K = 5, + IEEE80211_VHT_MAX_AMPDU_512K = 6, + IEEE80211_VHT_MAX_AMPDU_1024K = 7 +}; + #define IEEE80211_HT_MAX_AMPDU_FACTOR 13 /* Minimum MPDU start spacing */ @@ -1998,6 +2029,16 @@ enum ieee80211_tdls_actioncode { WLAN_TDLS_DISCOVERY_REQUEST = 10, }; +/* Extended Channel Switching capability to be set in the 1st byte of + * the @WLAN_EID_EXT_CAPABILITY information element + */ +#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) + +/* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */ +#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) +#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) +#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6) + /* Interworking capabilities are set in 7th bit of 4th byte of the * @WLAN_EID_EXT_CAPABILITY information element */ @@ -2009,6 +2050,7 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) +#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) @@ -2016,6 +2058,9 @@ enum ieee80211_tdls_actioncode { /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 +/* BSS Coex IE information field bits */ +#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0) + /** * enum - mesh synchronization method identifier * @@ -2398,6 +2443,30 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, return !!(tim->virtual_map[index] & mask); } +/** + * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) + * @skb: the skb containing the frame, length will not be checked + * @hdr_size: the size of the ieee80211_hdr that starts at skb->data + * + * This function assumes the frame is a data frame, and that the network header + * is in the correct place. + */ +static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) +{ + if (!skb_is_nonlinear(skb) && + skb->len > (skb_network_offset(skb) + 2)) { + /* Point to where the indication of TDLS should start */ + const u8 *tdls_data = skb_network_header(skb) - 2; + + if (get_unaligned_be16(tdls_data) == ETH_P_TDLS && + tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE && + tdls_data[3] == WLAN_CATEGORY_TDLS) + return tdls_data[4]; + } + + return -1; +} + /* convert time units */ #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) diff --git a/include/net/ieee802154.h b/include/linux/ieee802154.h index 0aa7122e8f15..6e82d888287c 100644 --- a/include/net/ieee802154.h +++ b/include/linux/ieee802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
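Both the existing HT enum and the VHT enum added above encode the maximum A-MPDU length as 2^(13 + exp) - 1 octets; VHT simply extends the exponent range to 7. Evaluating the formula for every value (standalone userspace C, not part of the patch) reproduces the 8K..1024K names:

#include <stdio.h>

/* maximum A-MPDU length in octets for a given exponent: 2^(13 + exp) - 1 */
static unsigned long ampdu_max_len(unsigned int exp)
{
	return (1UL << (13 + exp)) - 1;
}

int main(void)
{
	unsigned int exp;

	for (exp = 0; exp <= 7; exp++)	/* HT stops at exp = 3, VHT allows up to 7 */
		printf("exp=%u -> %lu octets\n", exp, ampdu_max_len(exp));
	return 0;
}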
- * * Written by: * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> * Maxim Gorbachyov <maxim.gorbachev@siemens.com> @@ -24,10 +20,27 @@ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ -#ifndef NET_IEEE802154_H -#define NET_IEEE802154_H +#ifndef LINUX_IEEE802154_H +#define LINUX_IEEE802154_H + +#include <linux/types.h> +#include <linux/random.h> +#include <asm/byteorder.h> #define IEEE802154_MTU 127 +#define IEEE802154_MIN_PSDU_LEN 5 + +#define IEEE802154_PAN_ID_BROADCAST 0xffff +#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff +#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe + +#define IEEE802154_EXTENDED_ADDR_LEN 8 + +#define IEEE802154_LIFS_PERIOD 40 +#define IEEE802154_SIFS_PERIOD 12 + +#define IEEE802154_MAX_CHANNEL 26 +#define IEEE802154_MAX_PAGE 31 #define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */ #define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */ @@ -189,7 +202,41 @@ enum { IEEE802154_SCAN_IN_PROGRESS = 0xfc, }; +/** + * ieee802154_is_valid_psdu_len - check if psdu len is valid + * @len: psdu len with (MHR + payload + MFR) + */ +static inline bool ieee802154_is_valid_psdu_len(const u8 len) +{ + return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU); +} + +/** + * ieee802154_is_valid_psdu_len - check if extended addr is valid + * @addr: extended addr to check + */ +static inline bool ieee802154_is_valid_extended_addr(const __le64 addr) +{ + /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff + * is used internally as extended to short address broadcast mapping. + * This is currently a workaround because neighbor discovery can't + * deal with short addresses types right now. + */ + return ((addr != cpu_to_le64(0x0000000000000000ULL)) && + (addr != cpu_to_le64(0xffffffffffffffffULL))); +} -#endif +/** + * ieee802154_random_extended_addr - generates a random extended address + * @addr: extended addr pointer to place the random address + */ +static inline void ieee802154_random_extended_addr(__le64 *addr) +{ + get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN); + /* toggle some bit if we hit an invalid extended addr */ + if (!ieee802154_is_valid_extended_addr(*addr)) + ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01; +} +#endif /* LINUX_IEEE802154_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 808dcb8cc04f..0a8ce762a47f 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -15,6 +15,7 @@ #include <linux/netdevice.h> #include <uapi/linux/if_bridge.h> +#include <linux/bitops.h> struct br_ip { union { @@ -32,11 +33,41 @@ struct br_ip_list { struct br_ip addr; }; +#define BR_HAIRPIN_MODE BIT(0) +#define BR_BPDU_GUARD BIT(1) +#define BR_ROOT_BLOCK BIT(2) +#define BR_MULTICAST_FAST_LEAVE BIT(3) +#define BR_ADMIN_COST BIT(4) +#define BR_LEARNING BIT(5) +#define BR_FLOOD BIT(6) +#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) +#define BR_PROMISC BIT(7) +#define BR_PROXYARP BIT(8) +#define BR_LEARNING_SYNC BIT(9) + extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); typedef int br_should_route_hook_t(struct sk_buff *skb); extern br_should_route_hook_t __rcu *br_should_route_hook; +#if IS_ENABLED(CONFIG_BRIDGE) +int br_fdb_external_learn_add(struct net_device *dev, + const unsigned char *addr, u16 vid); +int br_fdb_external_learn_del(struct net_device *dev, + const unsigned char *addr, u16 vid); +#else +static inline int br_fdb_external_learn_add(struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + return 0; +} +static inline int 
br_fdb_external_learn_del(struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + return 0; +} +#endif + #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index d69f0577a319..515a35e2a48a 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features, } /** - * vlan_insert_tag - regular VLAN tag inserting + * __vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload - * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. - * - * Following the skb_unshare() example, in case of error, the calling function - * doesn't have to worry about freeing the original skb. + * Returns error if skb_cow_head failes. * * Does not change skb->protocol so this function can be used during receive. */ -static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline int __vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { struct vlan_ethhdr *veth; - if (skb_cow_head(skb, VLAN_HLEN) < 0) { - dev_kfree_skb_any(skb); - return NULL; - } + if (skb_cow_head(skb, VLAN_HLEN) < 0) + return -ENOMEM; + veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); /* Move the mac addresses to the beginning of the new header. */ @@ -316,12 +312,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, /* now, the TCI */ veth->h_vlan_TCI = htons(vlan_tci); + return 0; +} + +/** + * vlan_insert_tag - regular VLAN tag inserting + * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol + * @vlan_tci: VLAN TCI to insert + * + * Inserts the VLAN tag into @skb as part of the payload + * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + * + * Does not change skb->protocol so this function can be used during receive. + */ +static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) +{ + int err; + + err = __vlan_insert_tag(skb, vlan_proto, vlan_tci); + if (err) { + dev_kfree_skb_any(skb); + return NULL; + } return skb; } /** - * __vlan_put_tag - regular VLAN tag inserting + * vlan_insert_tag_set_proto - regular VLAN tag inserting * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * * Inserts the VLAN tag into @skb as part of the payload @@ -330,8 +354,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, * Following the skb_unshare() example, in case of error, the calling function * doesn't have to worry about freeing the original skb. 
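The split above changes the ownership rules: __vlan_insert_tag() reports failure with an error code and leaves the skb untouched, while the vlan_insert_tag() wrapper keeps the old behaviour of freeing the skb and returning NULL. A minimal sketch of a caller written against the new low-level helper; example_tag_and_send() and its transmit hand-off are hypothetical:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* hypothetical driver helper illustrating ownership under __vlan_insert_tag() */
static int example_tag_and_send(struct sk_buff *skb, u16 tci)
{
	int err = __vlan_insert_tag(skb, htons(ETH_P_8021Q), tci);

	if (err) {
		/* unlike vlan_insert_tag(), nothing was freed for us */
		kfree_skb(skb);
		return err;
	}

	/* hand the now-tagged skb to the driver's real transmit path here */
	return 0;
}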
*/ -static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, + __be16 vlan_proto, + u16 vlan_tci) { skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); if (skb) @@ -339,39 +364,53 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, return skb; } -/** - * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting +/* + * __vlan_hwaccel_push_inside - pushes vlan tag to the payload * @skb: skbuff to tag - * @vlan_proto: VLAN encapsulation protocol - * @vlan_tci: VLAN TCI to insert * - * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest + * Pushes the VLAN tag from @skb->vlan_tci inside to the payload. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. */ -static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, - __be16 vlan_proto, - u16 vlan_tci) +static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) { - skb->vlan_proto = vlan_proto; - skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); + if (likely(skb)) + skb->vlan_tci = 0; + return skb; +} +/* + * vlan_hwaccel_push_inside - pushes vlan tag to the payload + * @skb: skbuff to tag + * + * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the + * VLAN tag from @skb->vlan_tci inside to the payload. + * + * Following the skb_unshare() example, in case of error, the calling function + * doesn't have to worry about freeing the original skb. + */ +static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb)) + skb = __vlan_hwaccel_push_inside(skb); return skb; } /** - * vlan_put_tag - inserts VLAN tag according to device features + * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting * @skb: skbuff to tag + * @vlan_proto: VLAN encapsulation protocol * @vlan_tci: VLAN TCI to insert * - * Assumes skb->dev is the target that will xmit this frame. - * Returns a VLAN tagged skb. 
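With vlan_put_tag() on its way out, a driver that cannot offload tag insertion is expected to push a pending hardware-accelerated tag into the payload itself via vlan_hwaccel_push_inside() before building descriptors. A rough ndo_start_xmit-style sketch of that pattern; the function name and the drop handling are illustrative only:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	/*
	 * No-op when no tag is pending; otherwise the tag from
	 * skb->vlan_tci is inserted into the payload and cleared.
	 * On allocation failure the skb is already freed and NULL returns.
	 */
	skb = vlan_hwaccel_push_inside(skb);
	if (!skb)
		return NETDEV_TX_OK;	/* account a TX drop here and carry on */

	/* ... fill descriptors and kick the hardware ... */
	return NETDEV_TX_OK;
}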
+ * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest */ -static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, - __be16 vlan_proto, u16 vlan_tci) +static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, + __be16 vlan_proto, u16 vlan_tci) { - if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) { - return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); - } else { - return __vlan_put_tag(skb, vlan_proto, vlan_tci); - } + skb->vlan_proto = vlan_proto; + skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; } /** diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h index 8bbd7bc1043d..03fa332ad2a8 100644 --- a/include/linux/iio/events.h +++ b/include/linux/iio/events.h @@ -72,7 +72,7 @@ struct iio_event_data { #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) -#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) +#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F) #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 0068708161ff..0a21fbefdfbe 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev) static __inline__ __be32 inet_make_mask(int logmask) { if (logmask) - return htonl(~((1<<(32-logmask))-1)); + return htonl(~((1U<<(32-logmask))-1)); return 0; } diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 77fc43f8fb72..3037fc085e8e 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -102,7 +102,7 @@ extern struct group_info init_groups; #define INIT_IDS #endif -#ifdef CONFIG_TREE_PREEMPT_RCU +#ifdef CONFIG_PREEMPT_RCU #define INIT_TASK_RCU_TREE_PREEMPT() \ .rcu_blocked_node = NULL, #else @@ -166,6 +166,15 @@ extern struct task_group root_task_group; # define INIT_RT_MUTEXES(tsk) #endif +#ifdef CONFIG_NUMA_BALANCING +# define INIT_NUMA_BALANCING(tsk) \ + .numa_preferred_nid = -1, \ + .numa_group = NULL, \ + .numa_faults = NULL, +#else +# define INIT_NUMA_BALANCING(tsk) +#endif + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. 
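The IIO_EVENT_CODE_EXTRACT_DIR fix above replaces the 0xCF mask with 0x7F: the macro is meant to take the low seven bits of that byte, and the old mask silently cleared bits 4 and 5, mangling any direction value that used them. A standalone before/after illustration (the 0x31 sample value is arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t code = (uint64_t)0x31 << 48;	/* event code with direction 0x31 */

	unsigned int old_dir = (code >> 48) & 0xCF;	/* old, lossy mask */
	unsigned int new_dir = (code >> 48) & 0x7F;	/* fixed mask      */

	printf("old: 0x%02x  new: 0x%02x\n", old_dir, new_dir);	/* 0x01 vs 0x31 */
	return 0;
}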
Base=0, limit=0x1fffff (=2MB) @@ -237,6 +246,7 @@ extern struct task_group root_task_group; INIT_CPUSET_SEQ(tsk) \ INIT_RT_MUTEXES(tsk) \ INIT_VTIME(tsk) \ + INIT_NUMA_BALANCING(tsk) \ } diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 69517a24bc50..d9b05b5bf8c7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -556,12 +556,6 @@ static inline void tasklet_enable(struct tasklet_struct *t) atomic_dec(&t->count); } -static inline void tasklet_hi_enable(struct tasklet_struct *t) -{ - smp_mb__before_atomic(); - atomic_dec(&t->count); -} - extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index e6a7c9ff72f2..7a7bd15e54f1 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -22,12 +22,13 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/types.h> +#include <linux/scatterlist.h> #include <trace/events/iommu.h> #define IOMMU_READ (1 << 0) #define IOMMU_WRITE (1 << 1) #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ -#define IOMMU_EXEC (1 << 3) +#define IOMMU_NOEXEC (1 << 3) struct iommu_ops; struct iommu_group; @@ -61,6 +62,7 @@ enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA transactions */ IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ + IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ }; /* @@ -97,6 +99,8 @@ enum iommu_attr { * @detach_dev: detach device from an iommu domain * @map: map a physically contiguous memory region to an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain + * @map_sg: map a scatter-gather list of physically contiguous memory chunks + * to an iommu domain * @iova_to_phys: translate iova to physical address * @add_device: add device to iommu grouping * @remove_device: remove device from iommu grouping @@ -114,6 +118,8 @@ struct iommu_ops { phys_addr_t paddr, size_t size, int prot); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size); + size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); @@ -156,6 +162,9 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size); +extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg,unsigned int nents, + int prot); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); @@ -241,6 +250,13 @@ static inline int report_iommu_fault(struct iommu_domain *domain, return ret; } +static inline size_t iommu_map_sg(struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot) +{ + return domain->ops->map_sg(domain, iova, sg, nents, prot); +} + #else /* CONFIG_IOMMU_API */ struct iommu_ops {}; @@ -293,6 +309,13 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, return -ENODEV; } +static inline size_t iommu_map_sg(struct iommu_domain *domain, + 
unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot) +{ + return -ENODEV; +} + static inline int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, phys_addr_t paddr, u64 size, int prot) diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index 35e7eca4e33b..e365d5ec69cb 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -7,15 +7,6 @@ #include <linux/notifier.h> #include <linux/nsproxy.h> -/* - * ipc namespace events - */ -#define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */ -#define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */ -#define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */ - -#define IPCNS_CALLBACK_PRI 0 - struct user_namespace; struct ipc_ids { @@ -38,7 +29,6 @@ struct ipc_namespace { unsigned int msg_ctlmni; atomic_t msg_bytes; atomic_t msg_hdrs; - int auto_msgmni; size_t shm_ctlmax; size_t shm_ctlall; @@ -77,18 +67,8 @@ extern atomic_t nr_ipc_ns; extern spinlock_t mq_lock; #ifdef CONFIG_SYSVIPC -extern int register_ipcns_notifier(struct ipc_namespace *); -extern int cond_register_ipcns_notifier(struct ipc_namespace *); -extern void unregister_ipcns_notifier(struct ipc_namespace *); -extern int ipcns_notify(unsigned long); extern void shm_destroy_orphaned(struct ipc_namespace *ns); #else /* CONFIG_SYSVIPC */ -static inline int register_ipcns_notifier(struct ipc_namespace *ns) -{ return 0; } -static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns) -{ return 0; } -static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { } -static inline int ipcns_notify(unsigned long l) { return 0; } static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {} #endif /* CONFIG_SYSVIPC */ diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 76d2acbfa7c6..838dbfa3c331 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -37,6 +37,7 @@ #include <linux/list.h> #include <linux/proc_fs.h> +#include <linux/acpi.h> /* For acpi_handle */ struct module; struct device; @@ -278,15 +279,18 @@ enum ipmi_addr_src { SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, SI_PCI, SI_DEVICETREE, SI_DEFAULT }; +const char *ipmi_addr_src_to_str(enum ipmi_addr_src src); union ipmi_smi_info_union { +#ifdef CONFIG_ACPI /* * the acpi_info element is defined for the SI_ACPI * address type */ struct { - void *acpi_handle; + acpi_handle acpi_handle; } acpi_info; +#endif }; struct ipmi_smi_info { diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index bd349240d50e..0b1e569f5ff5 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -98,12 +98,11 @@ struct ipmi_smi_handlers { operation is not allowed to fail. If an error occurs, it should report back the error in a received message. It may do this in the current call context, since no write locks - are held when this is run. If the priority is > 0, the - message will go into a high-priority queue and be sent - first. Otherwise, it goes into a normal-priority queue. */ + are held when this is run. Message are delivered one at + a time by the message handler, a new message will not be + delivered until the previous message is returned. */ void (*sender)(void *send_info, - struct ipmi_smi_msg *msg, - int priority); + struct ipmi_smi_msg *msg); /* Called by the upper layer to request that we try to get events from the BMC we are attached to. 
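The map_sg callback and the iommu_map_sg()/default_iommu_map_sg() entry points added to linux/iommu.h above let a caller map a whole scatterlist in one call instead of looping over iommu_map(). A hedged sketch of a caller; example_map_table(), the caller-chosen IOVA and the read/write protection bits are illustrative choices, not taken from the patch:

#include <linux/iommu.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* hypothetical helper: map an already-built sg table at a caller-chosen IOVA */
static int example_map_table(struct iommu_domain *domain,
			     struct sg_table *sgt, unsigned long iova)
{
	size_t mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
				     IOMMU_READ | IOMMU_WRITE);

	/* the return value is the number of bytes that were actually mapped */
	return mapped ? 0 : -ENOMEM;
}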
*/ @@ -212,7 +211,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, void *send_info, struct ipmi_device_id *device_id, struct device *dev, - const char *sysfs_name, unsigned char slave_addr); /* diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ff560537dd61..c694e7baa621 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -42,6 +42,7 @@ struct ipv6_devconf { __s32 accept_ra_from_local; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD __s32 optimistic_dad; + __s32 use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE __s32 mc_forwarding; @@ -316,14 +317,4 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) #define tcp_twsk_ipv6only(__sk) 0 #define inet_v6_ipv6only(__sk) 0 #endif /* IS_ENABLED(CONFIG_IPV6) */ - -#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ - (((__sk)->sk_portpair == (__ports)) && \ - ((__sk)->sk_family == AF_INET6) && \ - ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ - ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ - (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ - net_eq(sock_net(__sk), (__net))) - #endif /* _IPV6_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index 03f48d936f66..d09ec7a1243e 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -15,11 +15,13 @@ #include <linux/spinlock.h> #include <linux/cpumask.h> #include <linux/gfp.h> +#include <linux/irqhandler.h> #include <linux/irqreturn.h> #include <linux/irqnr.h> #include <linux/errno.h> #include <linux/topology.h> #include <linux/wait.h> +#include <linux/io.h> #include <asm/irq.h> #include <asm/ptrace.h> @@ -27,11 +29,7 @@ struct seq_file; struct module; -struct irq_desc; -struct irq_data; -typedef void (*irq_flow_handler_t)(unsigned int irq, - struct irq_desc *desc); -typedef void (*irq_preflow_handler_t)(struct irq_data *data); +struct msi_msg; /* * IRQ line status. @@ -113,10 +111,14 @@ enum { * * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity + * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to + * support stacked irqchips, which indicates skipping + * all descendent irqchips. */ enum { IRQ_SET_MASK_OK = 0, IRQ_SET_MASK_OK_NOCOPY, + IRQ_SET_MASK_OK_DONE, }; struct msi_desc; @@ -133,6 +135,8 @@ struct irq_domain; * @chip: low level interrupt hardware access * @domain: Interrupt translation domain; responsible for mapping * between hwirq number and linux irq number. 
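IRQ_SET_MASK_OK_DONE, added above, is returned from a set-affinity callback in a stacked-irqchip setup: the core treats it like IRQ_SET_MASK_OK, and it signals that descendant chips can skip their own handling. A rough sketch of a parent-level callback using it; the function name and the register write are placeholders:

#include <linux/irq.h>
#include <linux/cpumask.h>

static int example_parent_set_affinity(struct irq_data *d,
				       const struct cpumask *mask, bool force)
{
	/* ... program the routing/affinity registers for 'd' here ... */

	/* OK as far as the core is concerned, and nothing left for descendants */
	return IRQ_SET_MASK_OK_DONE;
}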
+ * @parent_data: pointer to parent struct irq_data to support hierarchy + * irq_domain * @handler_data: per-IRQ data for the irq_chip methods * @chip_data: platform-specific per-chip private data for the chip * methods, to allow shared chip implementations @@ -151,6 +155,9 @@ struct irq_data { unsigned int state_use_accessors; struct irq_chip *chip; struct irq_domain *domain; +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + struct irq_data *parent_data; +#endif void *handler_data; void *chip_data; struct msi_desc *msi_desc; @@ -315,6 +322,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * any other callback related to this irq * @irq_release_resources: optional to release resources acquired with * irq_request_resources + * @irq_compose_msi_msg: optional to compose message content for MSI + * @irq_write_msi_msg: optional to write message content for MSI * @flags: chip specific flags */ struct irq_chip { @@ -351,6 +360,9 @@ struct irq_chip { int (*irq_request_resources)(struct irq_data *data); void (*irq_release_resources)(struct irq_data *data); + void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); + void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); + unsigned long flags; }; @@ -438,6 +450,18 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); +extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY +extern void irq_chip_ack_parent(struct irq_data *data); +extern int irq_chip_retrigger_hierarchy(struct irq_data *data); +extern void irq_chip_mask_parent(struct irq_data *data); +extern void irq_chip_unmask_parent(struct irq_data *data); +extern void irq_chip_eoi_parent(struct irq_data *data); +extern int irq_chip_set_affinity_parent(struct irq_data *data, + const struct cpumask *dest, + bool force); +#endif + /* Handling of unhandled and spurious interrupts: */ extern void note_interrupt(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret); @@ -639,13 +663,6 @@ void arch_teardown_hwirq(unsigned int irq); void irq_init_desc(unsigned int irq); #endif -#ifndef irq_reg_writel -# define irq_reg_writel(val, addr) writel(val, addr) -#endif -#ifndef irq_reg_readl -# define irq_reg_readl(addr) readl(addr) -#endif - /** * struct irq_chip_regs - register offsets for struct irq_gci * @enable: Enable register offset to reg_base @@ -692,6 +709,8 @@ struct irq_chip_type { * struct irq_chip_generic - Generic irq chip data structure * @lock: Lock to protect register and cache data access * @reg_base: Register base address (virtual) + * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) + * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) * @irq_base: Interrupt base nr for this chip * @irq_cnt: Number of interrupts handled by this chip * @mask_cache: Cached mask register shared between all chip types @@ -716,6 +735,8 @@ struct irq_chip_type { struct irq_chip_generic { raw_spinlock_t lock; void __iomem *reg_base; + u32 (*reg_readl)(void __iomem *addr); + void (*reg_writel)(u32 val, void __iomem *addr); unsigned int irq_base; unsigned int irq_cnt; u32 mask_cache; @@ -740,12 +761,14 @@ struct irq_chip_generic { * the parent irq. 
Usually GPIO implementations * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask + * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE) */ enum irq_gc_flags { IRQ_GC_INIT_MASK_CACHE = 1 << 0, IRQ_GC_INIT_NESTED_LOCK = 1 << 1, IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, IRQ_GC_NO_MASK = 1 << 3, + IRQ_GC_BE_IO = 1 << 4, }; /* @@ -821,4 +844,22 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { } static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } #endif +static inline void irq_reg_writel(struct irq_chip_generic *gc, + u32 val, int reg_offset) +{ + if (gc->reg_writel) + gc->reg_writel(val, gc->reg_base + reg_offset); + else + writel(val, gc->reg_base + reg_offset); +} + +static inline u32 irq_reg_readl(struct irq_chip_generic *gc, + int reg_offset) +{ + if (gc->reg_readl) + return gc->reg_readl(gc->reg_base + reg_offset); + else + return readl(gc->reg_base + reg_offset); +} + #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h new file mode 100644 index 000000000000..420f77b34d02 --- /dev/null +++ b/include/linux/irqchip/mips-gic.h @@ -0,0 +1,249 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000, 07 MIPS Technologies, Inc. + */ +#ifndef __LINUX_IRQCHIP_MIPS_GIC_H +#define __LINUX_IRQCHIP_MIPS_GIC_H + +#include <linux/clocksource.h> + +#define GIC_MAX_INTRS 256 + +/* Constants */ +#define GIC_POL_POS 1 +#define GIC_POL_NEG 0 +#define GIC_TRIG_EDGE 1 +#define GIC_TRIG_LEVEL 0 +#define GIC_TRIG_DUAL_ENABLE 1 +#define GIC_TRIG_DUAL_DISABLE 0 + +#define MSK(n) ((1 << (n)) - 1) + +/* Accessors */ +#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS) + +/* GIC Address Space */ +#define SHARED_SECTION_OFS 0x0000 +#define SHARED_SECTION_SIZE 0x8000 +#define VPE_LOCAL_SECTION_OFS 0x8000 +#define VPE_LOCAL_SECTION_SIZE 0x4000 +#define VPE_OTHER_SECTION_OFS 0xc000 +#define VPE_OTHER_SECTION_SIZE 0x4000 +#define USM_VISIBLE_SECTION_OFS 0x10000 +#define USM_VISIBLE_SECTION_SIZE 0x10000 + +/* Register Map for Shared Section */ + +#define GIC_SH_CONFIG_OFS 0x0000 + +/* Shared Global Counter */ +#define GIC_SH_COUNTER_31_00_OFS 0x0010 +#define GIC_SH_COUNTER_63_32_OFS 0x0014 +#define GIC_SH_REVISIONID_OFS 0x0020 + +/* Convert an interrupt number to a byte offset/bit for multi-word registers */ +#define GIC_INTR_OFS(intr) (((intr) / 32) * 4) +#define GIC_INTR_BIT(intr) ((intr) % 32) + +/* Polarity : Reset Value is always 0 */ +#define GIC_SH_SET_POLARITY_OFS 0x0100 + +/* Triggering : Reset Value is always 0 */ +#define GIC_SH_SET_TRIGGER_OFS 0x0180 + +/* Dual edge triggering : Reset Value is always 0 */ +#define GIC_SH_SET_DUAL_OFS 0x0200 + +/* Set/Clear corresponding bit in Edge Detect Register */ +#define GIC_SH_WEDGE_OFS 0x0280 + +/* Mask manipulation */ +#define GIC_SH_RMASK_OFS 0x0300 +#define GIC_SH_SMASK_OFS 0x0380 + +/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */ +#define GIC_SH_MASK_OFS 0x0400 + +/* Pending Global Interrupts (RO) */ +#define GIC_SH_PEND_OFS 0x0480 + +/* Maps Interrupt X to a Pin */ +#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500 +#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr)) + +/* Maps Interrupt X to a VPE */ +#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000 +#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \ + ((32 * (intr)) + (((vpe) / 32) 
* 4)) +#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32)) + +/* Register Map for Local Section */ +#define GIC_VPE_CTL_OFS 0x0000 +#define GIC_VPE_PEND_OFS 0x0004 +#define GIC_VPE_MASK_OFS 0x0008 +#define GIC_VPE_RMASK_OFS 0x000c +#define GIC_VPE_SMASK_OFS 0x0010 +#define GIC_VPE_WD_MAP_OFS 0x0040 +#define GIC_VPE_COMPARE_MAP_OFS 0x0044 +#define GIC_VPE_TIMER_MAP_OFS 0x0048 +#define GIC_VPE_FDC_MAP_OFS 0x004c +#define GIC_VPE_PERFCTR_MAP_OFS 0x0050 +#define GIC_VPE_SWINT0_MAP_OFS 0x0054 +#define GIC_VPE_SWINT1_MAP_OFS 0x0058 +#define GIC_VPE_OTHER_ADDR_OFS 0x0080 +#define GIC_VPE_WD_CONFIG0_OFS 0x0090 +#define GIC_VPE_WD_COUNT0_OFS 0x0094 +#define GIC_VPE_WD_INITIAL0_OFS 0x0098 +#define GIC_VPE_COMPARE_LO_OFS 0x00a0 +#define GIC_VPE_COMPARE_HI_OFS 0x00a4 + +#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100 +#define GIC_VPE_EIC_SS(intr) (4 * (intr)) + +#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800 +#define GIC_VPE_EIC_VEC(intr) (4 * (intr)) + +#define GIC_VPE_TENABLE_NMI_OFS 0x1000 +#define GIC_VPE_TENABLE_YQ_OFS 0x1004 +#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080 +#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084 + +/* User Mode Visible Section Register Map */ +#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000 +#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004 + +/* Masks */ +#define GIC_SH_CONFIG_COUNTSTOP_SHF 28 +#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF) + +#define GIC_SH_CONFIG_COUNTBITS_SHF 24 +#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF) + +#define GIC_SH_CONFIG_NUMINTRS_SHF 16 +#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF) + +#define GIC_SH_CONFIG_NUMVPES_SHF 0 +#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF) + +#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31)) +#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31)) + +#define GIC_MAP_TO_PIN_SHF 31 +#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF) +#define GIC_MAP_TO_NMI_SHF 30 +#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF) +#define GIC_MAP_TO_YQ_SHF 29 +#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF) +#define GIC_MAP_SHF 0 +#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF) + +/* GIC_VPE_CTL Masks */ +#define GIC_VPE_CTL_FDC_RTBL_SHF 4 +#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF) +#define GIC_VPE_CTL_SWINT_RTBL_SHF 3 +#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF) +#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2 +#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF) +#define GIC_VPE_CTL_TIMER_RTBL_SHF 1 +#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF) +#define GIC_VPE_CTL_EIC_MODE_SHF 0 +#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF) + +/* GIC_VPE_PEND Masks */ +#define GIC_VPE_PEND_WD_SHF 0 +#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF) +#define GIC_VPE_PEND_CMP_SHF 1 +#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF) +#define GIC_VPE_PEND_TIMER_SHF 2 +#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF) +#define GIC_VPE_PEND_PERFCOUNT_SHF 3 +#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF) +#define GIC_VPE_PEND_SWINT0_SHF 4 +#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF) +#define GIC_VPE_PEND_SWINT1_SHF 5 +#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF) + +/* GIC_VPE_RMASK Masks */ +#define GIC_VPE_RMASK_WD_SHF 0 +#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF) +#define 
GIC_VPE_RMASK_CMP_SHF 1 +#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF) +#define GIC_VPE_RMASK_TIMER_SHF 2 +#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF) +#define GIC_VPE_RMASK_PERFCNT_SHF 3 +#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF) +#define GIC_VPE_RMASK_SWINT0_SHF 4 +#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF) +#define GIC_VPE_RMASK_SWINT1_SHF 5 +#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF) + +/* GIC_VPE_SMASK Masks */ +#define GIC_VPE_SMASK_WD_SHF 0 +#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF) +#define GIC_VPE_SMASK_CMP_SHF 1 +#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF) +#define GIC_VPE_SMASK_TIMER_SHF 2 +#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF) +#define GIC_VPE_SMASK_PERFCNT_SHF 3 +#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF) +#define GIC_VPE_SMASK_SWINT0_SHF 4 +#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF) +#define GIC_VPE_SMASK_SWINT1_SHF 5 +#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF) + +/* GIC nomenclature for Core Interrupt Pins. */ +#define GIC_CPU_INT0 0 /* Core Interrupt 2 */ +#define GIC_CPU_INT1 1 /* . */ +#define GIC_CPU_INT2 2 /* . */ +#define GIC_CPU_INT3 3 /* . */ +#define GIC_CPU_INT4 4 /* . */ +#define GIC_CPU_INT5 5 /* Core Interrupt 7 */ + +/* Add 2 to convert GIC CPU pin to core interrupt */ +#define GIC_CPU_PIN_OFFSET 2 + +/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */ +#define GIC_CPU_TO_VEC_OFFSET 2 + +/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ +#define GIC_PIN_TO_VEC_OFFSET 1 + +/* Local GIC interrupts. */ +#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */ +#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */ +#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */ +#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */ +#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */ +#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */ +#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */ +#define GIC_NUM_LOCAL_INTRS 7 + +/* Convert between local/shared IRQ number and GIC HW IRQ number. 
*/ +#define GIC_LOCAL_HWIRQ_BASE 0 +#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE) +#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS +#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) +#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) + +extern unsigned int gic_present; + +extern void gic_init(unsigned long gic_base_addr, + unsigned long gic_addrspace_size, unsigned int cpu_vec, + unsigned int irqbase); +extern void gic_clocksource_init(unsigned int); +extern cycle_t gic_read_count(void); +extern unsigned int gic_get_count_width(void); +extern cycle_t gic_read_compare(void); +extern void gic_write_compare(cycle_t cnt); +extern void gic_write_cpu_compare(cycle_t cnt, int cpu); +extern void gic_send_ipi(unsigned int intr); +extern unsigned int plat_ipi_call_int_xlate(unsigned int); +extern unsigned int plat_ipi_resched_int_xlate(unsigned int); +extern unsigned int gic_get_timer_pending(void); +extern int gic_get_c0_compare_int(void); +extern int gic_get_c0_perfcount_int(void); +#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index b0f9d16e48f6..676d7306a360 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -33,11 +33,14 @@ #define _LINUX_IRQDOMAIN_H #include <linux/types.h> +#include <linux/irqhandler.h> #include <linux/radix-tree.h> struct device_node; struct irq_domain; struct of_device_id; +struct irq_chip; +struct irq_data; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 @@ -64,6 +67,16 @@ struct irq_domain_ops { int (*xlate)(struct irq_domain *d, struct device_node *node, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type); + +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + /* extended V2 interfaces to support hierarchy irq_domains */ + int (*alloc)(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *arg); + void (*free)(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs); + void (*activate)(struct irq_domain *d, struct irq_data *irq_data); + void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); +#endif }; extern struct irq_domain_ops irq_generic_chip_ops; @@ -77,6 +90,7 @@ struct irq_domain_chip_generic; * @ops: pointer to irq_domain methods * @host_data: private data pointer for use by owner. Not touched by irq_domain * core code. + * @flags: host per irq_domain flags * * Optional elements * @of_node: Pointer to device tree nodes associated with the irq_domain. Used @@ -84,6 +98,7 @@ struct irq_domain_chip_generic; * @gc: Pointer to a list of generic chips. There is a helper function for * setting up one or more generic chips for interrupt controllers * drivers using the generic chip library which uses this pointer. + * @parent: Pointer to parent irq_domain to support hierarchy irq_domains * * Revmap data, used internally by irq_domain * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that @@ -97,10 +112,14 @@ struct irq_domain { const char *name; const struct irq_domain_ops *ops; void *host_data; + unsigned int flags; /* Optional data */ struct device_node *of_node; struct irq_domain_chip_generic *gc; +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + struct irq_domain *parent; +#endif /* reverse map data. 
The linear map gets appended to the irq_domain */ irq_hw_number_t hwirq_max; @@ -110,6 +129,22 @@ struct irq_domain { unsigned int linear_revmap[]; }; +/* Irq domain flags */ +enum { + /* Irq domain is hierarchical */ + IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), + + /* Core calls alloc/free recursive through the domain hierarchy. */ + IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), + + /* + * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved + * for implementation specific purposes and ignored by the + * core code. + */ + IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), +}; + #ifdef CONFIG_IRQ_DOMAIN struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, irq_hw_number_t hwirq_max, int direct_max, @@ -220,8 +255,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); +/* V2 interfaces to support hierarchy IRQ domains. */ +extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, + unsigned int virq); +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY +extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, + unsigned int flags, unsigned int size, + struct device_node *node, + const struct irq_domain_ops *ops, void *host_data); +extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, + unsigned int nr_irqs, int node, void *arg, + bool realloc); +extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); +extern void irq_domain_activate_irq(struct irq_data *irq_data); +extern void irq_domain_deactivate_irq(struct irq_data *irq_data); + +static inline int irq_domain_alloc_irqs(struct irq_domain *domain, + unsigned int nr_irqs, int node, void *arg) +{ + return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); +} + +extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq, + struct irq_chip *chip, + void *chip_data); +extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq, struct irq_chip *chip, + void *chip_data, irq_flow_handler_t handler, + void *handler_data, const char *handler_name); +extern void irq_domain_reset_irq_data(struct irq_data *irq_data); +extern void irq_domain_free_irqs_common(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs); +extern void irq_domain_free_irqs_top(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs); + +extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs, void *arg); + +extern void irq_domain_free_irqs_parent(struct irq_domain *domain, + unsigned int irq_base, + unsigned int nr_irqs); + +static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) +{ + return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; +} +#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ +static inline void irq_domain_activate_irq(struct irq_data *data) { } +static inline void irq_domain_deactivate_irq(struct irq_data *data) { } +static inline int irq_domain_alloc_irqs(struct irq_domain *domain, + unsigned int nr_irqs, int node, void *arg) +{ + return -1; +} + +static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) +{ + return false; +} +#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ + #else /* CONFIG_IRQ_DOMAIN */ static inline void irq_dispose_mapping(unsigned int virq) { } +static inline void irq_domain_activate_irq(struct irq_data *data) { } +static inline void 
irq_domain_deactivate_irq(struct irq_data *data) { } #endif /* !CONFIG_IRQ_DOMAIN */ #endif /* _LINUX_IRQDOMAIN_H */ diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h new file mode 100644 index 000000000000..62d543004197 --- /dev/null +++ b/include/linux/irqhandler.h @@ -0,0 +1,14 @@ +#ifndef _LINUX_IRQHANDLER_H +#define _LINUX_IRQHANDLER_H + +/* + * Interrupt flow handler typedefs are defined here to avoid circular + * include dependencies. + */ + +struct irq_desc; +struct irq_data; +typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); +typedef void (*irq_preflow_handler_t)(struct irq_data *data); + +#endif diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h index 866caaa9e2bb..c2ce155d83cc 100644 --- a/include/linux/kern_levels.h +++ b/include/linux/kern_levels.h @@ -22,4 +22,17 @@ */ #define KERN_CONT "" +/* integer equivalents of KERN_<LEVEL> */ +#define LOGLEVEL_SCHED -2 /* Deferred messages from sched code + * are set to this special level */ +#define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */ +#define LOGLEVEL_EMERG 0 /* system is unusable */ +#define LOGLEVEL_ALERT 1 /* action must be taken immediately */ +#define LOGLEVEL_CRIT 2 /* critical conditions */ +#define LOGLEVEL_ERR 3 /* error conditions */ +#define LOGLEVEL_WARNING 4 /* warning conditions */ +#define LOGLEVEL_NOTICE 5 /* normal but significant condition */ +#define LOGLEVEL_INFO 6 /* informational */ +#define LOGLEVEL_DEBUG 7 /* debug-level messages */ + #endif diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3d770f5564b8..233ea8107038 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -162,6 +162,7 @@ extern int _cond_resched(void); #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + void ___might_sleep(const char *file, int line, int preempt_offset); void __might_sleep(const char *file, int line, int preempt_offset); /** * might_sleep - annotation for functions that can sleep @@ -175,10 +176,14 @@ extern int _cond_resched(void); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) +# define sched_annotate_sleep() __set_current_state(TASK_RUNNING) #else + static inline void ___might_sleep(const char *file, int line, + int preempt_offset) { } static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) +# define sched_annotate_sleep() do { } while (0) #endif #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) @@ -422,6 +427,7 @@ extern int panic_timeout; extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; +extern int panic_on_warn; extern int sysctl_panic_on_stackoverflow; /* * Only to be used by arch init code. 
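Putting the hierarchy pieces together: a child domain created with irq_domain_add_hierarchy() supplies alloc/free ops that first let the parent domain allocate its level and then install the child's own chip on each descriptor, typically reusing the irq_chip_*_parent() helpers from linux/irq.h. A condensed, hypothetical sketch under those assumptions (hwirq decoding and error unwinding are omitted):

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* hypothetical child chip that simply forwards the basic ops to its parent */
static struct irq_chip example_child_chip = {
	.name		= "example-child",
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
	.irq_eoi	= irq_chip_eoi_parent,
};

static int example_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = 0;	/* normally decoded from *arg */
	unsigned int i;
	int ret;

	/* let the parent domain allocate its level of the hierarchy first */
	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
	if (ret)
		return ret;

	/* then hook this domain's chip up to each new descriptor */
	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
					      &example_child_chip, NULL);
	return 0;
}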
If the user over-wrote the default diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 8422b4ed6882..b9376cd5a187 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -77,11 +77,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) return kstat_cpu(cpu).irqs_sum; } -/* - * Lock/unlock the current runqueue - to extract task statistics: - */ -extern unsigned long long task_delta_exec(struct task_struct *); - extern void account_user_time(struct task_struct *, cputime_t, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); extern void account_steal_time(cputime_t); diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 057e95971014..e705467ddb47 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h @@ -21,6 +21,8 @@ #ifndef __KMEMLEAK_H #define __KMEMLEAK_H +#include <linux/slab.h> + #ifdef CONFIG_DEBUG_KMEMLEAK extern void kmemleak_init(void) __ref; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index f7296e57d614..5297f9fa0ef2 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -335,6 +335,7 @@ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, extern int arch_prepare_kprobe_ftrace(struct kprobe *p); #endif +int arch_check_ftrace_location(struct kprobe *p); /* Get the kprobe at this addr (if any) - called with preemption disabled */ struct kprobe *get_kprobe(void *addr); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ea53b04993f2..a6059bdf7b03 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); void kvm_vcpu_kick(struct kvm_vcpu *vcpu); -bool kvm_is_mmio_pfn(pfn_t pfn); +bool kvm_is_reserved_pfn(pfn_t pfn); struct kvm_irq_ack_notifier { struct hlist_node link; diff --git a/include/linux/leds.h b/include/linux/leds.h index a57611d0c94e..361101fef270 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -261,6 +261,7 @@ struct gpio_led { unsigned retain_state_suspended : 1; unsigned default_state : 2; /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ + struct gpio_desc *gpiod; }; #define LEDS_GPIO_DEFSTATE_OFF 0 #define LEDS_GPIO_DEFSTATE_ON 1 @@ -273,7 +274,7 @@ struct gpio_led_platform_data { #define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ #define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ #define GPIO_LED_BLINK 2 /* Please, blink */ - int (*gpio_blink_set)(unsigned gpio, int state, + int (*gpio_blink_set)(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off); }; diff --git a/include/linux/libata.h b/include/linux/libata.h index bd5fefeaf548..2d182413b1db 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -191,7 +191,8 @@ enum { ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ ATA_DEV_SEMB = 7, /* SEMB */ ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ - ATA_DEV_NONE = 9, /* no device */ + ATA_DEV_ZAC = 9, /* ZAC device */ + ATA_DEV_NONE = 10, /* no device */ /* struct ata_link flags */ ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ @@ -1191,9 +1192,9 @@ extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); extern int ata_scsi_slave_config(struct scsi_device *sdev); extern void ata_scsi_slave_destroy(struct scsi_device *sdev); extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, - int 
queue_depth, int reason); + int queue_depth); extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, - int queue_depth, int reason); + int queue_depth); extern struct ata_device *ata_dev_pair(struct ata_device *adev); extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); @@ -1491,7 +1492,8 @@ static inline unsigned int ata_tag_internal(unsigned int tag) static inline unsigned int ata_class_enabled(unsigned int class) { return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || - class == ATA_DEV_PMP || class == ATA_DEV_SEMB; + class == ATA_DEV_PMP || class == ATA_DEV_SEMB || + class == ATA_DEV_ZAC; } static inline unsigned int ata_class_disabled(unsigned int class) diff --git a/include/linux/list.h b/include/linux/list.h index f33f831eb3c8..feb773c76ee0 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -346,7 +346,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_entry - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_entry(ptr, type, member) \ container_of(ptr, type, member) @@ -355,7 +355,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_first_entry - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ @@ -366,7 +366,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_last_entry - get the last element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note, that list is expected to be not empty. */ @@ -377,7 +377,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_first_entry_or_null - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. */ @@ -387,7 +387,7 @@ static inline void list_splice_tail_init(struct list_head *list, /** * list_next_entry - get the next element in list * @pos: the type * to cursor - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_next_entry(pos, member) \ list_entry((pos)->member.next, typeof(*(pos)), member) @@ -395,7 +395,7 @@ static inline void list_splice_tail_init(struct list_head *list, /** * list_prev_entry - get the prev element in list * @pos: the type * to cursor - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. 
*/ #define list_prev_entry(pos, member) \ list_entry((pos)->member.prev, typeof(*(pos)), member) @@ -441,7 +441,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ @@ -452,7 +452,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_reverse - iterate backwards over list of given type. * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member); \ @@ -463,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() * @pos: the type * to use as a start point * @head: the head of the list - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). */ @@ -474,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position. @@ -488,7 +488,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_continue_reverse - iterate backwards from the given point * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Start to iterate over list of given type backwards, continuing after * the current position. @@ -502,7 +502,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_for_each_entry_from - iterate over list of given type from the current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing from current position. */ @@ -515,7 +515,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. */ #define list_for_each_entry_safe(pos, n, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member), \ @@ -528,7 +528,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. 
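The _safe iterators documented above exist so the loop body may unlink and free the entry it is looking at; a tiny usage sketch of that pattern with a hypothetical item type:

#include <linux/list.h>
#include <linux/slab.h>

struct example_item {
	int value;
	struct list_head node;	/* the "member" named in the kerneldoc */
};

static void example_drop_negative(struct list_head *head)
{
	struct example_item *it, *tmp;

	/* tmp holds the next entry, so freeing 'it' mid-loop is safe */
	list_for_each_entry_safe(it, tmp, head, node) {
		if (it->value < 0) {
			list_del(&it->node);
			kfree(it);
		}
	}
}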
+ * @member: the name of the list_head within the struct. * * Iterate over list of given type, continuing after current point, * safe against removal of list entry. @@ -544,7 +544,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate over list of given type from current point, safe against * removal of list entry. @@ -559,7 +559,7 @@ static inline void list_splice_tail_init(struct list_head *list, * @pos: the type * to use as a loop cursor. * @n: another type * to use as temporary storage * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Iterate backwards over list of given type, safe against removal * of list entry. @@ -574,7 +574,7 @@ static inline void list_splice_tail_init(struct list_head *list, * list_safe_reset_next - reset a stale list_for_each_entry_safe loop * @pos: the loop cursor used in the list_for_each_entry_safe loop * @n: temporary storage used in list_for_each_entry_safe - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * list_safe_reset_next is not safe to use in general if the list may be * modified concurrently (eg. the lock is dropped in the loop body). An diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h index 257d3779f2ab..0ca8109934e4 100644 --- a/include/linux/lockd/debug.h +++ b/include/linux/lockd/debug.h @@ -17,12 +17,8 @@ * Enable lockd debugging. * Requires RPC_DEBUG. */ -#ifdef RPC_DEBUG -# define LOCKD_DEBUG 1 -#endif - #undef ifdebug -#if defined(RPC_DEBUG) && defined(LOCKD_DEBUG) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) #else # define ifdebug(flag) if (0) diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h index 307d9cab2026..1726ccbd8009 100644 --- a/include/linux/mailbox_client.h +++ b/include/linux/mailbox_client.h @@ -25,6 +25,8 @@ struct mbox_chan; * if the client receives some ACK packet for transmission. * Unused if the controller already has TX_Done/RTR IRQ. * @rx_callback: Atomic callback to provide client the data received + * @tx_prepare: Atomic callback to ask client to prepare the payload + * before initiating the transmission if required. 
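
As an aside, not part of the patch: a minimal sketch of a mailbox client hooking the new tx_prepare callback. The "foo" names are hypothetical; mbox_request_channel() and mbox_send_message() are the existing mailbox client API.

    #include <linux/err.h>
    #include <linux/mailbox_client.h>

    static void foo_tx_prepare(struct mbox_client *cl, void *msg)
    {
        /* runs atomically just before the transfer starts, e.g. to copy
         * the payload into controller-visible memory */
    }

    static void foo_tx_done(struct mbox_client *cl, void *msg, int r)
    {
        /* r carries the result of the transmission */
    }

    static int foo_send(struct mbox_client *cl, void *msg)
    {
        struct mbox_chan *chan;

        /* cl->dev is assumed to have been set up by the caller */
        cl->tx_prepare = foo_tx_prepare;
        cl->tx_done = foo_tx_done;

        chan = mbox_request_channel(cl, 0);
        if (IS_ERR(chan))
            return PTR_ERR(chan);

        return mbox_send_message(chan, msg);
    }
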
* @tx_done: Atomic callback to tell client of data transmission */ struct mbox_client { @@ -34,6 +36,7 @@ struct mbox_client { bool knows_txdone; void (*rx_callback)(struct mbox_client *cl, void *mssg); + void (*tx_prepare)(struct mbox_client *cl, void *mssg); void (*tx_done)(struct mbox_client *cl, void *mssg, int r); }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 8e9a029e093d..e6982ac3200d 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -16,6 +16,7 @@ #define MARVELL_PHY_ID_88E1318S 0x01410e90 #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 +#define MARVELL_PHY_ID_88E3016 0x01410e60 /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 550c88fb0267..611b69fa8594 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -61,6 +61,7 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) } #endif +int mvebu_mbus_save_cpu_target(u32 *store_addr); void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); void mvebu_mbus_get_pcie_io_aperture(struct resource *res); int mvebu_mbus_add_window_remap_by_id(unsigned int target, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6b75640ef5ab..7c95af8d552c 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -25,7 +25,6 @@ #include <linux/jump_label.h> struct mem_cgroup; -struct page_cgroup; struct page; struct mm_struct; struct kmem_cache; @@ -68,10 +67,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); -bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, - struct mem_cgroup *memcg); -bool task_in_mem_cgroup(struct task_struct *task, - const struct mem_cgroup *memcg); +bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, + struct mem_cgroup *root); +bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); @@ -79,15 +77,16 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); -static inline -bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) +static inline bool mm_match_cgroup(struct mm_struct *mm, + struct mem_cgroup *memcg) { struct mem_cgroup *task_memcg; - bool match; + bool match = false; rcu_read_lock(); task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); - match = __mem_cgroup_same_or_subtree(memcg, task_memcg); + if (task_memcg) + match = mem_cgroup_is_descendant(task_memcg, memcg); rcu_read_unlock(); return match; } @@ -141,8 +140,8 @@ static inline bool mem_cgroup_disabled(void) struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, unsigned long *flags); -void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, - unsigned long flags); +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked, + unsigned long *flags); void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx, int val); @@ -174,10 +173,6 @@ static inline void 
mem_cgroup_count_vm_event(struct mm_struct *mm, void mem_cgroup_split_huge_fixup(struct page *head); #endif -#ifdef CONFIG_DEBUG_VM -bool mem_cgroup_bad_page_check(struct page *page); -void mem_cgroup_print_bad_page(struct page *page); -#endif #else /* CONFIG_MEMCG */ struct mem_cgroup; @@ -297,7 +292,7 @@ static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, } static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, - bool locked, unsigned long flags) + bool *locked, unsigned long *flags) { } @@ -347,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) } #endif /* CONFIG_MEMCG */ -#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM) -static inline bool -mem_cgroup_bad_page_check(struct page *page) -{ - return false; -} - -static inline void -mem_cgroup_print_bad_page(struct page *page) -{ -} -#endif - enum { UNDER_LIMIT, SOFT_LIMIT, @@ -418,8 +400,8 @@ int memcg_cache_id(struct mem_cgroup *memcg); void memcg_update_array_size(int num_groups); -struct kmem_cache * -__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); +struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep); +void __memcg_kmem_put_cache(struct kmem_cache *cachep); int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); void __memcg_uncharge_slab(struct kmem_cache *cachep, int order); @@ -447,9 +429,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) /* * __GFP_NOFAIL allocations will move on even if charging is not * possible. Therefore we don't even try, and have this allocation - * unaccounted. We could in theory charge it with - * res_counter_charge_nofail, but we hope those allocations are rare, - * and won't be worth the trouble. + * unaccounted. We could in theory charge it forcibly, but we hope + * those allocations are rare, and won't be worth the trouble. */ if (gfp & __GFP_NOFAIL) return true; @@ -467,8 +448,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) * memcg_kmem_uncharge_pages: uncharge pages from memcg * @page: pointer to struct page being freed * @order: allocation order. - * - * there is no need to specify memcg here, since it is embedded in page_cgroup */ static inline void memcg_kmem_uncharge_pages(struct page *page, int order) @@ -485,8 +464,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order) * * Needs to be called after memcg_kmem_newpage_charge, regardless of success or * failure of the allocation. if @page is NULL, this function will revert the - * charges. Otherwise, it will commit the memcg given by @memcg to the - * corresponding page_cgroup. + * charges. Otherwise, it will commit @page to @memcg. 
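
As an aside, not part of the patch: with the signature change above, the begin/end page-stat pair now passes locked and flags back by pointer. A sketch of the pairing, using one existing stat index and a hypothetical helper name:

    #include <linux/memcontrol.h>

    /* hypothetical helper mirroring how rmap updates FILE_MAPPED */
    static void foo_unaccount_mapped(struct page *page)
    {
        struct mem_cgroup *memcg;
        unsigned long flags;
        bool locked;

        memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
        mem_cgroup_update_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED, -1);
        mem_cgroup_end_page_stat(memcg, &locked, &flags);
    }
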
*/ static inline void memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) @@ -514,7 +492,13 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) if (unlikely(fatal_signal_pending(current))) return cachep; - return __memcg_kmem_get_cache(cachep, gfp); + return __memcg_kmem_get_cache(cachep); +} + +static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) +{ + if (memcg_kmem_enabled()) + __memcg_kmem_put_cache(cachep); } #else #define for_each_memcg_cache_index(_idx) \ @@ -550,6 +534,10 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { return cachep; } + +static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) +{ +} #endif /* CONFIG_MEMCG_KMEM */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h index adba89d9c660..689312745b2f 100644 --- a/include/linux/mfd/abx500/ab8500-sysctrl.h +++ b/include/linux/mfd/abx500/ab8500-sysctrl.h @@ -12,7 +12,6 @@ int ab8500_sysctrl_read(u16 reg, u8 *value); int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value); -void ab8500_restart(char mode, const char *cmd); #else diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index f34723f7663c..910e3aa1e965 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h @@ -141,6 +141,7 @@ struct arizona { uint16_t dac_comp_coeff; uint8_t dac_comp_enabled; + struct mutex dac_comp_lock; }; int arizona_clk32k_enable(struct arizona *arizona); diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h index c0b075f6bc35..aacc10d7789c 100644 --- a/include/linux/mfd/arizona/registers.h +++ b/include/linux/mfd/arizona/registers.h @@ -125,6 +125,8 @@ #define ARIZONA_MIC_BIAS_CTRL_1 0x218 #define ARIZONA_MIC_BIAS_CTRL_2 0x219 #define ARIZONA_MIC_BIAS_CTRL_3 0x21A +#define ARIZONA_HP_CTRL_1L 0x225 +#define ARIZONA_HP_CTRL_1R 0x226 #define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293 #define ARIZONA_HEADPHONE_DETECT_1 0x29B #define ARIZONA_HEADPHONE_DETECT_2 0x29C @@ -279,8 +281,16 @@ #define ARIZONA_AIF2_FRAME_CTRL_2 0x548 #define ARIZONA_AIF2_FRAME_CTRL_3 0x549 #define ARIZONA_AIF2_FRAME_CTRL_4 0x54A +#define ARIZONA_AIF2_FRAME_CTRL_5 0x54B +#define ARIZONA_AIF2_FRAME_CTRL_6 0x54C +#define ARIZONA_AIF2_FRAME_CTRL_7 0x54D +#define ARIZONA_AIF2_FRAME_CTRL_8 0x54E #define ARIZONA_AIF2_FRAME_CTRL_11 0x551 #define ARIZONA_AIF2_FRAME_CTRL_12 0x552 +#define ARIZONA_AIF2_FRAME_CTRL_13 0x553 +#define ARIZONA_AIF2_FRAME_CTRL_14 0x554 +#define ARIZONA_AIF2_FRAME_CTRL_15 0x555 +#define ARIZONA_AIF2_FRAME_CTRL_16 0x556 #define ARIZONA_AIF2_TX_ENABLES 0x559 #define ARIZONA_AIF2_RX_ENABLES 0x55A #define ARIZONA_AIF2_FORCE_WRITE 0x55B @@ -2245,6 +2255,46 @@ #define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */ /* + * R549 (0x225) - HP Ctrl 1L + */ +#define ARIZONA_RMV_SHRT_HP1L 0x4000 /* RMV_SHRT_HP1L */ +#define ARIZONA_RMV_SHRT_HP1L_MASK 0x4000 /* RMV_SHRT_HP1L */ +#define ARIZONA_RMV_SHRT_HP1L_SHIFT 14 /* RMV_SHRT_HP1L */ +#define ARIZONA_RMV_SHRT_HP1L_WIDTH 1 /* RMV_SHRT_HP1L */ +#define ARIZONA_HP1L_FLWR 0x0004 /* HP1L_FLWR */ +#define ARIZONA_HP1L_FLWR_MASK 0x0004 /* HP1L_FLWR */ +#define ARIZONA_HP1L_FLWR_SHIFT 2 /* HP1L_FLWR */ +#define ARIZONA_HP1L_FLWR_WIDTH 1 /* HP1L_FLWR */ +#define ARIZONA_HP1L_SHRTI 0x0002 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTI_MASK 0x0002 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTI_SHIFT 1 /* HP1L_SHRTI */ +#define ARIZONA_HP1L_SHRTI_WIDTH 1 /* HP1L_SHRTI */ +#define 
ARIZONA_HP1L_SHRTO 0x0001 /* HP1L_SHRTO */ +#define ARIZONA_HP1L_SHRTO_MASK 0x0001 /* HP1L_SHRTO */ +#define ARIZONA_HP1L_SHRTO_SHIFT 0 /* HP1L_SHRTO */ +#define ARIZONA_HP1L_SHRTO_WIDTH 1 /* HP1L_SHRTO */ + +/* + * R550 (0x226) - HP Ctrl 1R + */ +#define ARIZONA_RMV_SHRT_HP1R 0x4000 /* RMV_SHRT_HP1R */ +#define ARIZONA_RMV_SHRT_HP1R_MASK 0x4000 /* RMV_SHRT_HP1R */ +#define ARIZONA_RMV_SHRT_HP1R_SHIFT 14 /* RMV_SHRT_HP1R */ +#define ARIZONA_RMV_SHRT_HP1R_WIDTH 1 /* RMV_SHRT_HP1R */ +#define ARIZONA_HP1R_FLWR 0x0004 /* HP1R_FLWR */ +#define ARIZONA_HP1R_FLWR_MASK 0x0004 /* HP1R_FLWR */ +#define ARIZONA_HP1R_FLWR_SHIFT 2 /* HP1R_FLWR */ +#define ARIZONA_HP1R_FLWR_WIDTH 1 /* HP1R_FLWR */ +#define ARIZONA_HP1R_SHRTI 0x0002 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTI_MASK 0x0002 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTI_SHIFT 1 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTI_WIDTH 1 /* HP1R_SHRTI */ +#define ARIZONA_HP1R_SHRTO 0x0001 /* HP1R_SHRTO */ +#define ARIZONA_HP1R_SHRTO_MASK 0x0001 /* HP1R_SHRTO */ +#define ARIZONA_HP1R_SHRTO_SHIFT 0 /* HP1R_SHRTO */ +#define ARIZONA_HP1R_SHRTO_WIDTH 1 /* HP1R_SHRTO */ + +/* * R659 (0x293) - Accessory Detect Mode 1 */ #define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */ diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h new file mode 100644 index 000000000000..1279ab1644b5 --- /dev/null +++ b/include/linux/mfd/atmel-hlcdc.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2014 Free Electrons + * Copyright (C) 2014 Atmel + * + * Author: Boris BREZILLON <boris.brezillon@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __LINUX_MFD_HLCDC_H +#define __LINUX_MFD_HLCDC_H + +#include <linux/clk.h> +#include <linux/regmap.h> + +#define ATMEL_HLCDC_CFG(i) ((i) * 0x4) +#define ATMEL_HLCDC_SIG_CFG LCDCFG(5) +#define ATMEL_HLCDC_HSPOL BIT(0) +#define ATMEL_HLCDC_VSPOL BIT(1) +#define ATMEL_HLCDC_VSPDLYS BIT(2) +#define ATMEL_HLCDC_VSPDLYE BIT(3) +#define ATMEL_HLCDC_DISPPOL BIT(4) +#define ATMEL_HLCDC_DITHER BIT(6) +#define ATMEL_HLCDC_DISPDLY BIT(7) +#define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8) +#define ATMEL_HLCDC_PP BIT(10) +#define ATMEL_HLCDC_VSPSU BIT(12) +#define ATMEL_HLCDC_VSPHO BIT(13) +#define ATMEL_HLCDC_GUARDTIME_MASK GENMASK(20, 16) + +#define ATMEL_HLCDC_EN 0x20 +#define ATMEL_HLCDC_DIS 0x24 +#define ATMEL_HLCDC_SR 0x28 +#define ATMEL_HLCDC_IER 0x2c +#define ATMEL_HLCDC_IDR 0x30 +#define ATMEL_HLCDC_IMR 0x34 +#define ATMEL_HLCDC_ISR 0x38 + +#define ATMEL_HLCDC_CLKPOL BIT(0) +#define ATMEL_HLCDC_CLKSEL BIT(2) +#define ATMEL_HLCDC_CLKPWMSEL BIT(3) +#define ATMEL_HLCDC_CGDIS(i) BIT(8 + (i)) +#define ATMEL_HLCDC_CLKDIV_SHFT 16 +#define ATMEL_HLCDC_CLKDIV_MASK GENMASK(23, 16) +#define ATMEL_HLCDC_CLKDIV(div) ((div - 2) << ATMEL_HLCDC_CLKDIV_SHFT) + +#define ATMEL_HLCDC_PIXEL_CLK BIT(0) +#define ATMEL_HLCDC_SYNC BIT(1) +#define ATMEL_HLCDC_DISP BIT(2) +#define ATMEL_HLCDC_PWM BIT(3) +#define ATMEL_HLCDC_SIP BIT(4) + +#define ATMEL_HLCDC_SOF BIT(0) +#define ATMEL_HLCDC_SYNCDIS BIT(1) +#define ATMEL_HLCDC_FIFOERR BIT(4) +#define ATMEL_HLCDC_LAYER_STATUS(x) BIT((x) + 8) + +/** + * Structure shared by the MFD device and its subdevices. + * + * @regmap: register map used to access HLCDC IP registers + * @periph_clk: the hlcdc peripheral clock + * @sys_clk: the hlcdc system clock + * @slow_clk: the system slow clk + * @irq: the hlcdc irq + */ +struct atmel_hlcdc { + struct regmap *regmap; + struct clk *periph_clk; + struct clk *sys_clk; + struct clk *slow_clk; + int irq; +}; + +#endif /* __LINUX_MFD_HLCDC_H */ diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index d0e31a2287ac..81589d176ae8 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -14,6 +14,8 @@ enum { AXP202_ID = 0, AXP209_ID, + AXP288_ID, + NR_AXP20X_VARIANTS, }; #define AXP20X_DATACACHE(m) (0x04 + (m)) @@ -49,11 +51,13 @@ enum { #define AXP20X_IRQ3_EN 0x42 #define AXP20X_IRQ4_EN 0x43 #define AXP20X_IRQ5_EN 0x44 +#define AXP20X_IRQ6_EN 0x45 #define AXP20X_IRQ1_STATE 0x48 #define AXP20X_IRQ2_STATE 0x49 #define AXP20X_IRQ3_STATE 0x4a #define AXP20X_IRQ4_STATE 0x4b #define AXP20X_IRQ5_STATE 0x4c +#define AXP20X_IRQ6_STATE 0x4d /* ADC */ #define AXP20X_ACIN_V_ADC_H 0x56 @@ -116,6 +120,15 @@ enum { #define AXP20X_CC_CTRL 0xb8 #define AXP20X_FG_RES 0xb9 +/* AXP288 specific registers */ +#define AXP288_PMIC_ADC_H 0x56 +#define AXP288_PMIC_ADC_L 0x57 +#define AXP288_ADC_TS_PIN_CTRL 0x84 + +#define AXP288_PMIC_ADC_EN 0x84 +#define AXP288_FG_TUNE5 0xed + + /* Regulators IDs */ enum { AXP20X_LDO1 = 0, @@ -169,12 +182,58 @@ enum { AXP20X_IRQ_GPIO0_INPUT, }; +enum axp288_irqs { + AXP288_IRQ_VBUS_FALL = 2, + AXP288_IRQ_VBUS_RISE, + AXP288_IRQ_OV, + AXP288_IRQ_FALLING_ALT, + AXP288_IRQ_RISING_ALT, + AXP288_IRQ_OV_ALT, + AXP288_IRQ_DONE = 10, + AXP288_IRQ_CHARGING, + AXP288_IRQ_SAFE_QUIT, + AXP288_IRQ_SAFE_ENTER, + AXP288_IRQ_ABSENT, + AXP288_IRQ_APPEND, + AXP288_IRQ_QWBTU, + AXP288_IRQ_WBTU, + AXP288_IRQ_QWBTO, + AXP288_IRQ_WBTO, + AXP288_IRQ_QCBTU, + AXP288_IRQ_CBTU, + AXP288_IRQ_QCBTO, + AXP288_IRQ_CBTO, + AXP288_IRQ_WL2, + AXP288_IRQ_WL1, + AXP288_IRQ_GPADC, + AXP288_IRQ_OT = 31, + AXP288_IRQ_GPIO0, + 
AXP288_IRQ_GPIO1, + AXP288_IRQ_POKO, + AXP288_IRQ_POKL, + AXP288_IRQ_POKS, + AXP288_IRQ_POKN, + AXP288_IRQ_POKP, + AXP288_IRQ_TIMER, + AXP288_IRQ_MV_CHNG, + AXP288_IRQ_BC_USB_CHNG, +}; + +#define AXP288_TS_ADC_H 0x58 +#define AXP288_TS_ADC_L 0x59 +#define AXP288_GP_ADC_H 0x5a +#define AXP288_GP_ADC_L 0x5b + struct axp20x_dev { struct device *dev; struct i2c_client *i2c_client; struct regmap *regmap; struct regmap_irq_chip_data *regmap_irqc; long variant; + int nr_cells; + struct mfd_cell *cells; + const struct regmap_config *regmap_cfg; + const struct regmap_irq_chip *regmap_irq_chip; }; #endif /* __LINUX_MFD_AXP20X_H */ diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index 73e1709d4c09..a76bc100bf97 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -111,6 +111,13 @@ extern int mfd_add_devices(struct device *parent, int id, struct resource *mem_base, int irq_base, struct irq_domain *irq_domain); +static inline int mfd_add_hotplug_devices(struct device *parent, + const struct mfd_cell *cells, int n_devs) +{ + return mfd_add_devices(parent, PLATFORM_DEVID_AUTO, cells, n_devs, + NULL, 0, NULL); +} + extern void mfd_remove_devices(struct device *parent); #endif diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index cb01496bfa49..8e1cdbef3dad 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h @@ -99,12 +99,6 @@ struct davinci_vcif { dma_addr_t dma_rx_addr; }; -struct cq93vc { - struct platform_device *pdev; - struct snd_soc_codec *codec; - u32 sysclk; -}; - struct davinci_vc; struct davinci_vc { @@ -122,7 +116,6 @@ struct davinci_vc { /* Client devices */ struct davinci_vcif davinci_vcif; - struct cq93vc cq93vc; }; #endif diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h new file mode 100644 index 000000000000..004b24576da8 --- /dev/null +++ b/include/linux/mfd/dln2.h @@ -0,0 +1,103 @@ +#ifndef __LINUX_USB_DLN2_H +#define __LINUX_USB_DLN2_H + +#define DLN2_CMD(cmd, id) ((cmd) | ((id) << 8)) + +struct dln2_platform_data { + u16 handle; /* sub-driver handle (internally used only) */ + u8 port; /* I2C/SPI port */ +}; + +/** + * dln2_event_cb_t - event callback function signature + * + * @pdev - the sub-device that registered this callback + * @echo - the echo header field received in the message + * @data - the data payload + * @len - the data payload length + * + * The callback function is called in interrupt context and the data payload is + * only valid during the call. If the user needs later access of the data, it + * must copy it. 
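
As an aside, not part of the patch: a sketch of how a DLN2 sub-driver might register such an event callback. "foo" and FOO_EVENT are hypothetical placeholders; dln2_register_event_cb() is the function declared below.

    #include <linux/mfd/dln2.h>
    #include <linux/platform_device.h>

    /* hypothetical event id built with the DLN2_CMD() helper */
    #define FOO_EVENT    DLN2_CMD(0x10, 0x01)

    static void foo_event_cb(struct platform_device *pdev, u16 echo,
                             const void *data, int len)
    {
        /* interrupt context: copy data out if it is needed later */
    }

    static int foo_probe(struct platform_device *pdev)
    {
        return dln2_register_event_cb(pdev, FOO_EVENT, foo_event_cb);
    }
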
+ */ + +typedef void (*dln2_event_cb_t)(struct platform_device *pdev, u16 echo, + const void *data, int len); + +/** + * dl2n_register_event_cb - register a callback function for an event + * + * @pdev - the sub-device that registers the callback + * @event - the event for which to register a callback + * @event_cb - the callback function + * + * @return 0 in case of success, negative value in case of error + */ +int dln2_register_event_cb(struct platform_device *pdev, u16 event, + dln2_event_cb_t event_cb); + +/** + * dln2_unregister_event_cb - unregister the callback function for an event + * + * @pdev - the sub-device that registered the callback + * @event - the event for which to register a callback + */ +void dln2_unregister_event_cb(struct platform_device *pdev, u16 event); + +/** + * dln2_transfer - issue a DLN2 command and wait for a response and the + * associated data + * + * @pdev - the sub-device which is issuing this transfer + * @cmd - the command to be sent to the device + * @obuf - the buffer to be sent to the device; it can be NULL if the user + * doesn't need to transmit data with this command + * @obuf_len - the size of the buffer to be sent to the device + * @ibuf - any data associated with the response will be copied here; it can be + * NULL if the user doesn't need the response data + * @ibuf_len - must be initialized to the input buffer size; it will be modified + * to indicate the actual data transferred; + * + * @return 0 for success, negative value for errors + */ +int dln2_transfer(struct platform_device *pdev, u16 cmd, + const void *obuf, unsigned obuf_len, + void *ibuf, unsigned *ibuf_len); + +/** + * dln2_transfer_rx - variant of @dln2_transfer() where TX buffer is not needed + * + * @pdev - the sub-device which is issuing this transfer + * @cmd - the command to be sent to the device + * @ibuf - any data associated with the response will be copied here; it can be + * NULL if the user doesn't need the response data + * @ibuf_len - must be initialized to the input buffer size; it will be modified + * to indicate the actual data transferred; + * + * @return 0 for success, negative value for errors + */ + +static inline int dln2_transfer_rx(struct platform_device *pdev, u16 cmd, + void *ibuf, unsigned *ibuf_len) +{ + return dln2_transfer(pdev, cmd, NULL, 0, ibuf, ibuf_len); +} + +/** + * dln2_transfer_tx - variant of @dln2_transfer() where RX buffer is not needed + * + * @pdev - the sub-device which is issuing this transfer + * @cmd - the command to be sent to the device + * @obuf - the buffer to be sent to the device; it can be NULL if the + * user doesn't need to transmit data with this command + * @obuf_len - the size of the buffer to be sent to the device + * + * @return 0 for success, negative value for errors + */ +static inline int dln2_transfer_tx(struct platform_device *pdev, u16 cmd, + const void *obuf, unsigned obuf_len) +{ + return dln2_transfer(pdev, cmd, obuf, obuf_len, NULL, NULL); +} + +#endif diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h index 7e6dc4b2b795..553f7d09258a 100644 --- a/include/linux/mfd/max77686.h +++ b/include/linux/mfd/max77686.h @@ -131,13 +131,6 @@ enum max77686_opmode { MAX77686_OPMODE_STANDBY, }; -enum max77802_opmode { - MAX77802_OPMODE_OFF, - MAX77802_OPMODE_STANDBY, - MAX77802_OPMODE_LP, - MAX77802_OPMODE_NORMAL, -}; - struct max77686_opmode_data { int id; int mode; diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index fc17d56581b2..08dae01258b9 100644 --- 
a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -26,7 +26,6 @@ #include <linux/i2c.h> -#define MAX77693_NUM_IRQ_MUIC_REGS 3 #define MAX77693_REG_INVALID (0xff) /* Slave addr = 0xCC: PMIC, Charger, Flash LED */ @@ -330,6 +329,13 @@ enum max77693_irq_source { MAX77693_IRQ_GROUP_NR, }; +#define SRC_IRQ_CHARGER BIT(0) +#define SRC_IRQ_TOP BIT(1) +#define SRC_IRQ_FLASH BIT(2) +#define SRC_IRQ_MUIC BIT(3) +#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \ + | SRC_IRQ_FLASH | SRC_IRQ_MUIC) + #define LED_IRQ_FLED2_OPEN BIT(0) #define LED_IRQ_FLED2_SHORT BIT(1) #define LED_IRQ_FLED1_OPEN BIT(2) diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index 74346d5e7899..0c12628e91c6 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -558,6 +558,7 @@ #define SD_SAMPLE_POINT_CTL 0xFDA7 #define SD_PUSH_POINT_CTL 0xFDA8 #define SD_CMD0 0xFDA9 +#define SD_CMD_START 0x40 #define SD_CMD1 0xFDAA #define SD_CMD2 0xFDAB #define SD_CMD3 0xFDAC @@ -707,6 +708,14 @@ #define PM_CTRL1 0xFF44 #define PM_CTRL2 0xFF45 #define PM_CTRL3 0xFF46 +#define SDIO_SEND_PME_EN 0x80 +#define FORCE_RC_MODE_ON 0x40 +#define FORCE_RX50_LINK_ON 0x20 +#define D3_DELINK_MODE_EN 0x10 +#define USE_PESRTB_CTL_DELINK 0x08 +#define DELAY_PIN_WAKE 0x04 +#define RESET_PIN_WAKE 0x02 +#define PM_WAKE_EN 0x01 #define PM_CTRL4 0xFF47 /* Memory mapping */ @@ -752,6 +761,14 @@ #define PHY_DUM_REG 0x1F #define LCTLR 0x80 +#define LCTLR_EXT_SYNC 0x80 +#define LCTLR_COMMON_CLOCK_CFG 0x40 +#define LCTLR_RETRAIN_LINK 0x20 +#define LCTLR_LINK_DISABLE 0x10 +#define LCTLR_RCB 0x08 +#define LCTLR_RESERVED 0x04 +#define LCTLR_ASPM_CTL_MASK 0x03 + #define PCR_SETTING_REG1 0x724 #define PCR_SETTING_REG2 0x814 #define PCR_SETTING_REG3 0x747 @@ -967,4 +984,24 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr) return (u8 *)(pcr->host_cmds_ptr); } +static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr, + u8 mask, u8 append) +{ + int err; + u8 val; + + err = pci_read_config_byte(pcr->pci, addr, &val); + if (err < 0) + return err; + return pci_write_config_byte(pcr->pci, addr, (val & mask) | append); +} + +static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val) +{ + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 1, 0xFF, val >> 16); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val); +} + #endif diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index 1825edacbda7..3fdb7cfbffb3 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -28,6 +28,7 @@ #define MIN_800_MV 800000 #define MIN_750_MV 750000 #define MIN_600_MV 600000 +#define MIN_500_MV 500000 /* Macros to represent steps for LDO/BUCK */ #define STEP_50_MV 50000 @@ -41,6 +42,7 @@ enum sec_device_type { S5M8767X, S2MPA01, S2MPS11X, + S2MPS13X, S2MPS14X, S2MPU02, }; diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h new file mode 100644 index 000000000000..ce5dda8958fe --- /dev/null +++ b/include/linux/mfd/samsung/s2mps13.h @@ -0,0 +1,186 @@ +/* + * s2mps13.h + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the 
License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_MFD_S2MPS13_H +#define __LINUX_MFD_S2MPS13_H + +/* S2MPS13 registers */ +enum s2mps13_reg { + S2MPS13_REG_ID, + S2MPS13_REG_INT1, + S2MPS13_REG_INT2, + S2MPS13_REG_INT3, + S2MPS13_REG_INT1M, + S2MPS13_REG_INT2M, + S2MPS13_REG_INT3M, + S2MPS13_REG_ST1, + S2MPS13_REG_ST2, + S2MPS13_REG_PWRONSRC, + S2MPS13_REG_OFFSRC, + S2MPS13_REG_BU_CHG, + S2MPS13_REG_RTCCTRL, + S2MPS13_REG_CTRL1, + S2MPS13_REG_CTRL2, + S2MPS13_REG_RSVD1, + S2MPS13_REG_RSVD2, + S2MPS13_REG_RSVD3, + S2MPS13_REG_RSVD4, + S2MPS13_REG_RSVD5, + S2MPS13_REG_RSVD6, + S2MPS13_REG_CTRL3, + S2MPS13_REG_RSVD7, + S2MPS13_REG_RSVD8, + S2MPS13_REG_WRSTBI, + S2MPS13_REG_B1CTRL, + S2MPS13_REG_B1OUT, + S2MPS13_REG_B2CTRL, + S2MPS13_REG_B2OUT, + S2MPS13_REG_B3CTRL, + S2MPS13_REG_B3OUT, + S2MPS13_REG_B4CTRL, + S2MPS13_REG_B4OUT, + S2MPS13_REG_B5CTRL, + S2MPS13_REG_B5OUT, + S2MPS13_REG_B6CTRL, + S2MPS13_REG_B6OUT, + S2MPS13_REG_B7CTRL, + S2MPS13_REG_B7OUT, + S2MPS13_REG_B8CTRL, + S2MPS13_REG_B8OUT, + S2MPS13_REG_B9CTRL, + S2MPS13_REG_B9OUT, + S2MPS13_REG_B10CTRL, + S2MPS13_REG_B10OUT, + S2MPS13_REG_BB1CTRL, + S2MPS13_REG_BB1OUT, + S2MPS13_REG_BUCK_RAMP1, + S2MPS13_REG_BUCK_RAMP2, + S2MPS13_REG_LDO_DVS1, + S2MPS13_REG_LDO_DVS2, + S2MPS13_REG_LDO_DVS3, + S2MPS13_REG_B6OUT2, + S2MPS13_REG_L1CTRL, + S2MPS13_REG_L2CTRL, + S2MPS13_REG_L3CTRL, + S2MPS13_REG_L4CTRL, + S2MPS13_REG_L5CTRL, + S2MPS13_REG_L6CTRL, + S2MPS13_REG_L7CTRL, + S2MPS13_REG_L8CTRL, + S2MPS13_REG_L9CTRL, + S2MPS13_REG_L10CTRL, + S2MPS13_REG_L11CTRL, + S2MPS13_REG_L12CTRL, + S2MPS13_REG_L13CTRL, + S2MPS13_REG_L14CTRL, + S2MPS13_REG_L15CTRL, + S2MPS13_REG_L16CTRL, + S2MPS13_REG_L17CTRL, + S2MPS13_REG_L18CTRL, + S2MPS13_REG_L19CTRL, + S2MPS13_REG_L20CTRL, + S2MPS13_REG_L21CTRL, + S2MPS13_REG_L22CTRL, + S2MPS13_REG_L23CTRL, + S2MPS13_REG_L24CTRL, + S2MPS13_REG_L25CTRL, + S2MPS13_REG_L26CTRL, + S2MPS13_REG_L27CTRL, + S2MPS13_REG_L28CTRL, + S2MPS13_REG_L30CTRL, + S2MPS13_REG_L31CTRL, + S2MPS13_REG_L32CTRL, + S2MPS13_REG_L33CTRL, + S2MPS13_REG_L34CTRL, + S2MPS13_REG_L35CTRL, + S2MPS13_REG_L36CTRL, + S2MPS13_REG_L37CTRL, + S2MPS13_REG_L38CTRL, + S2MPS13_REG_L39CTRL, + S2MPS13_REG_L40CTRL, + S2MPS13_REG_LDODSCH1, + S2MPS13_REG_LDODSCH2, + S2MPS13_REG_LDODSCH3, + S2MPS13_REG_LDODSCH4, + S2MPS13_REG_LDODSCH5, +}; + +/* regulator ids */ +enum s2mps13_regulators { + S2MPS13_LDO1, + S2MPS13_LDO2, + S2MPS13_LDO3, + S2MPS13_LDO4, + S2MPS13_LDO5, + S2MPS13_LDO6, + S2MPS13_LDO7, + S2MPS13_LDO8, + S2MPS13_LDO9, + S2MPS13_LDO10, + S2MPS13_LDO11, + S2MPS13_LDO12, + S2MPS13_LDO13, + S2MPS13_LDO14, + S2MPS13_LDO15, + S2MPS13_LDO16, + S2MPS13_LDO17, + S2MPS13_LDO18, + S2MPS13_LDO19, + S2MPS13_LDO20, + S2MPS13_LDO21, + S2MPS13_LDO22, + S2MPS13_LDO23, + S2MPS13_LDO24, + S2MPS13_LDO25, + S2MPS13_LDO26, + S2MPS13_LDO27, + S2MPS13_LDO28, + S2MPS13_LDO29, + S2MPS13_LDO30, + S2MPS13_LDO31, + S2MPS13_LDO32, + S2MPS13_LDO33, + S2MPS13_LDO34, + S2MPS13_LDO35, + S2MPS13_LDO36, + S2MPS13_LDO37, + S2MPS13_LDO38, + S2MPS13_LDO39, + S2MPS13_LDO40, + S2MPS13_BUCK1, + S2MPS13_BUCK2, + S2MPS13_BUCK3, + S2MPS13_BUCK4, + S2MPS13_BUCK5, + S2MPS13_BUCK6, + S2MPS13_BUCK7, + S2MPS13_BUCK8, + S2MPS13_BUCK9, + S2MPS13_BUCK10, + + S2MPS13_REGULATOR_MAX, +}; + +/* + * Default ramp delay in uv/us. 
Datasheet says that ramp delay can be + * controlled however it does not specify which register is used for that. + * Let's assume that default value will be set. + */ +#define S2MPS13_BUCK_RAMP_DELAY 12500 + +#endif /* __LINUX_MFD_S2MPS13_H */ diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index ff44374a1a4e..c877cad61a13 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -395,4 +395,43 @@ #define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17) #define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14) +/* For imx6sx iomux gpr register field define */ +#define IMX6SX_GPR1_VDEC_SW_RST_MASK (0x1 << 20) +#define IMX6SX_GPR1_VDEC_SW_RST_RESET (0x1 << 20) +#define IMX6SX_GPR1_VDEC_SW_RST_RELEASE (0x0 << 20) +#define IMX6SX_GPR1_VADC_SW_RST_MASK (0x1 << 19) +#define IMX6SX_GPR1_VADC_SW_RST_RESET (0x1 << 19) +#define IMX6SX_GPR1_VADC_SW_RST_RELEASE (0x0 << 19) +#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK (0x3 << 13) +#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17) +#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13) + +#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3) +#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4) + +#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK (0x1 << 3) +#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1 (0x0 << 3) +#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2 (0x1 << 3) + +#define IMX6SX_GPR5_CSI2_MUX_CTRL_MASK (0x3 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_EXT_PIN (0x0 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_CVD (0x1 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_VDAC_TO_CSI (0x2 << 27) +#define IMX6SX_GPR5_CSI2_MUX_CTRL_GND (0x3 << 27) +#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26) +#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26) +#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_VDAC_TO_CSI (0x2 << 4) +#define IMX6SX_GPR5_CSI1_MUX_CTRL_GND (0x3 << 4) + +#define IMX6SX_GPR5_DISP_MUX_DCIC2_LCDIF2 (0x0 << 2) +#define IMX6SX_GPR5_DISP_MUX_DCIC2_LVDS (0x1 << 2) +#define IMX6SX_GPR5_DISP_MUX_DCIC2_MASK (0x1 << 2) +#define IMX6SX_GPR5_DISP_MUX_DCIC1_LCDIF1 (0x0 << 1) +#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1) +#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1) + #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h index e6088c2e2092..e1c12d84c26a 100644 --- a/include/linux/mfd/tc3589x.h +++ b/include/linux/mfd/tc3589x.h @@ -164,13 +164,10 @@ struct tc3589x_keypad_platform_data { /** * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data - * @gpio_base: first gpio number assigned to TC3589x. A maximum of - * %TC3589x_NR_GPIOS GPIOs will be allocated. * @setup: callback for board-specific initialization * @remove: callback for board-specific teardown */ struct tc3589x_gpio_platform_data { - int gpio_base; void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base); void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base); }; @@ -178,18 +175,13 @@ struct tc3589x_gpio_platform_data { /** * struct tc3589x_platform_data - TC3589x platform data * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*) - * @irq_base: base IRQ number. %TC3589x_NR_IRQS irqs will be used. 
* @gpio: GPIO-specific platform data * @keypad: keypad-specific platform data */ struct tc3589x_platform_data { unsigned int block; - int irq_base; struct tc3589x_gpio_platform_data *gpio; const struct tc3589x_keypad_platform_data *keypad; }; -#define TC3589x_NR_GPIOS 24 -#define TC3589x_NR_IRQS TC3589x_INT_GPIO(TC3589x_NR_GPIOS) - #endif diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 53d33dee70e1..2e5b194b9b19 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -37,7 +37,6 @@ /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 -#define MICREL_PHY_25MHZ_CLK 0x00000002 #define MICREL_KSZ9021_EXTREG_CTRL 0xB #define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 379c02648ab3..64d25941b329 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -67,6 +67,8 @@ enum { MLX4_CMD_MAP_ICM_AUX = 0xffc, MLX4_CMD_UNMAP_ICM_AUX = 0xffb, MLX4_CMD_SET_ICM_SIZE = 0xffd, + MLX4_CMD_ACCESS_REG = 0x3b, + /*master notify fw on finish for slave's flr*/ MLX4_CMD_INFORM_FLR_DONE = 0x5b, MLX4_CMD_GET_OP_REQ = 0x59, @@ -197,6 +199,33 @@ enum { MLX4_CMD_NATIVE }; +/* + * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP - + * Receive checksum value is reported in CQE also for non TCP/UDP packets. + * + * MLX4_RX_CSUM_MODE_L4 - + * L4_CSUM bit in CQE, which indicates whether or not L4 checksum + * was validated correctly, is supported. + * + * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP - + * IP_OK CQE's field is supported also for non TCP/UDP IP packets. + * + * MLX4_RX_CSUM_MODE_MULTI_VLAN - + * Receive Checksum offload is supported for packets with more than 2 vlan headers. + */ +enum mlx4_rx_csum_mode { + MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0, + MLX4_RX_CSUM_MODE_L4 = 1UL << 1, + MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2, + MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3 +}; + +struct mlx4_config_dev_params { + u16 vxlan_udp_dport; + u8 rx_csum_flags_port_1; + u8 rx_csum_flags_port_2; +}; + struct mlx4_dev; struct mlx4_cmd_mailbox { @@ -248,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); +int mlx4_config_dev_retrieval(struct mlx4_dev *dev, + struct mlx4_config_dev_params *params); /* * mlx4_get_slave_default_vlan - * return true if VST ( default vlan) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 37e4404d0227..25c791e295fd 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -95,7 +95,7 @@ enum { enum { MLX4_MAX_NUM_PF = 16, - MLX4_MAX_NUM_VF = 64, + MLX4_MAX_NUM_VF = 126, MLX4_MAX_NUM_VF_P_PORT = 64, MLX4_MFUNC_MAX = 80, MLX4_MAX_EQ_NUM = 1024, @@ -117,6 +117,14 @@ enum { MLX4_STEERING_MODE_DEVICE_MANAGED }; +enum { + MLX4_STEERING_DMFS_A0_DEFAULT, + MLX4_STEERING_DMFS_A0_DYNAMIC, + MLX4_STEERING_DMFS_A0_STATIC, + MLX4_STEERING_DMFS_A0_DISABLE, + MLX4_STEERING_DMFS_A0_NOT_SUPPORTED +}; + static inline const char *mlx4_steering_mode_str(int steering_mode) { switch (steering_mode) { @@ -186,7 +194,31 @@ enum { MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, - MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 + MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL 
<< 13, + MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14, + MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15, + MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, + MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, + MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, + MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 +}; + +enum { + MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0, + MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1 +}; + +/* bit enums for an 8-bit flags field indicating special use + * QPs which require special handling in qp_reserve_range. + * Currently, this only includes QPs used by the ETH interface, + * where we expect to use blueflame. These QPs must not have + * bits 6 and 7 set in their qp number. + * + * This enum may use only bits 0..7. + */ +enum { + MLX4_RESERVE_A0_QP = 1 << 6, + MLX4_RESERVE_ETH_BF_QP = 1 << 7, }; enum { @@ -202,7 +234,8 @@ enum { enum { MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, - MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 + MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, + MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2 }; @@ -328,6 +361,8 @@ enum { enum mlx4_qp_region { MLX4_QP_REGION_FW = 0, + MLX4_QP_REGION_RSS_RAW_ETH, + MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH, MLX4_QP_REGION_ETH_ADDR, MLX4_QP_REGION_FC_ADDR, MLX4_QP_REGION_FC_EXCH, @@ -379,6 +414,13 @@ enum { #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) +enum mlx4_module_id { + MLX4_MODULE_ID_SFP = 0x3, + MLX4_MODULE_ID_QSFP = 0xC, + MLX4_MODULE_ID_QSFP_PLUS = 0xD, + MLX4_MODULE_ID_QSFP28 = 0x11, +}; + static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) { return (major << 32) | (minor << 16) | subminor; @@ -433,6 +475,7 @@ struct mlx4_caps { int num_cqs; int max_cqes; int reserved_cqs; + int num_sys_eqs; int num_eqs; int reserved_eqs; int num_comp_vectors; @@ -449,6 +492,7 @@ struct mlx4_caps { int reserved_mcgs; int num_qp_per_mgm; int steering_mode; + int dmfs_high_steer_mode; int fs_log_max_ucast_qp_range_size; int num_pds; int reserved_pds; @@ -487,6 +531,10 @@ struct mlx4_caps { u16 hca_core_clock; u64 phys_port_id[MLX4_MAX_PORTS + 1]; int tunnel_offload_mode; + u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1]; + u8 alloc_res_qp_mask; + u32 dmfs_high_rate_qpn_base; + u32 dmfs_high_rate_qpn_range; }; struct mlx4_buf_list { @@ -607,6 +655,11 @@ struct mlx4_cq { atomic_t refcount; struct completion free; + struct { + struct list_head list; + void (*comp)(struct mlx4_cq *); + void *priv; + } tasklet_ctx; }; struct mlx4_qp { @@ -799,6 +852,26 @@ struct mlx4_init_port_param { u64 si_guid; }; +#define MAD_IFC_DATA_SZ 192 +/* MAD IFC Mailbox */ +struct mlx4_mad_ifc { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + __be16 class_specific; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; + __be64 mkey; + __be16 dr_slid; + __be16 dr_dlid; + u8 reserved[28]; + u8 data[MAD_IFC_DATA_SZ]; +} __packed; + #define mlx4_foreach_port(port, dev, type) \ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ if ((type) == (dev)->caps.port_mask[(port)]) @@ -835,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) { return (qpn < dev->phys_caps.base_sqpn + 8 + - 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev)); + 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) && + qpn >= dev->phys_caps.base_sqpn) || + (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); } static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) @@ -911,8 
+986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, unsigned vector, int collapsed, int timestamp_en); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); - -int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); +int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, + int *base, u8 flags); void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, @@ -1283,10 +1358,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, u64 iova, u64 size, int npages, int page_shift, struct mlx4_mpt_entry *mpt_entry); +int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, + u16 offset, u16 size, u8 *data); + /* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) { return is_kdump_kernel(); } +/* ACCESS REG commands */ +enum mlx4_access_reg_method { + MLX4_ACCESS_REG_QUERY = 0x1, + MLX4_ACCESS_REG_WRITE = 0x2, +}; + +/* ACCESS PTYS Reg command */ +enum mlx4_ptys_proto { + MLX4_PTYS_IB = 1<<0, + MLX4_PTYS_EN = 1<<2, +}; + +struct mlx4_ptys_reg { + u8 resrvd1; + u8 local_port; + u8 resrvd2; + u8 proto_mask; + __be32 resrvd3[2]; + __be32 eth_proto_cap; + __be16 ib_width_cap; + __be16 ib_speed_cap; + __be32 resrvd4; + __be32 eth_proto_admin; + __be16 ib_width_admin; + __be16 ib_speed_admin; + __be32 resrvd5; + __be32 eth_proto_oper; + __be16 ib_width_oper; + __be16 ib_speed_oper; + __be32 resrvd6; + __be32 eth_proto_lp_adv; +} __packed; + +int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev, + enum mlx4_access_reg_method method, + struct mlx4_ptys_reg *ptys_reg); + #endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 5f4e36cf0091..467ccdf94c98 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -120,13 +120,15 @@ enum { MLX4_RSS_QPC_FLAG_OFFSET = 13, }; +#define MLX4_EN_RSS_KEY_SIZE 40 + struct mlx4_rss_context { __be32 base_qpn; __be32 default_qpn; u16 reserved; u8 hash_fn; u8 flags; - __be32 rss_key[10]; + __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)]; __be32 base_qpn_udp; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 1d67fd32e71c..ea4f1c46f761 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -219,23 +219,15 @@ enum { }; enum { - MLX5_DEV_CAP_FLAG_RC = 1LL << 0, - MLX5_DEV_CAP_FLAG_UC = 1LL << 1, - MLX5_DEV_CAP_FLAG_UD = 1LL << 2, MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, - MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6, MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, MLX5_DEV_CAP_FLAG_APM = 1LL << 17, MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, - MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, - MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, - MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, - MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 246310dc8bef..b1bf41556b32 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -633,14 +633,6 @@ static inline void *mlx5_vzalloc(unsigned long size) return rtn; } -static inline void mlx5_vfree(const void *addr) -{ - if 
(addr && is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); -} - static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; diff --git a/include/linux/mm.h b/include/linux/mm.h index b46461116cd2..c0a67b894c4c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -19,6 +19,7 @@ #include <linux/bit_spinlock.h> #include <linux/shrinker.h> #include <linux/resource.h> +#include <linux/page_ext.h> struct mempolicy; struct anon_vma; @@ -56,6 +57,17 @@ extern int sysctl_legacy_va_layout; #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) #endif +/* + * To prevent common memory management code establishing + * a zero page mapping on a read fault. + * This macro should be defined within <asm/pgtable.h>. + * s390 does this to prevent multiplexing of hardware bits + * related to the physical page in case of virtualization. + */ +#ifndef mm_forbids_zeropage +#define mm_forbids_zeropage(X) (0) +#endif + extern unsigned long sysctl_user_reserve_kbytes; extern unsigned long sysctl_admin_reserve_kbytes; @@ -128,6 +140,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ +#define VM_ARCH_2 0x02000000 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ #ifdef CONFIG_MEM_SOFT_DIRTY @@ -155,6 +168,11 @@ extern unsigned int kobjsize(const void *objp); # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ #endif +#if defined(CONFIG_X86) +/* MPX specific bounds table or bounds directory */ +# define VM_MPX VM_ARCH_2 +#endif + #ifndef VM_GROWSUP # define VM_GROWSUP VM_NONE #endif @@ -2043,7 +2061,22 @@ static inline void vm_stat_account(struct mm_struct *mm, #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_DEBUG_PAGEALLOC -extern void kernel_map_pages(struct page *page, int numpages, int enable); +extern bool _debug_pagealloc_enabled; +extern void __kernel_map_pages(struct page *page, int numpages, int enable); + +static inline bool debug_pagealloc_enabled(void) +{ + return _debug_pagealloc_enabled; +} + +static inline void +kernel_map_pages(struct page *page, int numpages, int enable) +{ + if (!debug_pagealloc_enabled()) + return; + + __kernel_map_pages(page, numpages, enable); +} #ifdef CONFIG_HIBERNATION extern bool kernel_page_present(struct page *page); #endif /* CONFIG_HIBERNATION */ @@ -2077,9 +2110,9 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); #endif -unsigned long shrink_slab(struct shrink_control *shrink, - unsigned long nr_pages_scanned, - unsigned long lru_pages); +unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid, + unsigned long nr_scanned, + unsigned long nr_eligible); #ifndef CONFIG_MMU #define randomize_va_space 0 @@ -2138,20 +2171,36 @@ extern void copy_user_huge_page(struct page *dst, struct page *src, unsigned int pages_per_huge_page); #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ +extern struct page_ext_operations debug_guardpage_ops; +extern struct page_ext_operations page_poisoning_ops; + #ifdef CONFIG_DEBUG_PAGEALLOC extern unsigned int _debug_guardpage_minorder; +extern bool _debug_guardpage_enabled; static inline unsigned int debug_guardpage_minorder(void) { return _debug_guardpage_minorder; } +static inline bool debug_guardpage_enabled(void) +{ + return _debug_guardpage_enabled; +} + static inline bool page_is_guard(struct page *page) { - return 
test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); + struct page_ext *page_ext; + + if (!debug_guardpage_enabled()) + return false; + + page_ext = lookup_page_ext(page); + return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); } #else static inline unsigned int debug_guardpage_minorder(void) { return 0; } +static inline bool debug_guardpage_enabled(void) { return false; } static inline bool page_is_guard(struct page *page) { return false; } #endif /* CONFIG_DEBUG_PAGEALLOC */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6e0b286649f1..6d34aa266a8c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -10,7 +10,6 @@ #include <linux/rwsem.h> #include <linux/completion.h> #include <linux/cpumask.h> -#include <linux/page-debug-flags.h> #include <linux/uprobes.h> #include <linux/page-flags-layout.h> #include <asm/page.h> @@ -22,6 +21,7 @@ #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) struct address_space; +struct mem_cgroup; #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ @@ -167,6 +167,10 @@ struct page { struct page *first_page; /* Compound tail pages */ }; +#ifdef CONFIG_MEMCG + struct mem_cgroup *mem_cgroup; +#endif + /* * On machines where all RAM is mapped into kernel address space, * we can simply calculate the virtual address. On machines with @@ -181,9 +185,6 @@ struct page { void *virtual; /* Kernel virtual address (NULL if not kmapped, ie. highmem) */ #endif /* WANT_PAGE_VIRTUAL */ -#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS - unsigned long debug_flags; /* Use atomic bitops on this */ -#endif #ifdef CONFIG_KMEMCHECK /* @@ -454,6 +455,10 @@ struct mm_struct { bool tlb_flush_pending; #endif struct uprobes_state uprobes_state; +#ifdef CONFIG_X86_INTEL_MPX + /* address of the bounds directory */ + void __user *bd_addr; +#endif }; static inline void mm_init_cpumask(struct mm_struct *mm) @@ -525,4 +530,12 @@ enum tlb_flush_reason { NR_TLB_FLUSH_REASONS, }; + /* + * A swap entry has to fit into a "unsigned long", as the entry is hidden + * in the "index" field of the swapper address space. + */ +typedef struct { + unsigned long val; +} swp_entry_t; + #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index b0692d28f8e6..4d69c00497bd 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -88,6 +88,9 @@ struct mmc_ext_csd { unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ unsigned int boot_ro_lock; /* ro lock support */ bool boot_ro_lockable; + bool ffu_capable; /* Firmware upgrade support */ +#define MMC_FIRMWARE_LEN 8 + u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */ u8 raw_exception_status; /* 54 */ u8 raw_partition_support; /* 160 */ u8 raw_rpmb_size_mult; /* 168 */ @@ -509,24 +512,8 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c) #define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev) -#define mmc_list_to_card(l) container_of(l, struct mmc_card, node) -#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev) -#define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d) - -/* - * MMC device driver (e.g., Flash card, I/O card...) 
- */ -struct mmc_driver { - struct device_driver drv; - int (*probe)(struct mmc_card *); - void (*remove)(struct mmc_card *); - int (*suspend)(struct mmc_card *); - int (*resume)(struct mmc_card *); - void (*shutdown)(struct mmc_card *); -}; - -extern int mmc_register_driver(struct mmc_driver *); -extern void mmc_unregister_driver(struct mmc_driver *); +extern int mmc_register_driver(struct device_driver *); +extern void mmc_unregister_driver(struct device_driver *); extern void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table); diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index f206e29f94d7..cb2b0400d284 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -154,7 +154,8 @@ extern void mmc_start_bkops(struct mmc_card *card, bool from_exception); extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, bool, bool); extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); -extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); +extern int mmc_send_tuning(struct mmc_host *host); +extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); #define MMC_ERASE_ARG 0x00000000 #define MMC_SECURE_ERASE_ARG 0x80000000 diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index 001366927cf4..42b724e8d503 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -54,6 +54,7 @@ struct mmc_data; * transfer is in progress. * @use_dma: Whether DMA channel is initialized or not. * @using_dma: Whether DMA is in use for the current transfer. + * @dma_64bit_address: Whether DMA supports 64-bit address mode or not. * @sg_dma: Bus address of DMA buffer. * @sg_cpu: Virtual address of DMA buffer. * @dma_ops: Pointer to platform-specific DMA callbacks. @@ -96,6 +97,7 @@ struct mmc_data; * @quirks: Set of quirks that apply to specific versions of the IP. * @irq_flags: The flags to be passed to request_irq. * @irq: The irq value to be passed to request_irq. + * @sdio_id0: Number of slot0 in the SDIO interrupt registers. 
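
As an aside, not part of the patch: with struct mmc_driver gone, a card driver registers a plain struct device_driver and recovers the card with mmc_dev_to_card() in probe. A hedged sketch with hypothetical "foo" names:

    #include <linux/device.h>
    #include <linux/module.h>
    #include <linux/mmc/card.h>

    /* probe is now an ordinary struct device_driver callback */
    static int foo_card_probe(struct device *dev)
    {
        struct mmc_card *card = mmc_dev_to_card(dev);

        dev_info(&card->dev, "foo card driver bound\n");
        return 0;
    }

    static struct device_driver foo_card_driver = {
        .name  = "foo_card",
        .probe = foo_card_probe,
    };

    static int __init foo_card_init(void)
    {
        return mmc_register_driver(&foo_card_driver);
    }
    module_init(foo_card_init);
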
* * Locking * ======= @@ -135,11 +137,11 @@ struct dw_mci { struct mmc_command stop_abort; unsigned int prev_blksz; unsigned char timing; - struct workqueue_struct *card_workqueue; /* DMA interface members*/ int use_dma; int using_dma; + int dma_64bit_address; dma_addr_t sg_dma; void *sg_cpu; @@ -154,7 +156,6 @@ struct dw_mci { u32 stop_cmdr; u32 dir_status; struct tasklet_struct tasklet; - struct work_struct card_work; unsigned long pending_events; unsigned long completed_events; enum dw_mci_state state; @@ -193,6 +194,8 @@ struct dw_mci { bool vqmmc_enabled; unsigned long irq_flags; /* IRQ flags */ int irq; + + int sdio_id0; }; /* DMA ops for Internal/External DMAC interface */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index df0c15396bbf..9f322706f7cb 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -289,6 +289,7 @@ struct mmc_host { #define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ #define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ MMC_CAP2_HS400_1_2V) +#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V) #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) mmc_pm_flag_t pm_caps; /* supported pm features */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 1cd00b3a75b9..49ad7a943638 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -296,6 +296,7 @@ struct _mmc_csd { #define EXT_CSD_SANITIZE_START 165 /* W */ #define EXT_CSD_WR_REL_PARAM 166 /* RO */ #define EXT_CSD_RPMB_MULT 168 /* RO */ +#define EXT_CSD_FW_CONFIG 169 /* R/W */ #define EXT_CSD_BOOT_WP 173 /* R/W */ #define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ #define EXT_CSD_PART_CONFIG 179 /* R/W */ @@ -332,6 +333,8 @@ struct _mmc_csd { #define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ #define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ #define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ +#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */ +#define EXT_CSD_SUPPORTED_MODE 493 /* RO */ #define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ #define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ #define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index dba793e3a331..375af80bde7d 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h @@ -100,6 +100,12 @@ struct sdhci_host { #define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) /* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ #define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) +/* Controller does not support 64-bit DMA */ +#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9) +/* need clear transfer mode register before send cmd */ +#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10) +/* Capability register bit-63 indicates HS400 support */ +#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ @@ -130,6 +136,7 @@ struct sdhci_host { #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ +#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ unsigned int version; /* SDHCI spec. 
version */ @@ -155,12 +162,19 @@ struct sdhci_host { int sg_count; /* Mapped sg entries */ - u8 *adma_desc; /* ADMA descriptor table */ - u8 *align_buffer; /* Bounce buffer */ + void *adma_table; /* ADMA descriptor table */ + void *align_buffer; /* Bounce buffer */ + + size_t adma_table_sz; /* ADMA descriptor table size */ + size_t align_buffer_sz; /* Bounce buffer size */ dma_addr_t adma_addr; /* Mapped ADMA descr. table */ dma_addr_t align_addr; /* Mapped bounce buffer */ + unsigned int desc_sz; /* ADMA descriptor size */ + unsigned int align_sz; /* ADMA alignment */ + unsigned int align_mask; /* ADMA alignment mask */ + struct tasklet_struct finish_tasklet; /* Tasklet structures */ struct timer_list timer; /* Timer for timeouts */ diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 50f0bc952328..aab032a6ae61 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -84,8 +84,6 @@ struct sdio_driver { struct device_driver drv; }; -#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv) - /** * SDIO_DEVICE - macro used to describe a specific SDIO device * @vend: the 16 bit manufacturer code diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 88787bb4b3b9..ab8564b03468 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -154,7 +154,7 @@ struct mmu_notifier_ops { * Therefore notifier chains can only be traversed when either * * 1. mmap_sem is held. - * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem). + * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem). * 3. No other concurrent thread can access the list (release) */ struct mmu_notifier { diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 48bf12ef6620..2f0856d14b21 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -431,6 +431,15 @@ struct zone { */ int nr_migrate_reserve_block; +#ifdef CONFIG_MEMORY_ISOLATION + /* + * Number of isolated pageblock. It is used to solve incorrect + * freepage counting problem due to racy retrieving migratetype + * of pageblock. Protected by zone->lock. + */ + unsigned long nr_isolate_pageblock; +#endif + #ifdef CONFIG_MEMORY_HOTPLUG /* see spanned/present_pages for more description */ seqlock_t span_seqlock; @@ -713,8 +722,8 @@ typedef struct pglist_data { int nr_zones; #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ struct page *node_mem_map; -#ifdef CONFIG_MEMCG - struct page_cgroup *node_page_cgroup; +#ifdef CONFIG_PAGE_EXTENSION + struct page_ext *node_page_ext; #endif #endif #ifndef CONFIG_NO_BOOTMEM @@ -1069,7 +1078,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) struct page; -struct page_cgroup; +struct page_ext; struct mem_section { /* * This is, logically, a pointer to an array of struct @@ -1087,12 +1096,12 @@ struct mem_section { /* See declaration of similar field in struct zone */ unsigned long *pageblock_flags; -#ifdef CONFIG_MEMCG +#ifdef CONFIG_PAGE_EXTENSION /* - * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use - * section. (see memcontrol.h/page_cgroup.h about this.) + * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use + * section. (see page_ext.h about this.) 
*/ - struct page_cgroup *page_cgroup; + struct page_ext *page_ext; unsigned long pad; #endif /* diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 44eeef0da186..745def862580 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -69,7 +69,7 @@ struct ieee1394_device_id { * @bDeviceClass: Class of device; numbers are assigned * by the USB forum. Products may choose to implement classes, * or be vendor-specific. Device classes specify behavior of all - * the interfaces on a devices. + * the interfaces on a device. * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. * @bInterfaceClass: Class of interface; numbers are assigned diff --git a/include/linux/msi.h b/include/linux/msi.h index 44f4746d033b..8ac4a68ffae2 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -10,17 +10,12 @@ struct msi_msg { u32 data; /* 16 bits of msi message data */ }; +extern int pci_msi_ignore_mask; /* Helper functions */ struct irq_data; struct msi_desc; -void mask_msi_irq(struct irq_data *data); -void unmask_msi_irq(struct irq_data *data); -void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -void read_msi_msg(unsigned int irq, struct msi_msg *msg); void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); -void write_msi_msg(unsigned int irq, struct msi_msg *msg); struct msi_desc { struct { @@ -48,6 +43,52 @@ struct msi_desc { struct msi_msg msg; }; +/* Helpers to hide struct msi_desc implementation details */ +#define msi_desc_to_dev(desc) (&(desc)->dev.dev) +#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list) +#define first_msi_entry(dev) \ + list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) +#define for_each_msi_entry(desc, dev) \ + list_for_each_entry((desc), dev_to_msi_list((dev)), list) + +#ifdef CONFIG_PCI_MSI +#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) +#define for_each_pci_msi_entry(desc, pdev) \ + for_each_msi_entry((desc), &(pdev)->dev) + +static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) +{ + return desc->dev; +} +#endif /* CONFIG_PCI_MSI */ + +void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); +void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); + +u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); +u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); +void pci_msi_mask_irq(struct irq_data *data); +void pci_msi_unmask_irq(struct irq_data *data); + +/* Conversion helpers. Should be removed after merging */ +static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +{ + __pci_write_msi_msg(entry, msg); +} +static inline void write_msi_msg(int irq, struct msi_msg *msg) +{ + pci_write_msi_msg(irq, msg); +} +static inline void mask_msi_irq(struct irq_data *data) +{ + pci_msi_mask_irq(data); +} +static inline void unmask_msi_irq(struct irq_data *data) +{ + pci_msi_unmask_irq(data); +} + /* * The arch hooks to setup up msi irqs. 
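As a rough illustration (not taken from the patch) of the descriptor helpers introduced above, code that used to open-code the walk over pdev->msi_list can use the new iterator and accessors; pdev stands for some struct pci_dev *:

	struct msi_desc *desc;
	struct msi_msg msg;

	for_each_pci_msi_entry(desc, pdev) {
		__pci_read_msi_msg(desc, &msg);
		/* msg now holds the address/data pair programmed for this vector */
	}
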
Those functions are * implemented as weak symbols so that they /can/ be overriden by @@ -61,18 +102,142 @@ void arch_restore_msi_irqs(struct pci_dev *dev); void default_teardown_msi_irqs(struct pci_dev *dev); void default_restore_msi_irqs(struct pci_dev *dev); -u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); -u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); -struct msi_chip { +struct msi_controller { struct module *owner; struct device *dev; struct device_node *of_node; struct list_head list; +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + struct irq_domain *domain; +#endif - int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, + int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, struct msi_desc *desc); - void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); + void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); +}; + +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + +#include <linux/irqhandler.h> +#include <asm/msi.h> + +struct irq_domain; +struct irq_chip; +struct device_node; +struct msi_domain_info; + +/** + * struct msi_domain_ops - MSI interrupt domain callbacks + * @get_hwirq: Retrieve the resulting hw irq number + * @msi_init: Domain specific init function for MSI interrupts + * @msi_free: Domain specific function to free a MSI interrupts + * @msi_check: Callback for verification of the domain/info/dev data + * @msi_prepare: Prepare the allocation of the interrupts in the domain + * @msi_finish: Optional callbacl to finalize the allocation + * @set_desc: Set the msi descriptor for an interrupt + * @handle_error: Optional error handler if the allocation fails + * + * @get_hwirq, @msi_init and @msi_free are callbacks used by + * msi_create_irq_domain() and related interfaces + * + * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error + * are callbacks used by msi_irq_domain_alloc_irqs() and related + * interfaces which are based on msi_desc. 
+ */ +struct msi_domain_ops { + irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, + msi_alloc_info_t *arg); + int (*msi_init)(struct irq_domain *domain, + struct msi_domain_info *info, + unsigned int virq, irq_hw_number_t hwirq, + msi_alloc_info_t *arg); + void (*msi_free)(struct irq_domain *domain, + struct msi_domain_info *info, + unsigned int virq); + int (*msi_check)(struct irq_domain *domain, + struct msi_domain_info *info, + struct device *dev); + int (*msi_prepare)(struct irq_domain *domain, + struct device *dev, int nvec, + msi_alloc_info_t *arg); + void (*msi_finish)(msi_alloc_info_t *arg, int retval); + void (*set_desc)(msi_alloc_info_t *arg, + struct msi_desc *desc); + int (*handle_error)(struct irq_domain *domain, + struct msi_desc *desc, int error); +}; + +/** + * struct msi_domain_info - MSI interrupt domain data + * @flags: Flags to decribe features and capabilities + * @ops: The callback data structure + * @chip: Optional: associated interrupt chip + * @chip_data: Optional: associated interrupt chip data + * @handler: Optional: associated interrupt flow handler + * @handler_data: Optional: associated interrupt flow handler data + * @handler_name: Optional: associated interrupt flow handler name + * @data: Optional: domain specific data + */ +struct msi_domain_info { + u32 flags; + struct msi_domain_ops *ops; + struct irq_chip *chip; + void *chip_data; + irq_flow_handler_t handler; + void *handler_data; + const char *handler_name; + void *data; +}; + +/* Flags for msi_domain_info */ +enum { + /* + * Init non implemented ops callbacks with default MSI domain + * callbacks. + */ + MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0), + /* + * Init non implemented chip callbacks with default MSI chip + * callbacks. + */ + MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), + /* Build identity map between hwirq and irq */ + MSI_FLAG_IDENTITY_MAP = (1 << 2), + /* Support multiple PCI MSI interrupts */ + MSI_FLAG_MULTI_PCI_MSI = (1 << 3), + /* Support PCI MSIX interrupts */ + MSI_FLAG_PCI_MSIX = (1 << 4), }; +int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force); + +struct irq_domain *msi_create_irq_domain(struct device_node *of_node, + struct msi_domain_info *info, + struct irq_domain *parent); +int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, + int nvec); +void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); +struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); + +#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ + +#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN +void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); +struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, + struct msi_domain_info *info, + struct irq_domain *parent); +int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, + int nvec, int type); +void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); +struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, + struct msi_domain_info *info, struct irq_domain *parent); + +irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, + struct msi_desc *desc); +int pci_msi_domain_check_cap(struct irq_domain *domain, + struct msi_domain_info *info, struct device *dev); +#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ + #endif /* LINUX_MSI_H */ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index dcfdecbfa0b7..8e30685affeb 100644 --- a/include/linux/netdev_features.h +++ 
b/include/linux/netdev_features.h @@ -47,9 +47,9 @@ enum { NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ - NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ + NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_MPLS_BIT, + NETIF_F_GSO_TUNNEL_REMCSUM_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ @@ -118,7 +118,7 @@ enum { #define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) #define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) -#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) +#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) @@ -181,7 +181,6 @@ enum { NETIF_F_GSO_IPIP | \ NETIF_F_GSO_SIT | \ NETIF_F_GSO_UDP_TUNNEL | \ - NETIF_F_GSO_UDP_TUNNEL_CSUM | \ - NETIF_F_GSO_MPLS) + NETIF_F_GSO_UDP_TUNNEL_CSUM) #endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 74fd5d37f15a..c31f74d76ebd 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -57,6 +57,8 @@ struct device; struct phy_device; /* 802.11 specific */ struct wireless_dev; +/* 802.15.4 specific */ +struct wpan_dev; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -314,6 +316,7 @@ struct napi_struct { struct net_device *dev; struct sk_buff *gro_list; struct sk_buff *skb; + struct hrtimer timer; struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; @@ -386,6 +389,7 @@ typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); void __napi_schedule(struct napi_struct *n); +void __napi_schedule_irqoff(struct napi_struct *n); static inline bool napi_disable_pending(struct napi_struct *n) { @@ -420,6 +424,18 @@ static inline void napi_schedule(struct napi_struct *n) __napi_schedule(n); } +/** + * napi_schedule_irqoff - schedule NAPI poll + * @n: napi context + * + * Variant of napi_schedule(), assuming hard irqs are masked. + */ +static inline void napi_schedule_irqoff(struct napi_struct *n) +{ + if (napi_schedule_prep(n)) + __napi_schedule_irqoff(n); +} + /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ static inline bool napi_reschedule(struct napi_struct *napi) { @@ -430,14 +446,19 @@ static inline bool napi_reschedule(struct napi_struct *napi) return false; } +void __napi_complete(struct napi_struct *n); +void napi_complete_done(struct napi_struct *n, int work_done); /** * napi_complete - NAPI processing complete * @n: napi context * * Mark NAPI processing as complete. + * Consider using napi_complete_done() instead. */ -void __napi_complete(struct napi_struct *n); -void napi_complete(struct napi_struct *n); +static inline void napi_complete(struct napi_struct *n) +{ + return napi_complete_done(n, 0); +} /** * napi_by_id - lookup a NAPI by napi_id @@ -472,14 +493,7 @@ void napi_hash_del(struct napi_struct *napi); * Stop NAPI from being scheduled on this context. * Waits till any outstanding processing completes. 
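A hedged sketch of how a driver poll loop fits the napi_complete_done()/napi_schedule_irqoff() pair added above; foo_poll(), foo_clean_rx() and struct foo_priv are invented names:

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
		int work_done = foo_clean_rx(priv, budget);	/* hypothetical RX cleanup */

		if (work_done < budget)
			napi_complete_done(napi, work_done);	/* report how much work was done */
		return work_done;
	}

	static irqreturn_t foo_irq(int irq, void *data)
	{
		struct foo_priv *priv = data;

		/* hard interrupts are already masked here, so the _irqoff variant suffices */
		napi_schedule_irqoff(&priv->napi);
		return IRQ_HANDLED;
	}
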
*/ -static inline void napi_disable(struct napi_struct *n) -{ - might_sleep(); - set_bit(NAPI_STATE_DISABLE, &n->state); - while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) - msleep(1); - clear_bit(NAPI_STATE_DISABLE, &n->state); -} +void napi_disable(struct napi_struct *n); /** * napi_enable - enable NAPI scheduling @@ -740,13 +754,13 @@ struct netdev_fcoe_hbainfo { }; #endif -#define MAX_PHYS_PORT_ID_LEN 32 +#define MAX_PHYS_ITEM_ID_LEN 32 -/* This structure holds a unique identifier to identify the - * physical port used by a netdevice. +/* This structure holds a unique identifier to identify some + * physical item (port for example) used by a netdevice. */ -struct netdev_phys_port_id { - unsigned char id[MAX_PHYS_PORT_ID_LEN]; +struct netdev_phys_item_id { + unsigned char id[MAX_PHYS_ITEM_ID_LEN]; unsigned char id_len; }; @@ -937,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr, u16 flags) + * const unsigned char *addr, u16 vid, u16 flags) * Adds an FDB entry to dev for addr. * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr) + * const unsigned char *addr, u16 vid) * Deletes the FDB entry from dev coresponding to addr. * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, * struct net_device *dev, struct net_device *filter_dev, @@ -962,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. * * int (*ndo_get_phys_port_id)(struct net_device *dev, - * struct netdev_phys_port_id *ppid); + * struct netdev_phys_item_id *ppid); * Called to get ID of physical port of this device. If driver does * not implement this, it is assumed that the hw is not able to have * multiple net devices on single physical port. @@ -1004,6 +1018,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * performing GSO on a packet. The device returns true if it is * able to GSO the packet, false otherwise. If the return value is * false the stack will do software GSO. + * + * int (*ndo_switch_parent_id_get)(struct net_device *dev, + * struct netdev_phys_item_id *psid); + * Called to get an ID of the switch chip this port is part of. + * If driver implements this, it indicates that it represents a port + * of a switch chip. + * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state); + * Called to notify switch device port of bridge port STP + * state change. 
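A sketch of a switch driver filling the renamed netdev_phys_item_id for the new switchdev callback; struct foo_port and its switch_id field are hypothetical:

	static int foo_switch_parent_id_get(struct net_device *dev,
					    struct netdev_phys_item_id *psid)
	{
		struct foo_port *port = netdev_priv(dev);

		psid->id_len = sizeof(port->switch_id);	/* must not exceed MAX_PHYS_ITEM_ID_LEN */
		memcpy(psid->id, &port->switch_id, psid->id_len);
		return 0;
	}
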
*/ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1114,11 +1137,13 @@ struct net_device_ops { struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, + u16 vid, u16 flags); int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr); + const unsigned char *addr, + u16 vid); int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, @@ -1136,7 +1161,7 @@ struct net_device_ops { int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); int (*ndo_get_phys_port_id)(struct net_device *dev, - struct netdev_phys_port_id *ppid); + struct netdev_phys_item_id *ppid); void (*ndo_add_vxlan_port)(struct net_device *dev, sa_family_t sa_family, __be16 port); @@ -1155,6 +1180,12 @@ struct net_device_ops { int (*ndo_get_lock_subclass)(struct net_device *dev); bool (*ndo_gso_check) (struct sk_buff *skb, struct net_device *dev); +#ifdef CONFIG_NET_SWITCHDEV + int (*ndo_switch_parent_id_get)(struct net_device *dev, + struct netdev_phys_item_id *psid); + int (*ndo_switch_port_stp_update)(struct net_device *dev, + u8 state); +#endif }; /** @@ -1216,6 +1247,8 @@ enum netdev_priv_flags { IFF_LIVE_ADDR_CHANGE = 1<<20, IFF_MACVLAN = 1<<21, IFF_XMIT_DST_RELEASE_PERM = 1<<22, + IFF_IPVLAN_MASTER = 1<<23, + IFF_IPVLAN_SLAVE = 1<<24, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1241,6 +1274,8 @@ enum netdev_priv_flags { #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE #define IFF_MACVLAN IFF_MACVLAN #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM +#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER +#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE /** * struct net_device - The DEVICE structure. @@ -1572,6 +1607,7 @@ struct net_device { struct inet6_dev __rcu *ip6_ptr; void *ax25_ptr; struct wireless_dev *ieee80211_ptr; + struct wpan_dev *ieee802154_ptr; /* * Cache lines mostly used on receive path (including eth_type_trans()) @@ -1590,6 +1626,7 @@ struct net_device { #endif + unsigned long gro_flush_timeout; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; @@ -2316,10 +2353,7 @@ extern int netdev_flow_limit_table_len; * Incoming packets are placed on per-cpu queues */ struct softnet_data { - struct Qdisc *output_queue; - struct Qdisc **output_queue_tailp; struct list_head poll_list; - struct sk_buff *completion_queue; struct sk_buff_head process_queue; /* stats */ @@ -2327,10 +2361,17 @@ struct softnet_data { unsigned int time_squeeze; unsigned int cpu_collision; unsigned int received_rps; - #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; +#endif +#ifdef CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit __rcu *flow_limit; +#endif + struct Qdisc *output_queue; + struct Qdisc **output_queue_tailp; + struct sk_buff *completion_queue; +#ifdef CONFIG_RPS /* Elements below can be accessed between CPUs for RPS */ struct call_single_data csd ____cacheline_aligned_in_smp; struct softnet_data *rps_ipi_next; @@ -2342,9 +2383,6 @@ struct softnet_data { struct sk_buff_head input_pkt_queue; struct napi_struct backlog; -#ifdef CONFIG_NET_FLOW_LIMIT - struct sd_flow_limit __rcu *flow_limit; -#endif }; static inline void input_queue_head_incr(struct softnet_data *sd) @@ -2748,23 +2786,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, } #endif -static inline int netif_copy_real_num_queues(struct net_device *to_dev, - const struct net_device *from_dev) -{ - int err; - - err = netif_set_real_num_tx_queues(to_dev, - from_dev->real_num_tx_queues); - if (err) - 
return err; -#ifdef CONFIG_SYSFS - return netif_set_real_num_rx_queues(to_dev, - from_dev->real_num_rx_queues); -#else - return 0; -#endif -} - #ifdef CONFIG_SYSFS static inline unsigned int get_netdev_rx_queue_index( struct netdev_rx_queue *queue) @@ -2864,7 +2885,7 @@ void dev_set_group(struct net_device *, int); int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, - struct netdev_phys_port_id *ppid); + struct netdev_phys_item_id *ppid); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3425,6 +3446,12 @@ void netdev_upper_dev_unlink(struct net_device *dev, void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); + +/* RSS keys are 40 or 52 bytes long */ +#define NETDEV_RSS_KEY_LEN 52 +extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; +void netdev_rss_key_fill(void *buffer, size_t len); + int dev_get_nest_level(struct net_device *dev, bool (*type_check)(struct net_device *dev)); int skb_checksum_help(struct sk_buff *skb); @@ -3569,7 +3596,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } @@ -3614,6 +3641,21 @@ static inline bool netif_is_macvlan(struct net_device *dev) return dev->priv_flags & IFF_MACVLAN; } +static inline bool netif_is_macvlan_port(struct net_device *dev) +{ + return dev->priv_flags & IFF_MACVLAN_PORT; +} + +static inline bool netif_is_ipvlan(struct net_device *dev) +{ + return dev->priv_flags & IFF_IPVLAN_SLAVE; +} + +static inline bool netif_is_ipvlan_port(struct net_device *dev) +{ + return dev->priv_flags & IFF_IPVLAN_MASTER; +} + static inline bool netif_is_bond_master(struct net_device *dev) { return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 356acc2846fd..022b761dbf0a 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -490,6 +490,8 @@ enum { /* nfs42 */ NFSPROC4_CLNT_SEEK, + NFSPROC4_CLNT_ALLOCATE, + NFSPROC4_CLNT_DEALLOCATE, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c72d1ad41ad4..6d627b92df53 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -163,7 +163,7 @@ struct nfs_inode { */ __be32 cookieverf[2]; - unsigned long npages; + unsigned long nrequests; struct nfs_mds_commit_info commit_info; /* Open contexts for shared mmap writes */ @@ -520,7 +520,7 @@ extern void nfs_commit_free(struct nfs_commit_data *data); static inline int nfs_have_writebacks(struct inode *inode) { - return NFS_I(inode)->npages != 0; + return NFS_I(inode)->nrequests != 0; } /* diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index a32ba0d7a98f..1e37fbb78f7a 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -231,5 +231,7 @@ struct nfs_server { #define 
NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) #define NFS_CAP_SECURITY_LABEL (1U << 18) #define NFS_CAP_SEEK (1U << 19) +#define NFS_CAP_ALLOCATE (1U << 20) +#define NFS_CAP_DEALLOCATE (1U << 21) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 983876f24aed..467c84efb596 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1224,14 +1224,39 @@ struct nfs41_free_stateid_res { unsigned int status; }; +static inline void +nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) +{ + kfree(cinfo->buckets); +} + #else struct pnfs_ds_commit_info { }; +static inline void +nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) +{ +} + #endif /* CONFIG_NFS_V4_1 */ #ifdef CONFIG_NFS_V4_2 +struct nfs42_falloc_args { + struct nfs4_sequence_args seq_args; + + struct nfs_fh *falloc_fh; + nfs4_stateid falloc_stateid; + u64 falloc_offset; + u64 falloc_length; +}; + +struct nfs42_falloc_res { + struct nfs4_sequence_res seq_res; + unsigned int status; +}; + struct nfs42_seek_args { struct nfs4_sequence_args seq_args; diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index 20163b9a0eae..167342c2ce6b 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * */ #ifndef NL802154_H diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 2bf403195c09..258945fcabf1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -19,6 +19,7 @@ #include <linux/pci.h> #include <linux/miscdevice.h> #include <linux/kref.h> +#include <linux/blk-mq.h> struct nvme_bar { __u64 cap; /* Controller Capabilities */ @@ -38,6 +39,7 @@ struct nvme_bar { #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) +#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) enum { NVME_CC_ENABLE = 1 << 0, @@ -70,8 +72,10 @@ extern unsigned char nvme_io_timeout; */ struct nvme_dev { struct list_head node; - struct nvme_queue __rcu **queues; - unsigned short __percpu *io_queue; + struct nvme_queue **queues; + struct request_queue *admin_q; + struct blk_mq_tag_set tagset; + struct blk_mq_tag_set admin_tagset; u32 __iomem *dbs; struct pci_dev *pci_dev; struct dma_pool *prp_page_pool; @@ -90,15 +94,16 @@ struct nvme_dev { struct miscdevice miscdev; work_func_t reset_workfn; struct work_struct reset_work; - struct work_struct cpu_work; char name[12]; char serial[20]; char model[40]; char firmware_rev[8]; u32 max_hw_sectors; u32 stripe_size; + u32 page_size; u16 oncs; u16 abort_limit; + u8 event_limit; u8 vwc; u8 initialized; }; @@ -132,7 +137,6 @@ struct nvme_iod { int offset; /* Of PRP list */ int nents; /* Used in scatterlist */ int length; /* Of data, in bytes */ - unsigned long start_time; dma_addr_t first_dma; struct list_head node; struct scatterlist sg[0]; @@ -150,12 +154,14 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) */ void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); -int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t); +int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t); struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, unsigned 
long addr, unsigned length); void nvme_unmap_user_pages(struct nvme_dev *dev, int write, struct nvme_iod *iod); -int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *); +int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *, + struct nvme_command *, u32 *); +int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns); int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, u32 *result); int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, diff --git a/include/linux/of.h b/include/linux/of.h index 29f0adc5f3e4..dfde07e77a63 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -23,6 +23,8 @@ #include <linux/spinlock.h> #include <linux/topology.h> #include <linux/notifier.h> +#include <linux/property.h> +#include <linux/list.h> #include <asm/byteorder.h> #include <asm/errno.h> @@ -49,14 +51,13 @@ struct device_node { const char *type; phandle phandle; const char *full_name; + struct fwnode_handle fwnode; struct property *properties; struct property *deadprops; /* removed properties */ struct device_node *parent; struct device_node *child; struct device_node *sibling; - struct device_node *next; /* next device of same type */ - struct device_node *allnext; /* next in list of all nodes */ struct kobject kobj; unsigned long _flags; void *data; @@ -74,11 +75,18 @@ struct of_phandle_args { uint32_t args[MAX_PHANDLE_ARGS]; }; +struct of_reconfig_data { + struct device_node *dn; + struct property *prop; + struct property *old_prop; +}; + /* initialize a node */ extern struct kobj_type of_node_ktype; static inline void of_node_init(struct device_node *node) { kobject_init(&node->kobj, &of_node_ktype); + node->fwnode.type = FWNODE_OF; } /* true when node is initialized */ @@ -105,18 +113,27 @@ static inline struct device_node *of_node_get(struct device_node *node) static inline void of_node_put(struct device_node *node) { } #endif /* !CONFIG_OF_DYNAMIC */ -#ifdef CONFIG_OF - /* Pointer for first entry in chain of all nodes. */ -extern struct device_node *of_allnodes; +extern struct device_node *of_root; extern struct device_node *of_chosen; extern struct device_node *of_aliases; extern struct device_node *of_stdout; extern raw_spinlock_t devtree_lock; +#ifdef CONFIG_OF +static inline bool is_of_node(struct fwnode_handle *fwnode) +{ + return fwnode && fwnode->type == FWNODE_OF; +} + +static inline struct device_node *of_node(struct fwnode_handle *fwnode) +{ + return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; +} + static inline bool of_have_populated_dt(void) { - return of_allnodes != NULL; + return of_root != NULL; } static inline bool of_node_is_root(const struct device_node *node) @@ -160,6 +177,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag clear_bit(flag, &p->_flags); } +extern struct device_node *__of_find_all_nodes(struct device_node *prev); extern struct device_node *of_find_all_nodes(struct device_node *prev); /* @@ -215,8 +233,9 @@ static inline const char *of_node_full_name(const struct device_node *np) return np ? 
np->full_name : "<no-node>"; } -#define for_each_of_allnodes(dn) \ - for (dn = of_allnodes; dn; dn = dn->allnext) +#define for_each_of_allnodes_from(from, dn) \ + for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) +#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) extern struct device_node *of_find_node_by_name(struct device_node *from, const char *name); extern struct device_node *of_find_node_by_type(struct device_node *from, @@ -228,7 +247,13 @@ extern struct device_node *of_find_matching_node_and_match( const struct of_device_id *matches, const struct of_device_id **match); -extern struct device_node *of_find_node_by_path(const char *path); +extern struct device_node *of_find_node_opts_by_path(const char *path, + const char **opts); +static inline struct device_node *of_find_node_by_path(const char *path) +{ + return of_find_node_opts_by_path(path, NULL); +} + extern struct device_node *of_find_node_by_phandle(phandle handle); extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_parent(struct device_node *node); @@ -263,6 +288,10 @@ extern int of_property_read_u32_array(const struct device_node *np, size_t sz); extern int of_property_read_u64(const struct device_node *np, const char *propname, u64 *out_value); +extern int of_property_read_u64_array(const struct device_node *np, + const char *propname, + u64 *out_values, + size_t sz); extern int of_property_read_string(struct device_node *np, const char *propname, @@ -275,7 +304,7 @@ extern int of_property_read_string_helper(struct device_node *np, const char **out_strs, size_t sz, int index); extern int of_device_is_compatible(const struct device_node *device, const char *); -extern int of_device_is_available(const struct device_node *device); +extern bool of_device_is_available(const struct device_node *device); extern const void *of_get_property(const struct device_node *node, const char *name, int *lenp); @@ -317,16 +346,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop); #define OF_RECONFIG_REMOVE_PROPERTY 0x0004 #define OF_RECONFIG_UPDATE_PROPERTY 0x0005 -struct of_prop_reconfig { - struct device_node *dn; - struct property *prop; - struct property *old_prop; -}; - -extern int of_reconfig_notifier_register(struct notifier_block *); -extern int of_reconfig_notifier_unregister(struct notifier_block *); -extern int of_reconfig_notify(unsigned long, void *); - extern int of_attach_node(struct device_node *); extern int of_detach_node(struct device_node *); @@ -355,6 +374,16 @@ bool of_console_check(struct device_node *dn, char *name, int index); #else /* CONFIG_OF */ +static inline bool is_of_node(struct fwnode_handle *fwnode) +{ + return false; +} + +static inline struct device_node *of_node(struct fwnode_handle *fwnode) +{ + return NULL; +} + static inline const char* of_node_full_name(const struct device_node *np) { return "<no-node>"; @@ -385,6 +414,12 @@ static inline struct device_node *of_find_node_by_path(const char *path) return NULL; } +static inline struct device_node *of_find_node_opts_by_path(const char *path, + const char **opts) +{ + return NULL; +} + static inline struct device_node *of_get_parent(const struct device_node *node) { return NULL; @@ -426,9 +461,9 @@ static inline int of_device_is_compatible(const struct device_node *device, return 0; } -static inline int of_device_is_available(const struct device_node *device) +static inline bool of_device_is_available(const struct 
device_node *device) { - return 0; + return false; } static inline struct property *of_find_property(const struct device_node *np, @@ -477,6 +512,13 @@ static inline int of_property_read_u32_array(const struct device_node *np, return -ENOSYS; } +static inline int of_property_read_u64_array(const struct device_node *np, + const char *propname, + u64 *out_values, size_t sz) +{ + return -ENOSYS; +} + static inline int of_property_read_string(struct device_node *np, const char *propname, const char **out_string) @@ -760,6 +802,13 @@ static inline int of_property_read_u32(const struct device_node *np, return of_property_read_u32_array(np, propname, out_value, 1); } +static inline int of_property_read_s32(const struct device_node *np, + const char *propname, + s32 *out_value) +{ + return of_property_read_u32(np, propname, (u32*) out_value); +} + #define of_property_for_each_u32(np, propname, prop, p, u) \ for (prop = of_find_property(np, propname, NULL), \ p = of_prop_next_u32(prop, NULL, &u); \ @@ -828,7 +877,7 @@ static inline int of_get_available_child_count(const struct device_node *np) = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #else -#define _OF_DECLARE(table, name, compat, fn, fn_type) \ +#define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __attribute__((unused)) \ = { .compatible = compat, \ @@ -879,7 +928,19 @@ struct of_changeset { struct list_head entries; }; +enum of_reconfig_change { + OF_RECONFIG_NO_CHANGE = 0, + OF_RECONFIG_CHANGE_ADD, + OF_RECONFIG_CHANGE_REMOVE, +}; + #ifdef CONFIG_OF_DYNAMIC +extern int of_reconfig_notifier_register(struct notifier_block *); +extern int of_reconfig_notifier_unregister(struct notifier_block *); +extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd); +extern int of_reconfig_get_state_change(unsigned long action, + struct of_reconfig_data *arg); + extern void of_changeset_init(struct of_changeset *ocs); extern void of_changeset_destroy(struct of_changeset *ocs); extern int of_changeset_apply(struct of_changeset *ocs); @@ -917,9 +978,69 @@ static inline int of_changeset_update_property(struct of_changeset *ocs, { return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); } -#endif +#else /* CONFIG_OF_DYNAMIC */ +static inline int of_reconfig_notifier_register(struct notifier_block *nb) +{ + return -EINVAL; +} +static inline int of_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return -EINVAL; +} +static inline int of_reconfig_notify(unsigned long action, + struct of_reconfig_data *arg) +{ + return -EINVAL; +} +static inline int of_reconfig_get_state_change(unsigned long action, + struct of_reconfig_data *arg) +{ + return -EINVAL; +} +#endif /* CONFIG_OF_DYNAMIC */ /* CONFIG_OF_RESOLVE api */ extern int of_resolve_phandles(struct device_node *tree); +/** + * of_device_is_system_power_controller - Tells if system-power-controller is found for device_node + * @np: Pointer to the given device_node + * + * return true if present false otherwise + */ +static inline bool of_device_is_system_power_controller(const struct device_node *np) +{ + return of_property_read_bool(np, "system-power-controller"); +} + +/** + * Overlay support + */ + +#ifdef CONFIG_OF_OVERLAY + +/* ID based overlays; the API for external users */ +int of_overlay_create(struct device_node *tree); +int of_overlay_destroy(int id); +int of_overlay_destroy_all(void); + +#else + +static inline int of_overlay_create(struct device_node *tree) +{ + return 
-ENOTSUPP; +} + +static inline int of_overlay_destroy(int id) +{ + return -ENOTSUPP; +} + +static inline int of_overlay_destroy_all(void) +{ + return -ENOTSUPP; +} + +#endif + #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 8cb14eb393d6..d88e81be6368 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -106,7 +106,7 @@ extern int of_address_to_resource(struct device_node *dev, int index, struct resource *r); void __iomem *of_iomap(struct device_node *node, int index); void __iomem *of_io_request_and_map(struct device_node *device, - int index, char *name); + int index, const char *name); #else #include <linux/io.h> @@ -123,7 +123,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) } static inline void __iomem *of_io_request_and_map(struct device_node *device, - int index, char *name) + int index, const char *name) { return IOMEM_ERR_PTR(-EINVAL); } diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 1fd207e7a847..ce0e5abeb454 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -59,13 +59,13 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, #endif #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) -int of_pci_msi_chip_add(struct msi_chip *chip); -void of_pci_msi_chip_remove(struct msi_chip *chip); -struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node); +int of_pci_msi_chip_add(struct msi_controller *chip); +void of_pci_msi_chip_remove(struct msi_controller *chip); +struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node); #else -static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; } -static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { } -static inline struct msi_chip * +static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; } +static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { } +static inline struct msi_controller * of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } #endif diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h index c65a18a0cfdf..7e09244bb679 100644 --- a/include/linux/of_pdt.h +++ b/include/linux/of_pdt.h @@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size); /* for building the device tree */ extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); -extern void (*of_pdt_build_more)(struct device_node *dp, - struct device_node ***nextp); +extern void (*of_pdt_build_more)(struct device_node *dp); #endif /* _LINUX_OF_PDT_H */ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index c2b0627a2317..8a860f096c35 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root, static inline void of_platform_depopulate(struct device *parent) { } #endif +#ifdef CONFIG_OF_DYNAMIC +extern void of_platform_register_reconfig_notifier(void); +#else +static inline void of_platform_register_reconfig_notifier(void) { } +#endif + #endif /* _LINUX_OF_PLATFORM_H */ diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h new file mode 100644 index 000000000000..c2080eebbb47 --- /dev/null +++ b/include/linux/omap-gpmc.h @@ -0,0 +1,199 @@ +/* + * OMAP GPMC (General Purpose Memory Controller) defines + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public 
License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/* Maximum Number of Chip Selects */ +#define GPMC_CS_NUM 8 + +#define GPMC_CONFIG_WP 0x00000005 + +#define GPMC_IRQ_FIFOEVENTENABLE 0x01 +#define GPMC_IRQ_COUNT_EVENT 0x02 + +#define GPMC_BURST_4 4 /* 4 word burst */ +#define GPMC_BURST_8 8 /* 8 word burst */ +#define GPMC_BURST_16 16 /* 16 word burst */ +#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */ +#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */ +#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */ +#define GPMC_MUX_AD 2 /* Addr-Data multiplex */ + +/* bool type time settings */ +struct gpmc_bool_timings { + bool cycle2cyclediffcsen; + bool cycle2cyclesamecsen; + bool we_extra_delay; + bool oe_extra_delay; + bool adv_extra_delay; + bool cs_extra_delay; + bool time_para_granularity; +}; + +/* + * Note that all values in this struct are in nanoseconds except sync_clk + * (which is in picoseconds), while the register values are in gpmc_fck cycles. + */ +struct gpmc_timings { + /* Minimum clock period for synchronous mode (in picoseconds) */ + u32 sync_clk; + + /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */ + u32 cs_on; /* Assertion time */ + u32 cs_rd_off; /* Read deassertion time */ + u32 cs_wr_off; /* Write deassertion time */ + + /* ADV signal timings corresponding to GPMC_CONFIG3 */ + u32 adv_on; /* Assertion time */ + u32 adv_rd_off; /* Read deassertion time */ + u32 adv_wr_off; /* Write deassertion time */ + + /* WE signals timings corresponding to GPMC_CONFIG4 */ + u32 we_on; /* WE assertion time */ + u32 we_off; /* WE deassertion time */ + + /* OE signals timings corresponding to GPMC_CONFIG4 */ + u32 oe_on; /* OE assertion time */ + u32 oe_off; /* OE deassertion time */ + + /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */ + u32 page_burst_access; /* Multiple access word delay */ + u32 access; /* Start-cycle to first data valid delay */ + u32 rd_cycle; /* Total read cycle time */ + u32 wr_cycle; /* Total write cycle time */ + + u32 bus_turnaround; + u32 cycle2cycle_delay; + + u32 wait_monitoring; + u32 clk_activation; + + /* The following are only on OMAP3430 */ + u32 wr_access; /* WRACCESSTIME */ + u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */ + + struct gpmc_bool_timings bool_timings; +}; + +/* Device timings in picoseconds */ +struct gpmc_device_timings { + u32 t_ceasu; /* address setup to CS valid */ + u32 t_avdasu; /* address setup to ADV valid */ + /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is + * of tusb using these timings even for sync whilst + * ideally for adv_rd/(wr)_off it should have considered + * t_avdh instead. This indirectly necessitates r/w + * variations of t_avdp as it is possible to have one + * sync & other async + */ + u32 t_avdp_r; /* ADV low time (what about t_cer ?) 
*/ + u32 t_avdp_w; + u32 t_aavdh; /* address hold time */ + u32 t_oeasu; /* address setup to OE valid */ + u32 t_aa; /* access time from ADV assertion */ + u32 t_iaa; /* initial access time */ + u32 t_oe; /* access time from OE assertion */ + u32 t_ce; /* access time from CS asertion */ + u32 t_rd_cycle; /* read cycle time */ + u32 t_cez_r; /* read CS deassertion to high Z */ + u32 t_cez_w; /* write CS deassertion to high Z */ + u32 t_oez; /* OE deassertion to high Z */ + u32 t_weasu; /* address setup to WE valid */ + u32 t_wpl; /* write assertion time */ + u32 t_wph; /* write deassertion time */ + u32 t_wr_cycle; /* write cycle time */ + + u32 clk; + u32 t_bacc; /* burst access valid clock to output delay */ + u32 t_ces; /* CS setup time to clk */ + u32 t_avds; /* ADV setup time to clk */ + u32 t_avdh; /* ADV hold time from clk */ + u32 t_ach; /* address hold time from clk */ + u32 t_rdyo; /* clk to ready valid */ + + u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */ + u32 t_ce_avd; /* CS on to ADV on delay */ + + /* XXX: check the possibility of combining + * cyc_aavhd_oe & cyc_aavdh_we + */ + u8 cyc_aavdh_oe;/* read address hold time in cycles */ + u8 cyc_aavdh_we;/* write address hold time in cycles */ + u8 cyc_oe; /* access time from OE assertion in cycles */ + u8 cyc_wpl; /* write deassertion time in cycles */ + u32 cyc_iaa; /* initial access time in cycles */ + + /* extra delays */ + bool ce_xdelay; + bool avd_xdelay; + bool oe_xdelay; + bool we_xdelay; +}; + +struct gpmc_settings { + bool burst_wrap; /* enables wrap bursting */ + bool burst_read; /* enables read page/burst mode */ + bool burst_write; /* enables write page/burst mode */ + bool device_nand; /* device is NAND */ + bool sync_read; /* enables synchronous reads */ + bool sync_write; /* enables synchronous writes */ + bool wait_on_read; /* monitor wait on reads */ + bool wait_on_write; /* monitor wait on writes */ + u32 burst_len; /* page/burst length */ + u32 device_width; /* device bus width (8 or 16 bit) */ + u32 mux_add_data; /* multiplex address & data */ + u32 wait_pin; /* wait-pin to be used */ +}; + +extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, + struct gpmc_settings *gpmc_s, + struct gpmc_device_timings *dev_t); + +struct gpmc_nand_regs; +struct device_node; + +extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs); +extern int gpmc_get_client_irq(unsigned irq_config); + +extern unsigned int gpmc_ticks_to_ns(unsigned int ticks); + +extern void gpmc_cs_write_reg(int cs, int idx, u32 val); +extern int gpmc_calc_divider(unsigned int sync_clk); +extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t); +extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p); +extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base); +extern void gpmc_cs_free(int cs); +extern int gpmc_configure(int cmd, int wval); +extern void gpmc_read_settings_dt(struct device_node *np, + struct gpmc_settings *p); + +extern void omap3_gpmc_save_context(void); +extern void omap3_gpmc_restore_context(void); + +struct gpmc_timings; +struct omap_nand_platform_data; +struct omap_onenand_platform_data; + +#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2) +extern int gpmc_nand_init(struct omap_nand_platform_data *d, + struct gpmc_timings *gpmc_t); +#else +static inline int gpmc_nand_init(struct omap_nand_platform_data *d, + struct gpmc_timings *gpmc_t) +{ + return 0; +} +#endif + +#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) +extern void gpmc_onenand_init(struct omap_onenand_platform_data 
*d); +#else +#define board_onenand_data NULL +static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d) +{ +} +#endif diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h index f8322d9cd235..587bbdd31f5a 100644 --- a/include/linux/omap-mailbox.h +++ b/include/linux/omap-mailbox.h @@ -10,20 +10,20 @@ #define OMAP_MAILBOX_H typedef u32 mbox_msg_t; -struct omap_mbox; typedef int __bitwise omap_mbox_irq_t; #define IRQ_TX ((__force omap_mbox_irq_t) 1) #define IRQ_RX ((__force omap_mbox_irq_t) 2) -int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg); +struct mbox_chan; +struct mbox_client; -struct omap_mbox *omap_mbox_get(const char *, struct notifier_block *nb); -void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb); +struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl, + const char *chan_name); -void omap_mbox_save_ctx(struct omap_mbox *mbox); -void omap_mbox_restore_ctx(struct omap_mbox *mbox); -void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); -void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); +void omap_mbox_save_ctx(struct mbox_chan *chan); +void omap_mbox_restore_ctx(struct mbox_chan *chan); +void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); +void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq); #endif /* OMAP_MAILBOX_H */ diff --git a/include/linux/oom.h b/include/linux/oom.h index e8d6e1058723..853698c721f7 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -92,6 +92,17 @@ static inline bool oom_gfp_allowed(gfp_t gfp_mask) extern struct task_struct *find_lock_task_mm(struct task_struct *p); +static inline bool task_will_free_mem(struct task_struct *task) +{ + /* + * A coredumping process may sleep for an extended period in exit_mm(), + * so the oom killer cannot assume that the process will promptly exit + * and release memory. + */ + return (task->flags & PF_EXITING) && + !(task->signal->flags & SIGNAL_GROUP_COREDUMP); +} + /* sysctls */ extern int sysctl_oom_dump_tasks; extern int sysctl_oom_kill_allocating_task; diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h deleted file mode 100644 index 22691f614043..000000000000 --- a/include/linux/page-debug-flags.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef LINUX_PAGE_DEBUG_FLAGS_H -#define LINUX_PAGE_DEBUG_FLAGS_H - -/* - * page->debug_flags bits: - * - * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to - * implement generic debug pagealloc feature. The pages are filled with - * poison patterns and set this flag after free_pages(). The poisoned - * pages are verified whether the patterns are not corrupted and clear - * the flag before alloc_pages(). - */ - -enum page_debug_flags { - PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */ - PAGE_DEBUG_FLAG_GUARD, -}; - -/* - * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably - * gets turned off when no debug features are enabling it! - */ - -#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS -#if !defined(CONFIG_PAGE_POISONING) && \ - !defined(CONFIG_PAGE_GUARD) \ -/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */ -#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features! 
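For the omap-mailbox.h conversion above, a client now obtains a generic mbox_chan instead of an omap_mbox; a rough sketch, with the client callback, the platform device and the channel name all assumed for illustration:

	struct mbox_client *cl = &priv->mbox_client;	/* must outlive the channel */
	struct mbox_chan *chan;

	cl->dev = &pdev->dev;
	cl->rx_callback = foo_mbox_rx;	/* void foo_mbox_rx(struct mbox_client *, void *) */

	chan = omap_mbox_request_channel(cl, "wkup_m3");	/* example channel name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	omap_mbox_enable_irq(chan, IRQ_RX);
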
-#endif -#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */ - -#endif /* LINUX_PAGE_DEBUG_FLAGS_H */ diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 3fff8e774067..2dc1e1697b45 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -2,6 +2,10 @@ #define __LINUX_PAGEISOLATION_H #ifdef CONFIG_MEMORY_ISOLATION +static inline bool has_isolate_pageblock(struct zone *zone) +{ + return zone->nr_isolate_pageblock; +} static inline bool is_migrate_isolate_page(struct page *page) { return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; @@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype) return migratetype == MIGRATE_ISOLATE; } #else +static inline bool has_isolate_pageblock(struct zone *zone) +{ + return false; +} static inline bool is_migrate_isolate_page(struct page *page) { return false; diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h deleted file mode 100644 index 5c831f1eca79..000000000000 --- a/include/linux/page_cgroup.h +++ /dev/null @@ -1,105 +0,0 @@ -#ifndef __LINUX_PAGE_CGROUP_H -#define __LINUX_PAGE_CGROUP_H - -enum { - /* flags for mem_cgroup */ - PCG_USED = 0x01, /* This page is charged to a memcg */ - PCG_MEM = 0x02, /* This page holds a memory charge */ - PCG_MEMSW = 0x04, /* This page holds a memory+swap charge */ -}; - -struct pglist_data; - -#ifdef CONFIG_MEMCG -struct mem_cgroup; - -/* - * Page Cgroup can be considered as an extended mem_map. - * A page_cgroup page is associated with every page descriptor. The - * page_cgroup helps us identify information about the cgroup - * All page cgroups are allocated at boot or memory hotplug event, - * then the page cgroup for pfn always exists. - */ -struct page_cgroup { - unsigned long flags; - struct mem_cgroup *mem_cgroup; -}; - -extern void pgdat_page_cgroup_init(struct pglist_data *pgdat); - -#ifdef CONFIG_SPARSEMEM -static inline void page_cgroup_init_flatmem(void) -{ -} -extern void page_cgroup_init(void); -#else -extern void page_cgroup_init_flatmem(void); -static inline void page_cgroup_init(void) -{ -} -#endif - -struct page_cgroup *lookup_page_cgroup(struct page *page); - -static inline int PageCgroupUsed(struct page_cgroup *pc) -{ - return !!(pc->flags & PCG_USED); -} -#else /* !CONFIG_MEMCG */ -struct page_cgroup; - -static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat) -{ -} - -static inline struct page_cgroup *lookup_page_cgroup(struct page *page) -{ - return NULL; -} - -static inline void page_cgroup_init(void) -{ -} - -static inline void page_cgroup_init_flatmem(void) -{ -} -#endif /* CONFIG_MEMCG */ - -#include <linux/swap.h> - -#ifdef CONFIG_MEMCG_SWAP -extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new); -extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); -extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); -extern int swap_cgroup_swapon(int type, unsigned long max_pages); -extern void swap_cgroup_swapoff(int type); -#else - -static inline -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) -{ - return 0; -} - -static inline -unsigned short lookup_swap_cgroup_id(swp_entry_t ent) -{ - return 0; -} - -static inline int -swap_cgroup_swapon(int type, unsigned long max_pages) -{ - return 0; -} - -static inline void swap_cgroup_swapoff(int type) -{ - return; -} - -#endif /* CONFIG_MEMCG_SWAP */ - -#endif /* __LINUX_PAGE_CGROUP_H */ diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h 
new file mode 100644 index 000000000000..955421575d16 --- /dev/null +++ b/include/linux/page_counter.h @@ -0,0 +1,51 @@ +#ifndef _LINUX_PAGE_COUNTER_H +#define _LINUX_PAGE_COUNTER_H + +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <asm/page.h> + +struct page_counter { + atomic_long_t count; + unsigned long limit; + struct page_counter *parent; + + /* legacy */ + unsigned long watermark; + unsigned long failcnt; +}; + +#if BITS_PER_LONG == 32 +#define PAGE_COUNTER_MAX LONG_MAX +#else +#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) +#endif + +static inline void page_counter_init(struct page_counter *counter, + struct page_counter *parent) +{ + atomic_long_set(&counter->count, 0); + counter->limit = PAGE_COUNTER_MAX; + counter->parent = parent; +} + +static inline unsigned long page_counter_read(struct page_counter *counter) +{ + return atomic_long_read(&counter->count); +} + +void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); +void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); +int page_counter_try_charge(struct page_counter *counter, + unsigned long nr_pages, + struct page_counter **fail); +void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); +int page_counter_limit(struct page_counter *counter, unsigned long limit); +int page_counter_memparse(const char *buf, unsigned long *nr_pages); + +static inline void page_counter_reset_watermark(struct page_counter *counter) +{ + counter->watermark = page_counter_read(counter); +} + +#endif /* _LINUX_PAGE_COUNTER_H */ diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h new file mode 100644 index 000000000000..d2a2c84c72d0 --- /dev/null +++ b/include/linux/page_ext.h @@ -0,0 +1,84 @@ +#ifndef __LINUX_PAGE_EXT_H +#define __LINUX_PAGE_EXT_H + +#include <linux/types.h> +#include <linux/stacktrace.h> + +struct pglist_data; +struct page_ext_operations { + bool (*need)(void); + void (*init)(void); +}; + +#ifdef CONFIG_PAGE_EXTENSION + +/* + * page_ext->flags bits: + * + * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to + * implement generic debug pagealloc feature. The pages are filled with + * poison patterns and set this flag after free_pages(). The poisoned + * pages are verified whether the patterns are not corrupted and clear + * the flag before alloc_pages(). + */ + +enum page_ext_flags { + PAGE_EXT_DEBUG_POISON, /* Page is poisoned */ + PAGE_EXT_DEBUG_GUARD, + PAGE_EXT_OWNER, +}; + +/* + * Page Extension can be considered as an extended mem_map. + * A page_ext page is associated with every page descriptor. The + * page_ext helps us add more information about the page. + * All page_ext are allocated at boot or memory hotplug event, + * then the page_ext for pfn always exists. 
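A usage sketch for the new page_counter type defined above; the two-level hierarchy, the 512-page limit and the assumption that page_counter_try_charge() returns non-zero on failure are illustrative:

	struct page_counter parent, child, *fail;

	page_counter_init(&parent, NULL);
	page_counter_init(&child, &parent);	/* charges propagate up to the parent */
	page_counter_limit(&child, 512);	/* cap the child at 512 pages */

	if (page_counter_try_charge(&child, 32, &fail)) {
		/* charge failed; 'fail' names the counter whose limit was hit */
	} else {
		/* ... 32 pages are now accounted against child and parent ... */
		page_counter_uncharge(&child, 32);
	}
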
+ */ +struct page_ext { + unsigned long flags; +#ifdef CONFIG_PAGE_OWNER + unsigned int order; + gfp_t gfp_mask; + struct stack_trace trace; + unsigned long trace_entries[8]; +#endif +}; + +extern void pgdat_page_ext_init(struct pglist_data *pgdat); + +#ifdef CONFIG_SPARSEMEM +static inline void page_ext_init_flatmem(void) +{ +} +extern void page_ext_init(void); +#else +extern void page_ext_init_flatmem(void); +static inline void page_ext_init(void) +{ +} +#endif + +struct page_ext *lookup_page_ext(struct page *page); + +#else /* !CONFIG_PAGE_EXTENSION */ +struct page_ext; + +static inline void pgdat_page_ext_init(struct pglist_data *pgdat) +{ +} + +static inline struct page_ext *lookup_page_ext(struct page *page) +{ + return NULL; +} + +static inline void page_ext_init(void) +{ +} + +static inline void page_ext_init_flatmem(void) +{ +} +#endif /* CONFIG_PAGE_EXTENSION */ +#endif /* __LINUX_PAGE_EXT_H */ diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h new file mode 100644 index 000000000000..b48c3471c254 --- /dev/null +++ b/include/linux/page_owner.h @@ -0,0 +1,38 @@ +#ifndef __LINUX_PAGE_OWNER_H +#define __LINUX_PAGE_OWNER_H + +#ifdef CONFIG_PAGE_OWNER +extern bool page_owner_inited; +extern struct page_ext_operations page_owner_ops; + +extern void __reset_page_owner(struct page *page, unsigned int order); +extern void __set_page_owner(struct page *page, + unsigned int order, gfp_t gfp_mask); + +static inline void reset_page_owner(struct page *page, unsigned int order) +{ + if (likely(!page_owner_inited)) + return; + + __reset_page_owner(page, order); +} + +static inline void set_page_owner(struct page *page, + unsigned int order, gfp_t gfp_mask) +{ + if (likely(!page_owner_inited)) + return; + + __set_page_owner(page, order, gfp_mask); +} +#else +static inline void reset_page_owner(struct page *page, unsigned int order) +{ +} +static inline void set_page_owner(struct page *page, + unsigned int order, gfp_t gfp_mask) +{ +} + +#endif /* CONFIG_PAGE_OWNER */ +#endif /* __LINUX_PAGE_OWNER_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 5be8db45e368..44a27696ab6c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -331,6 +331,7 @@ struct pci_dev { unsigned int is_added:1; unsigned int is_busmaster:1; /* device is busmaster */ unsigned int no_msi:1; /* device may not use msi */ + unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ unsigned int block_cfg_access:1; /* config space access is blocked */ unsigned int broken_parity_status:1; /* Device generates false positive parity */ unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ @@ -449,7 +450,7 @@ struct pci_bus { struct resource busn_res; /* bus numbers routed to this bus */ struct pci_ops *ops; /* configuration access functions */ - struct msi_chip *msi; /* MSI controller */ + struct msi_controller *msi; /* MSI controller */ void *sysdata; /* hook for sys-specific extension */ struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ @@ -1003,6 +1004,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); int pci_save_state(struct pci_dev *dev); void pci_restore_state(struct pci_dev *dev); struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); +int pci_load_saved_state(struct pci_dev *dev, + struct pci_saved_state *state); int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state); struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); diff 
--git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 2706ee9a4327..8c7895061121 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -109,7 +109,6 @@ struct hotplug_slot { struct list_head slot_list; struct pci_slot *pci_slot; }; -#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1fa99a301817..97fb9f69aaed 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -522,6 +522,8 @@ #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d #define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e +#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573 +#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F4 0x1574 #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 420032d41d27..57f3a1c550dc 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -254,8 +254,6 @@ do { \ #endif /* CONFIG_SMP */ #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) -#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) -#define __get_cpu_var(var) (*this_cpu_ptr(&(var))) /* * Must be an lvalue. Since @var must be a simple identifier, diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index d5c89e0dd0e6..b4337646388b 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -128,12 +128,16 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) static inline bool __ref_is_percpu(struct percpu_ref *ref, unsigned long __percpu **percpu_countp) { - unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr); - /* paired with smp_store_release() in percpu_ref_reinit() */ - smp_read_barrier_depends(); + unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr); - if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC)) + /* + * Theoretically, the following could test just ATOMIC; however, + * then we'd have to mask off DEAD separately as DEAD may be + * visible without ATOMIC if we race with percpu_ref_kill(). DEAD + * implies ATOMIC anyway. Test them together. + */ + if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD)) return false; *percpu_countp = (unsigned long __percpu *)percpu_ptr; @@ -141,28 +145,42 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, } /** - * percpu_ref_get - increment a percpu refcount + * percpu_ref_get_many - increment a percpu refcount * @ref: percpu_ref to get + * @nr: number of references to get * - * Analagous to atomic_long_inc(). + * Analogous to atomic_long_add(). * * This function is safe to call as long as @ref is between init and exit. */ -static inline void percpu_ref_get(struct percpu_ref *ref) +static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) - this_cpu_inc(*percpu_count); + this_cpu_add(*percpu_count, nr); else - atomic_long_inc(&ref->count); + atomic_long_add(nr, &ref->count); rcu_read_unlock_sched(); } /** + * percpu_ref_get - increment a percpu refcount + * @ref: percpu_ref to get + * + * Analagous to atomic_long_inc(). + * + * This function is safe to call as long as @ref is between init and exit. 
+ */ +static inline void percpu_ref_get(struct percpu_ref *ref) +{ + percpu_ref_get_many(ref, 1); +} + +/** * percpu_ref_tryget - try to increment a percpu refcount * @ref: percpu_ref to try-get * @@ -225,29 +243,44 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) } /** - * percpu_ref_put - decrement a percpu refcount + * percpu_ref_put_many - decrement a percpu refcount * @ref: percpu_ref to put + * @nr: number of references to put * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) * * This function is safe to call as long as @ref is between init and exit. */ -static inline void percpu_ref_put(struct percpu_ref *ref) +static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) - this_cpu_dec(*percpu_count); - else if (unlikely(atomic_long_dec_and_test(&ref->count))) + this_cpu_sub(*percpu_count, nr); + else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) ref->release(ref); rcu_read_unlock_sched(); } /** + * percpu_ref_put - decrement a percpu refcount + * @ref: percpu_ref to put + * + * Decrement the refcount, and if 0, call the release function (which was passed + * to percpu_ref_init()) + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline void percpu_ref_put(struct percpu_ref *ref) +{ + percpu_ref_put_many(ref, 1); +} + +/** * percpu_ref_is_zero - test whether a percpu refcount reached zero * @ref: percpu_ref to test * diff --git a/include/linux/percpu.h b/include/linux/percpu.h index a3aa63e47637..caebf2a758dc 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -5,6 +5,7 @@ #include <linux/preempt.h> #include <linux/smp.h> #include <linux/cpumask.h> +#include <linux/printk.h> #include <linux/pfn.h> #include <linux/init.h> @@ -134,4 +135,7 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) +/* To avoid include hell, as printk can not declare this, we declare it here */ +DECLARE_PER_CPU(printk_func_t, printk_func); + #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 893a0d07986f..486e84ccb1f9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -79,7 +79,7 @@ struct perf_branch_stack { struct perf_branch_entry entries[0]; }; -struct perf_regs_user { +struct perf_regs { __u64 abi; struct pt_regs *regs; }; @@ -580,34 +580,40 @@ extern u64 perf_event_read_value(struct perf_event *event, struct perf_sample_data { - u64 type; + /* + * Fields set by perf_sample_data_init(), group so as to + * minimize the cachelines touched. + */ + u64 addr; + struct perf_raw_record *raw; + struct perf_branch_stack *br_stack; + u64 period; + u64 weight; + u64 txn; + union perf_mem_data_src data_src; + /* + * The other fields, optionally {set,used} by + * perf_{prepare,output}_sample(). 
+ */ + u64 type; u64 ip; struct { u32 pid; u32 tid; } tid_entry; u64 time; - u64 addr; u64 id; u64 stream_id; struct { u32 cpu; u32 reserved; } cpu_entry; - u64 period; - union perf_mem_data_src data_src; struct perf_callchain_entry *callchain; - struct perf_raw_record *raw; - struct perf_branch_stack *br_stack; - struct perf_regs_user regs_user; + struct perf_regs regs_user; + struct perf_regs regs_intr; u64 stack_user_size; - u64 weight; - /* - * Transaction flags for abort events: - */ - u64 txn; -}; +} ____cacheline_aligned; /* default value for data source */ #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ @@ -624,9 +630,6 @@ static inline void perf_sample_data_init(struct perf_sample_data *data, data->raw = NULL; data->br_stack = NULL; data->period = period; - data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE; - data->regs_user.regs = NULL; - data->stack_user_size = 0; data->weight = 0; data->data_src.val = PERF_MEM_NA; data->txn = 0; diff --git a/include/linux/phy.h b/include/linux/phy.h index d090cfcaa167..22af8f8f5802 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -433,6 +433,7 @@ struct phy_device { * by this PHY * flags: A bitfield defining certain other features this PHY * supports (like interrupts) + * driver_data: static driver data * * The drivers must implement config_aneg and read_status. All * other functions are optional. Note that none of these @@ -448,6 +449,7 @@ struct phy_driver { unsigned int phy_id_mask; u32 features; u32 flags; + const void *driver_data; /* * Called to issue a PHY software reset @@ -772,4 +774,28 @@ int __init mdio_bus_init(void); void mdio_bus_exit(void); extern struct bus_type mdio_bus_type; + +/** + * module_phy_driver() - Helper macro for registering PHY drivers + * @__phy_drivers: array of PHY drivers to register + * + * Helper macro for PHY drivers which do not do anything special in module + * init/exit. Each module may only use this macro once, and calling it + * replaces module_init() and module_exit(). 
+ */ +#define phy_module_driver(__phy_drivers, __count) \ +static int __init phy_module_init(void) \ +{ \ + return phy_drivers_register(__phy_drivers, __count); \ +} \ +module_init(phy_module_init); \ +static void __exit phy_module_exit(void) \ +{ \ + phy_drivers_unregister(__phy_drivers, __count); \ +} \ +module_exit(phy_module_exit) + +#define module_phy_driver(__phy_drivers) \ + phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) + #endif /* __PHY_H */ diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index a6591c693ebb..5e0bc779e6c5 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h @@ -27,6 +27,7 @@ struct samsung_i2s { #define QUIRK_NO_MUXPSR (1 << 2) #define QUIRK_NEED_RSTCLR (1 << 3) #define QUIRK_SUPPORTS_TDM (1 << 4) +#define QUIRK_SUPPORTS_IDMA (1 << 5) /* Quirks of the I2S controller */ u32 quirks; dma_addr_t idma_addr; diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h new file mode 100644 index 000000000000..26af54321958 --- /dev/null +++ b/include/linux/platform_data/bcmgenet.h @@ -0,0 +1,18 @@ +#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__ +#define __LINUX_PLATFORM_DATA_BCMGENET_H__ + +#include <linux/types.h> +#include <linux/if_ether.h> +#include <linux/phy.h> + +struct bcmgenet_platform_data { + bool mdio_enabled; + phy_interface_t phy_interface; + int phy_address; + int phy_speed; + int phy_duplex; + u8 mac_address[ETH_ALEN]; + int genet_version; +}; + +#endif diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 6a1357d31871..7d964e787299 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -41,6 +41,7 @@ enum sdma_peripheral_type { IMX_DMATYPE_ESAI, /* ESAI */ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ + IMX_DMATYPE_SAI, /* SAI */ }; enum imx_dma_prio { diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h new file mode 100644 index 000000000000..67bbcf0785f6 --- /dev/null +++ b/include/linux/platform_data/hsmmc-omap.h @@ -0,0 +1,90 @@ +/* + * MMC definitions for OMAP2 + * + * Copyright (C) 2006 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* + * struct omap_hsmmc_dev_attr.flags possibilities + * + * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can + * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag + * should be set if this is the case. See for example Section 22.5.3 + * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia + * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R). + * + * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers + * don't work correctly on some MMC controller instances on some + * OMAP3 SoCs; this flag should be set if this is the case. See + * for example Advisory 2.1.1.128 "MMC: Multiple Block Read + * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_ + * Revision F (October 2010) (SPRZ278F). 
+ */ +#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0) +#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1) +#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2) + +struct omap_hsmmc_dev_attr { + u8 flags; +}; + +struct mmc_card; + +struct omap_hsmmc_platform_data { + /* back-link to device */ + struct device *dev; + + /* set if your board has components or wiring that limits the + * maximum frequency on the MMC bus */ + unsigned int max_freq; + + /* Integrating attributes from the omap_hwmod layer */ + u8 controller_flags; + + /* Register offset deviation */ + u16 reg_offset; + + /* + * 4/8 wires and any additional host capabilities + * need to OR'd all capabilities (ref. linux/mmc/host.h) + */ + u32 caps; /* Used for the MMC driver on 2430 and later */ + u32 pm_caps; /* PM capabilities of the mmc */ + + /* switch pin can be for card detect (default) or card cover */ + unsigned cover:1; + + /* use the internal clock */ + unsigned internal_clock:1; + + /* nonremovable e.g. eMMC */ + unsigned nonremovable:1; + + /* eMMC does not handle power off when not in sleep state */ + unsigned no_regulator_off_init:1; + + /* we can put the features above into this variable */ +#define HSMMC_HAS_PBIAS (1 << 0) +#define HSMMC_HAS_UPDATED_RESET (1 << 1) +#define HSMMC_HAS_HSPE_SUPPORT (1 << 2) + unsigned features; + + int switch_pin; /* gpio (card detect) */ + int gpio_wp; /* gpio (write protect) */ + + int (*set_power)(struct device *dev, int power_on, int vdd); + void (*remux)(struct device *dev, int power_on); + /* Call back before enabling / disabling regulators */ + void (*before_set_reg)(struct device *dev, int power_on, int vdd); + /* Call back after enabling / disabling regulators */ + void (*after_set_reg)(struct device *dev, int power_on, int vdd); + /* if we have special card, init it using this callback */ + void (*init_card)(struct mmc_card *card); + + const char *name; + u32 ocr_mask; +}; diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h index 1b2ba24e4e03..9c7fd1efe495 100644 --- a/include/linux/platform_data/lp855x.h +++ b/include/linux/platform_data/lp855x.h @@ -136,6 +136,7 @@ struct lp855x_rom_data { Only valid when mode is PWM_BASED. 
* @size_program : total size of lp855x_rom_data * @rom_data : list of new eeprom/eprom registers + * @supply : regulator that supplies 3V input */ struct lp855x_platform_data { const char *name; @@ -144,6 +145,7 @@ struct lp855x_platform_data { unsigned int period_ns; int size_program; struct lp855x_rom_data *rom_data; + struct regulator *supply; }; #endif diff --git a/include/linux/platform_data/mmc-atmel-mci.h b/include/linux/platform_data/mmc-atmel-mci.h new file mode 100644 index 000000000000..399a2d5a14bd --- /dev/null +++ b/include/linux/platform_data/mmc-atmel-mci.h @@ -0,0 +1,22 @@ +#ifndef __MMC_ATMEL_MCI_H +#define __MMC_ATMEL_MCI_H + +#include <linux/platform_data/dma-atmel.h> +#include <linux/platform_data/dma-dw.h> + +/** + * struct mci_dma_data - DMA data for MCI interface + */ +struct mci_dma_data { +#ifdef CONFIG_ARM + struct at_dma_slave sdata; +#else + struct dw_dma_slave sdata; +#endif +}; + +/* accessor macros */ +#define slave_data_ptr(s) (&(s)->sdata) +#define find_slave_dev(s) ((s)->sdata.dma_dev) + +#endif /* __MMC_ATMEL_MCI_H */ diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h index 51e70cf25cbc..5c188f4e9bec 100644 --- a/include/linux/platform_data/mmc-omap.h +++ b/include/linux/platform_data/mmc-omap.h @@ -10,32 +10,8 @@ #define OMAP_MMC_MAX_SLOTS 2 -/* - * struct omap_mmc_dev_attr.flags possibilities - * - * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can - * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag - * should be set if this is the case. See for example Section 22.5.3 - * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia - * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R). - * - * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers - * don't work correctly on some MMC controller instances on some - * OMAP3 SoCs; this flag should be set if this is the case. See - * for example Advisory 2.1.1.128 "MMC: Multiple Block Read - * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_ - * Revision F (October 2010) (SPRZ278F). 
- */ -#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0) -#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1) -#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2) - struct mmc_card; -struct omap_mmc_dev_attr { - u8 flags; -}; - struct omap_mmc_platform_data { /* back-link to device */ struct device *dev; @@ -106,9 +82,6 @@ struct omap_mmc_platform_data { unsigned vcc_aux_disable_is_sleep:1; /* we can put the features above into this variable */ -#define HSMMC_HAS_PBIAS (1 << 0) -#define HSMMC_HAS_UPDATED_RESET (1 << 1) -#define HSMMC_HAS_HSPE_SUPPORT (1 << 2) #define MMC_OMAP7XX (1 << 3) #define MMC_OMAP15XX (1 << 4) #define MMC_OMAP16XX (1 << 5) diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h index 27d3156d093a..9e20c2fb4ffd 100644 --- a/include/linux/platform_data/pxa_sdhci.h +++ b/include/linux/platform_data/pxa_sdhci.h @@ -55,9 +55,4 @@ struct sdhci_pxa_platdata { unsigned int quirks2; unsigned int pm_caps; }; - -struct sdhci_pxa { - u8 clk_enable; - u8 power_mode; -}; #endif /* _PXA_SDHCI_H_ */ diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h index c860c1b314c0..d09275f3cde3 100644 --- a/include/linux/platform_data/serial-omap.h +++ b/include/linux/platform_data/serial-omap.h @@ -38,9 +38,6 @@ struct omap_uart_port_info { unsigned int dma_rx_timeout; unsigned int autosuspend_timeout; unsigned int dma_rx_poll_rate; - int DTR_gpio; - int DTR_inverted; - int DTR_present; int (*get_context_loss_count)(struct device *); void (*enable_wakeup)(struct device *, bool); diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h index 1730312398ff..5087fff96d86 100644 --- a/include/linux/platform_data/st21nfca.h +++ b/include/linux/platform_data/st21nfca.h @@ -24,7 +24,6 @@ #define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" struct st21nfca_nfc_platform_data { - unsigned int gpio_irq; unsigned int gpio_ena; unsigned int irq_polarity; }; diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h index 2d11f1f5efab..c3b432f5b63e 100644 --- a/include/linux/platform_data/st21nfcb.h +++ b/include/linux/platform_data/st21nfcb.h @@ -24,7 +24,6 @@ #define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" struct st21nfcb_nfc_platform_data { - unsigned int gpio_irq; unsigned int gpio_reset; unsigned int irq_polarity; }; diff --git a/include/linux/plist.h b/include/linux/plist.h index 8b6c970cff6c..97883604a3c5 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -176,7 +176,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); * plist_for_each_entry - iterate over list of given type * @pos: the type * to use as a loop counter * @head: the head for your list - * @mem: the name of the list_struct within the struct + * @mem: the name of the list_head within the struct */ #define plist_for_each_entry(pos, head, mem) \ list_for_each_entry(pos, &(head)->node_list, mem.node_list) @@ -185,7 +185,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); * plist_for_each_entry_continue - continue iteration over list of given type * @pos: the type * to use as a loop cursor * @head: the head for your list - * @m: the name of the list_struct within the struct + * @m: the name of the list_head within the struct * * Continue to iterate over list of given type, continuing after * the current position. 
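[Editor's note] The percpu-refcount.h hunk above adds percpu_ref_get_many() and percpu_ref_put_many() so a caller can take or release a batch of references in a single operation instead of looping over percpu_ref_get()/percpu_ref_put(). A minimal sketch of how a caller might use the new helpers follows; the object, function names and the batch size are hypothetical and are not taken from the patch.

/*
 * Hypothetical illustration (not part of the patch): batching reference
 * counts with the percpu_ref_get_many()/percpu_ref_put_many() helpers.
 */
#include <linux/percpu-refcount.h>
#include <linux/gfp.h>

static struct percpu_ref example_ref;		/* hypothetical object */

static void example_release(struct percpu_ref *ref)
{
	/*
	 * Last reference is gone; percpu_ref_exit() and freeing of the
	 * containing object would normally happen here.
	 */
}

static int example_track_batch(unsigned long nr_items)
{
	int err;

	err = percpu_ref_init(&example_ref, example_release, 0, GFP_KERNEL);
	if (err)
		return err;

	/* take one reference per in-flight item in a single operation */
	percpu_ref_get_many(&example_ref, nr_items);

	/* ... items are processed ... */

	/* drop the whole batch at once */
	percpu_ref_put_many(&example_ref, nr_items);

	/*
	 * Drop the implicit initial reference taken by percpu_ref_init();
	 * example_release() runs once the count reaches zero.
	 */
	percpu_ref_kill(&example_ref);
	return 0;
}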
@@ -198,7 +198,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head); * @pos: the type * to use as a loop counter * @n: another type * to use as temporary storage * @head: the head for your list - * @m: the name of the list_struct within the struct + * @m: the name of the list_head within the struct * * Iterate over list of given type, safe against removal of list entry. */ @@ -229,7 +229,7 @@ static inline int plist_node_empty(const struct plist_node *node) * plist_first_entry - get the struct for the first entry * @head: the &struct plist_head pointer * @type: the type of the struct this is embedded in - * @member: the name of the list_struct within the struct + * @member: the name of the list_head within the struct */ #ifdef CONFIG_DEBUG_PI_LIST # define plist_first_entry(head, type, member) \ @@ -246,7 +246,7 @@ static inline int plist_node_empty(const struct plist_node *node) * plist_last_entry - get the struct for the last entry * @head: the &struct plist_head pointer * @type: the type of the struct this is embedded in - * @member: the name of the list_struct within the struct + * @member: the name of the list_head within the struct */ #ifdef CONFIG_DEBUG_PI_LIST # define plist_last_entry(head, type, member) \ diff --git a/include/linux/pm.h b/include/linux/pm.h index 383fd68aaee1..66a656eb335b 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -342,7 +342,7 @@ struct dev_pm_ops { #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #endif -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ .runtime_suspend = suspend_fn, \ .runtime_resume = resume_fn, \ @@ -351,14 +351,7 @@ struct dev_pm_ops { #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) #endif -#ifdef CONFIG_PM -#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ - .runtime_suspend = suspend_fn, \ - .runtime_resume = resume_fn, \ - .runtime_idle = idle_fn, -#else -#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) -#endif +#define SET_PM_RUNTIME_PM_OPS SET_RUNTIME_PM_OPS /* * Use this if you want to use the same suspend and resume callbacks for suspend @@ -538,11 +531,7 @@ enum rpm_request { }; struct wakeup_source; - -struct pm_domain_data { - struct list_head list_node; - struct device *dev; -}; +struct pm_domain_data; struct pm_subsys_data { spinlock_t lock; @@ -576,7 +565,7 @@ struct dev_pm_info { #else unsigned int should_wakeup:1; #endif -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM struct timer_list suspend_timer; unsigned long timer_expires; struct work_struct work; diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 8348866e7b05..0b0039634410 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -18,6 +18,8 @@ struct pm_clk_notifier_block { char *con_ids[]; }; +struct clk; + #ifdef CONFIG_PM_CLK static inline bool pm_clk_no_clocks(struct device *dev) { @@ -29,6 +31,7 @@ extern void pm_clk_init(struct device *dev); extern int pm_clk_create(struct device *dev); extern void pm_clk_destroy(struct device *dev); extern int pm_clk_add(struct device *dev, const char *con_id); +extern int pm_clk_add_clk(struct device *dev, struct clk *clk); extern void pm_clk_remove(struct device *dev, const char *con_id); extern int pm_clk_suspend(struct device *dev); extern int pm_clk_resume(struct device *dev); @@ -51,6 +54,11 @@ static inline int pm_clk_add(struct device *dev, const char *con_id) { return -EINVAL; } + +static inline int pm_clk_add_clk(struct device *dev, struct 
clk *clk) +{ + return -EINVAL; +} static inline void pm_clk_remove(struct device *dev, const char *con_id) { } diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 73e938b7e937..6cd20d5e651b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -17,6 +17,9 @@ #include <linux/notifier.h> #include <linux/cpuidle.h> +/* Defines used for the flags field in the struct generic_pm_domain */ +#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */ + enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ @@ -72,8 +75,11 @@ struct generic_pm_domain { bool max_off_time_changed; bool cached_power_down_ok; struct gpd_cpuidle_data *cpuidle_data; - void (*attach_dev)(struct device *dev); - void (*detach_dev)(struct device *dev); + int (*attach_dev)(struct generic_pm_domain *domain, + struct device *dev); + void (*detach_dev)(struct generic_pm_domain *domain, + struct device *dev); + unsigned int flags; /* Bit field of configs for genpd */ }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) @@ -98,13 +104,18 @@ struct gpd_timing_data { bool cached_stop_ok; }; +struct pm_domain_data { + struct list_head list_node; + struct device *dev; +}; + struct generic_pm_domain_data { struct pm_domain_data base; struct gpd_timing_data td; struct notifier_block nb; struct mutex lock; unsigned int refcount; - bool need_restore; + int need_restore; }; #ifdef CONFIG_PM_GENERIC_DOMAINS @@ -145,6 +156,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd, extern int pm_genpd_poweron(struct generic_pm_domain *genpd); extern int pm_genpd_name_poweron(const char *domain_name); +extern void pm_genpd_poweroff_unused(void); extern struct dev_power_governor simple_qos_governor; extern struct dev_power_governor pm_domain_always_on_gov; @@ -219,6 +231,7 @@ static inline int pm_genpd_name_poweron(const char *domain_name) { return -ENOSYS; } +static inline void pm_genpd_poweroff_unused(void) {} #define simple_qos_governor NULL #define pm_domain_always_on_gov NULL #endif @@ -235,12 +248,6 @@ static inline int pm_genpd_name_add_device(const char *domain_name, return __pm_genpd_name_add_device(domain_name, dev, NULL); } -#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME -extern void pm_genpd_poweroff_unused(void); -#else -static inline void pm_genpd_poweroff_unused(void) {} -#endif - #ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP extern void pm_genpd_syscore_poweroff(struct device *dev); extern void pm_genpd_syscore_poweron(struct device *dev); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 0330217abfad..cec2d4540914 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -21,7 +21,7 @@ struct dev_pm_opp; struct device; enum dev_pm_opp_event { - OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, + OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, }; #if defined(CONFIG_PM_OPP) @@ -44,6 +44,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt); +void dev_pm_opp_remove(struct device *dev, unsigned long freq); int dev_pm_opp_enable(struct device *dev, unsigned long freq); @@ -90,6 +91,10 @@ static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, return -EINVAL; } +static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) +{ +} + static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) { return 
0; @@ -109,11 +114,16 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int of_init_opp_table(struct device *dev); +void of_free_opp_table(struct device *dev); #else static inline int of_init_opp_table(struct device *dev) { return -EINVAL; } + +static inline void of_free_opp_table(struct device *dev) +{ +} #endif #endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 636e82834506..7b3ae0cffc05 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -154,6 +154,23 @@ void dev_pm_qos_constraints_destroy(struct device *dev); int dev_pm_qos_add_ancestor_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value); +int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); +void dev_pm_qos_hide_latency_limit(struct device *dev); +int dev_pm_qos_expose_flags(struct device *dev, s32 value); +void dev_pm_qos_hide_flags(struct device *dev); +int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); + +static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) +{ + return dev->power.qos->resume_latency_req->data.pnode.prio; +} + +static inline s32 dev_pm_qos_requested_flags(struct device *dev) +{ + return dev->power.qos->flags_req->data.flr.flags; +} #else static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) @@ -200,27 +217,6 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev, enum dev_pm_qos_req_type type, s32 value) { return 0; } -#endif - -#ifdef CONFIG_PM_RUNTIME -int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value); -void dev_pm_qos_hide_latency_limit(struct device *dev); -int dev_pm_qos_expose_flags(struct device *dev, s32 value); -void dev_pm_qos_hide_flags(struct device *dev); -int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); -s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); -int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); - -static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) -{ - return dev->power.qos->resume_latency_req->data.pnode.prio; -} - -static inline s32 dev_pm_qos_requested_flags(struct device *dev) -{ - return dev->power.qos->flags_req->data.flr.flags; -} -#else static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) { return 0; } static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 367f49b9a1c9..30e84d48bfea 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -35,16 +35,6 @@ extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); extern int pm_runtime_force_resume(struct device *dev); -#else -static inline bool queue_pm_work(struct work_struct *work) { return false; } - -static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } -static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } -static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } -static inline int pm_runtime_force_resume(struct device *dev) { return 0; } -#endif - -#ifdef CONFIG_PM_RUNTIME extern int 
__pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); @@ -128,7 +118,19 @@ static inline void pm_runtime_mark_last_busy(struct device *dev) ACCESS_ONCE(dev->power.last_busy) = jiffies; } -#else /* !CONFIG_PM_RUNTIME */ +static inline bool pm_runtime_is_irq_safe(struct device *dev) +{ + return dev->power.irq_safe; +} + +#else /* !CONFIG_PM */ + +static inline bool queue_pm_work(struct work_struct *work) { return false; } + +static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } +static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } +static inline int pm_runtime_force_resume(struct device *dev) { return 0; } static inline int __pm_runtime_idle(struct device *dev, int rpmflags) { @@ -167,6 +169,7 @@ static inline bool pm_runtime_enabled(struct device *dev) { return false; } static inline void pm_runtime_no_callbacks(struct device *dev) {} static inline void pm_runtime_irq_safe(struct device *dev) {} +static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } static inline void pm_runtime_mark_last_busy(struct device *dev) {} @@ -179,7 +182,7 @@ static inline unsigned long pm_runtime_autosuspend_expiration( static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} -#endif /* !CONFIG_PM_RUNTIME */ +#endif /* !CONFIG_PM */ static inline int pm_runtime_idle(struct device *dev) { diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h index 07e7945a1ff2..e97fc656a058 100644 --- a/include/linux/power/charger-manager.h +++ b/include/linux/power/charger-manager.h @@ -253,9 +253,6 @@ struct charger_manager { struct device *dev; struct charger_desc *desc; - struct power_supply *fuel_gauge; - struct power_supply **charger_stat; - #ifdef CONFIG_THERMAL struct thermal_zone_device *tzd_batt; #endif diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 3ed049673022..096dbced02ac 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -200,6 +200,12 @@ struct power_supply { void (*external_power_changed)(struct power_supply *psy); void (*set_charged)(struct power_supply *psy); + /* + * Set if thermal zone should not be created for this power supply. + * For example for virtual supplies forwarding calls to actual + * sensors or other supplies. + */ + bool no_thermal; /* For APM emulation, think legacy userspace. */ int use_for_apm; diff --git a/include/linux/printk.h b/include/linux/printk.h index d78125f73ac4..c8f170324e64 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -118,12 +118,13 @@ int no_printk(const char *fmt, ...) #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) void early_printk(const char *fmt, ...); -void early_vprintk(const char *fmt, va_list ap); #else static inline __printf(1, 2) __cold void early_printk(const char *s, ...) { } #endif +typedef int(*printk_func_t)(const char *fmt, va_list args); + #ifdef CONFIG_PRINTK asmlinkage __printf(5, 0) int vprintk_emit(int facility, int level, diff --git a/include/linux/property.h b/include/linux/property.h new file mode 100644 index 000000000000..a6a3d98bd7e9 --- /dev/null +++ b/include/linux/property.h @@ -0,0 +1,143 @@ +/* + * property.h - Unified device property interface. 
+ * + * Copyright (C) 2014, Intel Corporation + * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _LINUX_PROPERTY_H_ +#define _LINUX_PROPERTY_H_ + +#include <linux/types.h> + +struct device; + +enum dev_prop_type { + DEV_PROP_U8, + DEV_PROP_U16, + DEV_PROP_U32, + DEV_PROP_U64, + DEV_PROP_STRING, + DEV_PROP_MAX, +}; + +bool device_property_present(struct device *dev, const char *propname); +int device_property_read_u8_array(struct device *dev, const char *propname, + u8 *val, size_t nval); +int device_property_read_u16_array(struct device *dev, const char *propname, + u16 *val, size_t nval); +int device_property_read_u32_array(struct device *dev, const char *propname, + u32 *val, size_t nval); +int device_property_read_u64_array(struct device *dev, const char *propname, + u64 *val, size_t nval); +int device_property_read_string_array(struct device *dev, const char *propname, + const char **val, size_t nval); +int device_property_read_string(struct device *dev, const char *propname, + const char **val); + +enum fwnode_type { + FWNODE_INVALID = 0, + FWNODE_OF, + FWNODE_ACPI, +}; + +struct fwnode_handle { + enum fwnode_type type; +}; + +bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname); +int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, + const char *propname, u8 *val, + size_t nval); +int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, + const char *propname, u16 *val, + size_t nval); +int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, + const char *propname, u32 *val, + size_t nval); +int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, + const char *propname, u64 *val, + size_t nval); +int fwnode_property_read_string_array(struct fwnode_handle *fwnode, + const char *propname, const char **val, + size_t nval); +int fwnode_property_read_string(struct fwnode_handle *fwnode, + const char *propname, const char **val); + +struct fwnode_handle *device_get_next_child_node(struct device *dev, + struct fwnode_handle *child); + +#define device_for_each_child_node(dev, child) \ + for (child = device_get_next_child_node(dev, NULL); child; \ + child = device_get_next_child_node(dev, child)) + +void fwnode_handle_put(struct fwnode_handle *fwnode); + +unsigned int device_get_child_node_count(struct device *dev); + +static inline bool device_property_read_bool(struct device *dev, + const char *propname) +{ + return device_property_present(dev, propname); +} + +static inline int device_property_read_u8(struct device *dev, + const char *propname, u8 *val) +{ + return device_property_read_u8_array(dev, propname, val, 1); +} + +static inline int device_property_read_u16(struct device *dev, + const char *propname, u16 *val) +{ + return device_property_read_u16_array(dev, propname, val, 1); +} + +static inline int device_property_read_u32(struct device *dev, + const char *propname, u32 *val) +{ + return device_property_read_u32_array(dev, propname, val, 1); +} + +static inline int device_property_read_u64(struct device *dev, + const char *propname, u64 *val) +{ + return device_property_read_u64_array(dev, propname, val, 1); +} + +static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode, + const char *propname) +{ + return 
fwnode_property_present(fwnode, propname); +} + +static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode, + const char *propname, u8 *val) +{ + return fwnode_property_read_u8_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode, + const char *propname, u16 *val) +{ + return fwnode_property_read_u16_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode, + const char *propname, u32 *val) +{ + return fwnode_property_read_u32_array(fwnode, propname, val, 1); +} + +static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode, + const char *propname, u64 *val) +{ + return fwnode_property_read_u64_array(fwnode, propname, val, 1); +} + +#endif /* _LINUX_PROPERTY_H_ */ diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9974975d40db..4af3fdc85b01 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -53,7 +53,8 @@ struct persistent_ram_zone { }; struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, - u32 sig, struct persistent_ram_ecc_info *ecc_info); + u32 sig, struct persistent_ram_ecc_info *ecc_info, + unsigned int memtype); void persistent_ram_free(struct persistent_ram_zone *prz); void persistent_ram_zap(struct persistent_ram_zone *prz); @@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, struct ramoops_platform_data { unsigned long mem_size; unsigned long mem_address; + unsigned int mem_type; unsigned long record_size; unsigned long console_size; unsigned long ftrace_size; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index cc79eff4a1ad..987a73a40ef8 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -52,7 +52,7 @@ extern void ptrace_notify(int exit_code); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent); extern void __ptrace_unlink(struct task_struct *child); -extern void exit_ptrace(struct task_struct *tracer); +extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_READ 0x01 #define PTRACE_MODE_ATTACH 0x02 #define PTRACE_MODE_NOAUDIT 0x04 diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h index 18d75e795606..e1ab6e86cdb3 100644 --- a/include/linux/pxa168_eth.h +++ b/include/linux/pxa168_eth.h @@ -4,6 +4,8 @@ #ifndef __LINUX_PXA168_ETH_H #define __LINUX_PXA168_ETH_H +#include <linux/phy.h> + struct pxa168_eth_platform_data { int port_number; int phy_addr; @@ -13,6 +15,7 @@ struct pxa168_eth_platform_data { */ int speed; /* 0, SPEED_10, SPEED_100 */ int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ + phy_interface_t intf; /* * Override default RX/TX queue sizes if nonzero. 
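[Editor's note] The new include/linux/property.h above introduces the unified device property interface, so the same accessors resolve a property whether it was described via Device Tree or via ACPI _DSD data. Below is a minimal, hypothetical probe-time sketch using a few of the new accessors; the property strings and the function name are invented for illustration and are not part of the patch.

/*
 * Hypothetical illustration (not part of the patch): reading firmware
 * properties through the unified device property API.
 */
#include <linux/device.h>
#include <linux/property.h>

static int example_probe_properties(struct device *dev)
{
	struct fwnode_handle *child;
	const char *label;
	u32 reset_delay_ms = 0;
	int err;

	/* boolean-style property: presence alone carries the information */
	if (!device_property_present(dev, "example,enable"))
		return -ENODEV;

	err = device_property_read_u32(dev, "example,reset-delay-ms",
				       &reset_delay_ms);
	if (err)
		reset_delay_ms = 10;	/* fall back to a default */

	if (!device_property_read_string(dev, "label", &label))
		dev_info(dev, "label: %s\n", label);

	/* walk child firmware nodes regardless of the underlying format */
	device_for_each_child_node(dev, child) {
		u32 reg;

		if (!fwnode_property_read_u32(child, "reg", &reg))
			dev_dbg(dev, "child node reg %u\n", reg);
	}

	return 0;
}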
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index f2b405116166..77aed9ea1d26 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -108,6 +108,25 @@ #define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ #endif +/* QUARK_X1000 SSCR0 bit definition */ +#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */ +#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */ +#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */ +#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */ + +#define RX_THRESH_QUARK_X1000_DFLT 1 +#define TX_THRESH_QUARK_X1000_DFLT 16 + +#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */ +#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */ + +#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */ +#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */ +#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */ +#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */ +#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */ +#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */ + /* extra bits in PXA255, PXA26x and PXA27x SSP ports */ #define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ #define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ @@ -175,6 +194,7 @@ enum pxa_ssp_type { PXA910_SSP, CE4100_SSP, LPSS_SSP, + QUARK_X1000_SSP, }; struct ssp_device { diff --git a/include/linux/quota.h b/include/linux/quota.h index 80d345a3524c..50978b781a19 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -56,6 +56,11 @@ enum quota_type { PRJQUOTA = 2, /* element used for project quotas */ }; +/* Masks for quota types when used as a bitmask */ +#define QTYPE_MASK_USR (1 << USRQUOTA) +#define QTYPE_MASK_GRP (1 << GRPQUOTA) +#define QTYPE_MASK_PRJ (1 << PRJQUOTA) + typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ typedef long long qsize_t; /* Type in which we store sizes */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 1d3eee594cd6..f23538a6e411 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -64,10 +64,10 @@ void dquot_destroy(struct dquot *dquot); int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); void __dquot_free_space(struct inode *inode, qsize_t number, int flags); -int dquot_alloc_inode(const struct inode *inode); +int dquot_alloc_inode(struct inode *inode); int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); -void dquot_free_inode(const struct inode *inode); +void dquot_free_inode(struct inode *inode); void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); int dquot_disable(struct super_block *sb, int type, unsigned int flags); @@ -213,12 +213,12 @@ static inline void dquot_drop(struct inode *inode) { } -static inline int dquot_alloc_inode(const struct inode *inode) +static inline int dquot_alloc_inode(struct inode *inode) { return 0; } -static inline void dquot_free_inode(const struct inode *inode) +static inline void dquot_free_inode(struct inode *inode) { } diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 0a260d8a18bf..18102529254e 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h @@ -17,14 +17,20 @@ struct ratelimit_state { unsigned long begin; }; -#define 
DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ - \ - struct ratelimit_state name = { \ +#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ .interval = interval_init, \ .burst = burst_init, \ } +#define RATELIMIT_STATE_INIT_DISABLED \ + RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) + +#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ + \ + struct ratelimit_state name = \ + RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ + static inline void ratelimit_state_init(struct ratelimit_state *rs, int interval, int burst) { diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 372ad5e0dcb8..529bc946f450 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -241,7 +241,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_entry_rcu - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). @@ -278,7 +278,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_first_or_null_rcu - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. * @@ -296,7 +296,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() @@ -311,7 +311,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * list_for_each_entry_continue_rcu - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. - * @member: the name of the list_struct within the struct. + * @member: the name of the list_head within the struct. * * Continue to iterate over list of given type, continuing after * the current position. @@ -542,6 +542,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ typeof(*(pos)), member)) +/** + * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. 
+ */ +#define hlist_for_each_entry_from_rcu(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ + typeof(*(pos)), member)) #endif /* __KERNEL__ */ #endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 53ff1a752d7e..ed4f5939a452 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -57,7 +57,7 @@ enum rcutorture_type { INVALID_RCU_FLAVOR }; -#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, unsigned long *gpnum, unsigned long *completed); void rcutorture_record_test_transition(void); @@ -260,7 +260,7 @@ static inline int rcu_preempt_depth(void) void rcu_init(void); void rcu_sched_qs(void); void rcu_bh_qs(void); -void rcu_check_callbacks(int cpu, int user); +void rcu_check_callbacks(int user); struct notifier_block; void rcu_idle_enter(void); void rcu_idle_exit(void); @@ -348,8 +348,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu; */ #define cond_resched_rcu_qs() \ do { \ - rcu_note_voluntary_context_switch(current); \ - cond_resched(); \ + if (!cond_resched()) \ + rcu_note_voluntary_context_switch(current); \ } while (0) #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) @@ -365,7 +365,7 @@ typedef void call_rcu_func_t(struct rcu_head *head, void (*func)(struct rcu_head *head)); void wait_rcu_gp(call_rcu_func_t crf); -#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) #include <linux/rcutree.h> #elif defined(CONFIG_TINY_RCU) #include <linux/rcutiny.h> @@ -867,7 +867,7 @@ static inline void rcu_preempt_sleep_check(void) * * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), * it is illegal to block while in an RCU read-side critical section. - * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT + * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT * kernel builds, RCU read-side critical sections may be preempted, * but explicit blocking is illegal. Finally, in preemptible RCU * implementations in real-time (with -rt patchset) kernel builds, RCU @@ -902,7 +902,9 @@ static inline void rcu_read_lock(void) * Unfortunately, this function acquires the scheduler's runqueue and * priority-inheritance spinlocks. This means that deadlock could result * if the caller of rcu_read_unlock() already holds one of these locks or - * any lock that is ever acquired while holding them. + * any lock that is ever acquired while holding them; or any lock which + * can be taken from interrupt context because rcu_boost()->rt_mutex_lock() + * does not disable irqs while taking ->wait_lock. * * That said, RCU readers are never priority boosted unless they were * preempted. 
Therefore, one way to avoid deadlock is to make sure @@ -1062,6 +1064,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) */ #define RCU_INIT_POINTER(p, v) \ do { \ + rcu_dereference_sparse(p, __rcu); \ p = RCU_INITIALIZER(v); \ } while (0) @@ -1118,7 +1121,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) -static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) +static inline int rcu_needs_cpu(unsigned long *delta_jiffies) { *delta_jiffies = ULONG_MAX; return 0; diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 38cc5b1e252d..0e5366200154 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -78,7 +78,7 @@ static inline void kfree_call_rcu(struct rcu_head *head, call_rcu(head, func); } -static inline void rcu_note_context_switch(int cpu) +static inline void rcu_note_context_switch(void) { rcu_sched_qs(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 3e2f5d432743..52953790dcca 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -30,9 +30,9 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H -void rcu_note_context_switch(int cpu); +void rcu_note_context_switch(void); #ifndef CONFIG_RCU_NOCB_CPU_ALL -int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); +int rcu_needs_cpu(unsigned long *delta_jiffies); #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ void rcu_cpu_stall_reset(void); @@ -43,7 +43,7 @@ void rcu_cpu_stall_reset(void); */ static inline void rcu_virt_note_context_switch(int cpu) { - rcu_note_context_switch(cpu); + rcu_note_context_switch(); } void synchronize_rcu_bh(void); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index c5ed83f49c4e..4419b99d8d6e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -27,6 +27,7 @@ struct spmi_device; struct regmap; struct regmap_range_cfg; struct regmap_field; +struct snd_ac97; /* An enum of all the supported cache types */ enum regcache_type { @@ -340,6 +341,8 @@ struct regmap *regmap_init_spmi_ext(struct spmi_device *dev, struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, void __iomem *regs, const struct regmap_config *config); +struct regmap *regmap_init_ac97(struct snd_ac97 *ac97, + const struct regmap_config *config); struct regmap *devm_regmap_init(struct device *dev, const struct regmap_bus *bus, @@ -356,6 +359,10 @@ struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev, struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, void __iomem *regs, const struct regmap_config *config); +struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97, + const struct regmap_config *config); + +bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); /** * regmap_init_mmio(): Initialise register map diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index f540b1496e2f..d17e1ff7ad01 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -101,6 +101,8 @@ struct regmap; * Data passed is "struct pre_voltage_change_data" * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason. * Data passed is old voltage cast to (void *). + * PRE_DISABLE Regulator is about to be disabled + * ABORT_DISABLE Regulator disable failed for some reason * * NOTE: These events can be OR'ed together when passed into handler. 
*/ @@ -115,6 +117,8 @@ struct regmap; #define REGULATOR_EVENT_DISABLE 0x80 #define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100 #define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200 +#define REGULATOR_EVENT_PRE_DISABLE 0x400 +#define REGULATOR_EVENT_ABORT_DISABLE 0x800 /** * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event @@ -284,7 +288,7 @@ devm_regulator_get(struct device *dev, const char *id) static inline struct regulator *__must_check regulator_get_exclusive(struct device *dev, const char *id) { - return NULL; + return ERR_PTR(-ENODEV); } static inline struct regulator *__must_check diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index fc0ee0ce8325..5f1e9ca47417 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -243,6 +243,8 @@ enum regulator_type { * * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator + * + * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode */ struct regulator_desc { const char *name; @@ -285,6 +287,8 @@ struct regulator_desc { unsigned int enable_time; unsigned int off_on_delay; + + unsigned int (*of_map_mode)(unsigned int mode); }; /** @@ -301,6 +305,9 @@ struct regulator_desc { * NULL). * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is * insufficient. + * @ena_gpio_initialized: GPIO controlling regulator enable was properly + * initialized, meaning that >= 0 is a valid gpio + * identifier and < 0 is a non existent gpio. * @ena_gpio: GPIO controlling regulator enable. * @ena_gpio_invert: Sense for GPIO enable control. * @ena_gpio_flags: Flags to use when calling gpio_request_one() @@ -312,6 +319,7 @@ struct regulator_config { struct device_node *of_node; struct regmap *regmap; + bool ena_gpio_initialized; int ena_gpio; unsigned int ena_gpio_invert:1; unsigned int ena_gpio_flags; diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h index f9217965aaa3..763953f7e3b8 100644 --- a/include/linux/regulator/of_regulator.h +++ b/include/linux/regulator/of_regulator.h @@ -6,24 +6,29 @@ #ifndef __LINUX_OF_REG_H #define __LINUX_OF_REG_H +struct regulator_desc; + struct of_regulator_match { const char *name; void *driver_data; struct regulator_init_data *init_data; struct device_node *of_node; + const struct regulator_desc *desc; }; #if defined(CONFIG_OF) extern struct regulator_init_data *of_get_regulator_init_data(struct device *dev, - struct device_node *node); + struct device_node *node, + const struct regulator_desc *desc); extern int of_regulator_match(struct device *dev, struct device_node *node, struct of_regulator_match *matches, unsigned int num_matches); #else static inline struct regulator_init_data *of_get_regulator_init_data(struct device *dev, - struct device_node *node) + struct device_node *node, + const struct regulator_desc *desc) { return NULL; } diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h deleted file mode 100644 index 56b7bc32db4f..000000000000 --- a/include/linux/res_counter.h +++ /dev/null @@ -1,223 +0,0 @@ -#ifndef __RES_COUNTER_H__ -#define __RES_COUNTER_H__ - -/* - * Resource Counters - * Contain common data types and routines for resource accounting - * - * Copyright 2007 OpenVZ SWsoft Inc - * - * Author: Pavel Emelianov <xemul@openvz.org> - * - * See Documentation/cgroups/resource_counter.txt for more - * info about what this counter is. 
- */ - -#include <linux/spinlock.h> -#include <linux/errno.h> - -/* - * The core object. the cgroup that wishes to account for some - * resource may include this counter into its structures and use - * the helpers described beyond - */ - -struct res_counter { - /* - * the current resource consumption level - */ - unsigned long long usage; - /* - * the maximal value of the usage from the counter creation - */ - unsigned long long max_usage; - /* - * the limit that usage cannot exceed - */ - unsigned long long limit; - /* - * the limit that usage can be exceed - */ - unsigned long long soft_limit; - /* - * the number of unsuccessful attempts to consume the resource - */ - unsigned long long failcnt; - /* - * the lock to protect all of the above. - * the routines below consider this to be IRQ-safe - */ - spinlock_t lock; - /* - * Parent counter, used for hierarchial resource accounting - */ - struct res_counter *parent; -}; - -#define RES_COUNTER_MAX ULLONG_MAX - -/** - * Helpers to interact with userspace - * res_counter_read_u64() - returns the value of the specified member. - * res_counter_read/_write - put/get the specified fields from the - * res_counter struct to/from the user - * - * @counter: the counter in question - * @member: the field to work with (see RES_xxx below) - * @buf: the buffer to opeate on,... - * @nbytes: its size... - * @pos: and the offset. - */ - -u64 res_counter_read_u64(struct res_counter *counter, int member); - -ssize_t res_counter_read(struct res_counter *counter, int member, - const char __user *buf, size_t nbytes, loff_t *pos, - int (*read_strategy)(unsigned long long val, char *s)); - -int res_counter_memparse_write_strategy(const char *buf, - unsigned long long *res); - -/* - * the field descriptors. one for each member of res_counter - */ - -enum { - RES_USAGE, - RES_MAX_USAGE, - RES_LIMIT, - RES_FAILCNT, - RES_SOFT_LIMIT, -}; - -/* - * helpers for accounting - */ - -void res_counter_init(struct res_counter *counter, struct res_counter *parent); - -/* - * charge - try to consume more resource. - * - * @counter: the counter - * @val: the amount of the resource. each controller defines its own - * units, e.g. numbers, bytes, Kbytes, etc - * - * returns 0 on success and <0 if the counter->usage will exceed the - * counter->limit - * - * charge_nofail works the same, except that it charges the resource - * counter unconditionally, and returns < 0 if the after the current - * charge we are over limit. - */ - -int __must_check res_counter_charge(struct res_counter *counter, - unsigned long val, struct res_counter **limit_fail_at); -int res_counter_charge_nofail(struct res_counter *counter, - unsigned long val, struct res_counter **limit_fail_at); - -/* - * uncharge - tell that some portion of the resource is released - * - * @counter: the counter - * @val: the amount of the resource - * - * these calls check for usage underflow and show a warning on the console - * - * returns the total charges still present in @counter. - */ - -u64 res_counter_uncharge(struct res_counter *counter, unsigned long val); - -u64 res_counter_uncharge_until(struct res_counter *counter, - struct res_counter *top, - unsigned long val); -/** - * res_counter_margin - calculate chargeable space of a counter - * @cnt: the counter - * - * Returns the difference between the hard limit and the current usage - * of resource counter @cnt. 
- */ -static inline unsigned long long res_counter_margin(struct res_counter *cnt) -{ - unsigned long long margin; - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - if (cnt->limit > cnt->usage) - margin = cnt->limit - cnt->usage; - else - margin = 0; - spin_unlock_irqrestore(&cnt->lock, flags); - return margin; -} - -/** - * Get the difference between the usage and the soft limit - * @cnt: The counter - * - * Returns 0 if usage is less than or equal to soft limit - * The difference between usage and soft limit, otherwise. - */ -static inline unsigned long long -res_counter_soft_limit_excess(struct res_counter *cnt) -{ - unsigned long long excess; - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - if (cnt->usage <= cnt->soft_limit) - excess = 0; - else - excess = cnt->usage - cnt->soft_limit; - spin_unlock_irqrestore(&cnt->lock, flags); - return excess; -} - -static inline void res_counter_reset_max(struct res_counter *cnt) -{ - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - cnt->max_usage = cnt->usage; - spin_unlock_irqrestore(&cnt->lock, flags); -} - -static inline void res_counter_reset_failcnt(struct res_counter *cnt) -{ - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - cnt->failcnt = 0; - spin_unlock_irqrestore(&cnt->lock, flags); -} - -static inline int res_counter_set_limit(struct res_counter *cnt, - unsigned long long limit) -{ - unsigned long flags; - int ret = -EBUSY; - - spin_lock_irqsave(&cnt->lock, flags); - if (cnt->usage <= limit) { - cnt->limit = limit; - ret = 0; - } - spin_unlock_irqrestore(&cnt->lock, flags); - return ret; -} - -static inline int -res_counter_set_soft_limit(struct res_counter *cnt, - unsigned long long soft_limit) -{ - unsigned long flags; - - spin_lock_irqsave(&cnt->lock, flags); - cnt->soft_limit = soft_limit; - spin_unlock_irqrestore(&cnt->lock, flags); - return 0; -} - -#endif diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index 41a4695fde08..ce6b962ffed4 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -12,11 +12,13 @@ struct reset_controller_dev; * things to reset the device * @assert: manually assert the reset line, if supported * @deassert: manually deassert the reset line, if supported + * @status: return the status of the reset line, if supported */ struct reset_control_ops { int (*reset)(struct reset_controller_dev *rcdev, unsigned long id); int (*assert)(struct reset_controller_dev *rcdev, unsigned long id); int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id); + int (*status)(struct reset_controller_dev *rcdev, unsigned long id); }; struct module; diff --git a/include/linux/reset.h b/include/linux/reset.h index 349f150ae12c..da5602bd77d7 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -10,6 +10,7 @@ struct reset_control; int reset_control_reset(struct reset_control *rstc); int reset_control_assert(struct reset_control *rstc); int reset_control_deassert(struct reset_control *rstc); +int reset_control_status(struct reset_control *rstc); struct reset_control *reset_control_get(struct device *dev, const char *id); void reset_control_put(struct reset_control *rstc); @@ -57,6 +58,12 @@ static inline int reset_control_deassert(struct reset_control *rstc) return 0; } +static inline int reset_control_status(struct reset_control *rstc) +{ + WARN_ON(1); + return 0; +} + static inline void reset_control_put(struct reset_control *rstc) { WARN_ON(1); diff --git 
a/include/linux/rhashtable.h b/include/linux/rhashtable.h index fb298e9d6d3a..b93fd89b2e5e 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -65,7 +65,10 @@ struct rhashtable_params { size_t new_size); bool (*shrink_decision)(const struct rhashtable *ht, size_t new_size); - int (*mutex_is_held)(void); +#ifdef CONFIG_PROVE_LOCKING + int (*mutex_is_held)(void *parent); + void *parent; +#endif }; /** @@ -96,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); -void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); -bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); +void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); +bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, - struct rhash_head __rcu **pprev, gfp_t flags); + struct rhash_head __rcu **pprev); bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); -int rhashtable_expand(struct rhashtable *ht, gfp_t flags); -int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); +int rhashtable_expand(struct rhashtable *ht); +int rhashtable_shrink(struct rhashtable *ht); void *rhashtable_lookup(const struct rhashtable *ht, const void *key); void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 49a4d6f59108..e2c13cd863bd 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k __ring_buffer_alloc((size), (flags), &__key); \ }) -int ring_buffer_wait(struct ring_buffer *buffer, int cpu); +int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full); int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index c2c28975293c..6d6be09a2fe5 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -19,11 +19,28 @@ extern int rtc_month_days(unsigned int month, unsigned int year); extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year); extern int rtc_valid_tm(struct rtc_time *tm); -extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); -extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); +extern time64_t rtc_tm_to_time64(struct rtc_time *tm); +extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm); ktime_t rtc_tm_to_ktime(struct rtc_time tm); struct rtc_time rtc_ktime_to_tm(ktime_t kt); +/** + * Deprecated. Use rtc_time64_to_tm(). + */ +static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm) +{ + rtc_time64_to_tm(time, tm); +} + +/** + * Deprecated. Use rtc_tm_to_time64(). 
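rtc_tm_to_time() and rtc_time_to_tm() become thin wrappers around the new 64-bit safe rtc_tm_to_time64() / rtc_time64_to_tm(). A round-trip sketch using the new interfaces (the date is illustrative):

#include <linux/rtc.h>

static time64_t foo_rtc_roundtrip(void)
{
	struct rtc_time tm;

	rtc_time64_to_tm(1420070400, &tm);	/* 2015-01-01 00:00:00 UTC */
	return rtc_tm_to_time64(&tm);		/* back to seconds, no 2038 truncation */
}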
+ */ +static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time) +{ + *time = rtc_tm_to_time64(tm); + + return 0; +} #include <linux/device.h> #include <linux/seq_file.h> diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 6cacbce1a06c..5db76a32fcab 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -17,6 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id, long expires, u32 error); void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); +struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, + unsigned change, gfp_t flags); +void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, + gfp_t flags); + /* RTNL is used as a global lock for all changes to network configuration */ extern void rtnl_lock(void); @@ -94,12 +99,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - u16 flags); + u16 vid, + u16 flags); extern int ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr); + const unsigned char *addr, + u16 vid); extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u16 mode); + struct net_device *dev, u16 mode, + u32 flags, u32 mask); #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 5e344bbe63ec..8db31ef98d2f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -243,6 +243,43 @@ extern char ___assert_task_state[1 - 2*!!( ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ (task->flags & PF_FROZEN) == 0) +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + +#define __set_task_state(tsk, state_value) \ + do { \ + (tsk)->task_state_change = _THIS_IP_; \ + (tsk)->state = (state_value); \ + } while (0) +#define set_task_state(tsk, state_value) \ + do { \ + (tsk)->task_state_change = _THIS_IP_; \ + set_mb((tsk)->state, (state_value)); \ + } while (0) + +/* + * set_current_state() includes a barrier so that the write of current->state + * is correctly serialised wrt the caller's subsequent test of whether to + * actually sleep: + * + * set_current_state(TASK_UNINTERRUPTIBLE); + * if (do_i_need_to_sleep()) + * schedule(); + * + * If the caller does not need such serialisation then use __set_current_state() + */ +#define __set_current_state(state_value) \ + do { \ + current->task_state_change = _THIS_IP_; \ + current->state = (state_value); \ + } while (0) +#define set_current_state(state_value) \ + do { \ + current->task_state_change = _THIS_IP_; \ + set_mb(current->state, (state_value)); \ + } while (0) + +#else + #define __set_task_state(tsk, state_value) \ do { (tsk)->state = (state_value); } while (0) #define set_task_state(tsk, state_value) \ @@ -259,11 +296,13 @@ extern char ___assert_task_state[1 - 2*!!( * * If the caller does not need such serialisation then use __set_current_state() */ -#define __set_current_state(state_value) \ +#define __set_current_state(state_value) \ do { current->state = (state_value); } while (0) -#define set_current_state(state_value) \ +#define set_current_state(state_value) \ set_mb(current->state, (state_value)) +#endif + /* Task command name length */ #define TASK_COMM_LEN 16 @@ -1278,9 +1317,9 @@ struct task_struct { union rcu_special rcu_read_unlock_special; struct list_head rcu_node_entry; #endif /* #ifdef CONFIG_PREEMPT_RCU */ -#ifdef CONFIG_TREE_PREEMPT_RCU +#ifdef CONFIG_PREEMPT_RCU struct 
rcu_node *rcu_blocked_node; -#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ +#endif /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TASKS_RCU unsigned long rcu_tasks_nvcsw; bool rcu_tasks_holdout; @@ -1325,6 +1364,10 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; +#ifdef CONFIG_MEMCG_KMEM + unsigned memcg_kmem_skip_account:1; +#endif + unsigned long atomic_flags; /* Flags needing atomic access. */ pid_t pid; @@ -1558,28 +1601,23 @@ struct task_struct { struct numa_group *numa_group; /* - * Exponential decaying average of faults on a per-node basis. - * Scheduling placement decisions are made based on the these counts. - * The values remain static for the duration of a PTE scan + * numa_faults is an array split into four regions: + * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer + * in this precise order. + * + * faults_memory: Exponential decaying average of faults on a per-node + * basis. Scheduling placement decisions are made based on these + * counts. The values remain static for the duration of a PTE scan. + * faults_cpu: Track the nodes the process was running on when a NUMA + * hinting fault was incurred. + * faults_memory_buffer and faults_cpu_buffer: Record faults per node + * during the current scan window. When the scan completes, the counts + * in faults_memory and faults_cpu decay and these values are copied. */ - unsigned long *numa_faults_memory; + unsigned long *numa_faults; unsigned long total_numa_faults; /* - * numa_faults_buffer records faults per node during the current - * scan window. When the scan completes, the counts in - * numa_faults_memory decay and these values are copied. - */ - unsigned long *numa_faults_buffer_memory; - - /* - * Track the nodes the process was running on when a NUMA hinting - * fault was incurred. - */ - unsigned long *numa_faults_cpu; - unsigned long *numa_faults_buffer_cpu; - - /* * numa_faults_locality tracks if faults recorded during the last * scan window were remote/local. The task scan period is adapted * based on the locality of the faults with different weights @@ -1645,8 +1683,7 @@ struct task_struct { /* bitmask and counter of trace recursion */ unsigned long trace_recursion; #endif /* CONFIG_TRACING */ -#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ - unsigned int memcg_kmem_skip_account; +#ifdef CONFIG_MEMCG struct memcg_oom_info { struct mem_cgroup *memcg; gfp_t gfp_mask; @@ -1661,6 +1698,9 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + unsigned long task_state_change; +#endif }; /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ @@ -2052,6 +2092,10 @@ static inline void tsk_restore_flags(struct task_struct *task, task->flags |= orig_flags & flags; } +extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, + const struct cpumask *trial); +extern int task_can_attach(struct task_struct *p, + const struct cpumask *cs_cpus_allowed); #ifdef CONFIG_SMP extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); @@ -2441,6 +2485,10 @@ extern void do_group_exit(int); extern int do_execve(struct filename *, const char __user * const __user *, const char __user * const __user *); +extern int do_execveat(int, struct filename *, + const char __user * const __user *, + const char __user * const __user *, + int); extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); @@ -2760,7 +2808,7 @@ static inline int signal_pending_state(long state, struct task_struct *p) extern int _cond_resched(void); #define cond_resched() ({ \ - __might_sleep(__FILE__, __LINE__, 0); \ + ___might_sleep(__FILE__, __LINE__, 0); \ _cond_resched(); \ }) @@ -2773,14 +2821,14 @@ extern int __cond_resched_lock(spinlock_t *lock); #endif #define cond_resched_lock(lock) ({ \ - __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ + ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ __cond_resched_lock(lock); \ }) extern int __cond_resched_softirq(void); #define cond_resched_softirq() ({ \ - __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ __cond_resched_softirq(); \ }) diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h new file mode 100644 index 000000000000..9aafe0e24c68 --- /dev/null +++ b/include/linux/seq_buf.h @@ -0,0 +1,136 @@ +#ifndef _LINUX_SEQ_BUF_H +#define _LINUX_SEQ_BUF_H + +#include <linux/fs.h> + +/* + * Trace sequences are used to allow a function to call several other functions + * to create a string of data to use. + */ + +/** + * seq_buf - seq buffer structure + * @buffer: pointer to the buffer + * @size: size of the buffer + * @len: the amount of data inside the buffer + * @readpos: The next position to read in the buffer. + */ +struct seq_buf { + char *buffer; + size_t size; + size_t len; + loff_t readpos; +}; + +static inline void seq_buf_clear(struct seq_buf *s) +{ + s->len = 0; + s->readpos = 0; +} + +static inline void +seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size) +{ + s->buffer = buf; + s->size = size; + seq_buf_clear(s); +} + +/* + * seq_buf have a buffer that might overflow. When this happens + * the len and size are set to be equal. + */ +static inline bool +seq_buf_has_overflowed(struct seq_buf *s) +{ + return s->len > s->size; +} + +static inline void +seq_buf_set_overflow(struct seq_buf *s) +{ + s->len = s->size + 1; +} + +/* + * How much buffer is left on the seq_buf? + */ +static inline unsigned int +seq_buf_buffer_left(struct seq_buf *s) +{ + if (seq_buf_has_overflowed(s)) + return 0; + + return s->size - s->len; +} + +/* How much buffer was written? */ +static inline unsigned int seq_buf_used(struct seq_buf *s) +{ + return min(s->len, s->size); +} + +/** + * seq_buf_get_buf - get buffer to write arbitrary data to + * @s: the seq_buf handle + * @bufp: the beginning of the buffer is stored here + * + * Return the number of bytes available in the buffer, or zero if + * there's no space. 
+ */ +static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp) +{ + WARN_ON(s->len > s->size + 1); + + if (s->len < s->size) { + *bufp = s->buffer + s->len; + return s->size - s->len; + } + + *bufp = NULL; + return 0; +} + +/** + * seq_buf_commit - commit data to the buffer + * @s: the seq_buf handle + * @num: the number of bytes to commit + * + * Commit @num bytes of data written to a buffer previously acquired + * by seq_buf_get. To signal an error condition, or that the data + * didn't fit in the available space, pass a negative @num value. + */ +static inline void seq_buf_commit(struct seq_buf *s, int num) +{ + if (num < 0) { + seq_buf_set_overflow(s); + } else { + /* num must be negative on overflow */ + BUG_ON(s->len + num > s->size); + s->len += num; + } +} + +extern __printf(2, 3) +int seq_buf_printf(struct seq_buf *s, const char *fmt, ...); +extern __printf(2, 0) +int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args); +extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s); +extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, + int cnt); +extern int seq_buf_puts(struct seq_buf *s, const char *str); +extern int seq_buf_putc(struct seq_buf *s, unsigned char c); +extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len); +extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, + unsigned int len); +extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); + +extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp, + int nmaskbits); + +#ifdef CONFIG_BINARY_PRINTF +extern int +seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary); +#endif + +#endif /* _LINUX_SEQ_BUF_H */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 52e0097f61f0..cf6a9daaaf6d 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -43,6 +43,21 @@ struct seq_operations { #define SEQ_SKIP 1 /** + * seq_has_overflowed - check if the buffer has overflowed + * @m: the seq_file handle + * + * seq_files have a buffer which may overflow. When this happens a larger + * buffer is reallocated and all the data will be printed again. + * The overflow state is true when m->count == m->size. + * + * Returns true if the buffer received more than it can hold. 
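seq_buf is the new standalone descriptor that trace_seq (later in this series) is rebuilt on. A minimal sketch of filling one from a fixed buffer using the helpers declared above (foo_fill() is hypothetical):

#include <linux/kernel.h>
#include <linux/seq_buf.h>

static void foo_fill(void)
{
	char buf[64];
	struct seq_buf s;

	seq_buf_init(&s, buf, sizeof(buf));
	seq_buf_printf(&s, "hello %d\n", 42);
	seq_buf_puts(&s, "done\n");

	if (seq_buf_has_overflowed(&s))
		pr_warn("seq_buf truncated\n");
	else
		pr_info("%.*s", (int)seq_buf_used(&s), buf);
}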
+ */ +static inline bool seq_has_overflowed(struct seq_file *m) +{ + return m->count == m->size; +} + +/** * seq_get_buf - get buffer to write arbitrary data to * @m: the seq_file handle * @bufp: the beginning of the buffer is stored here diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 68c097077ef0..f4aee75f00b1 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -18,8 +18,6 @@ struct shrink_control { */ unsigned long nr_to_scan; - /* shrink from these nodes */ - nodemask_t nodes_to_scan; /* current node being shrunk (for NUMA aware shrinkers) */ int nid; }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6c8b6f604e76..85ab7d72b54c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -20,6 +20,8 @@ #include <linux/time.h> #include <linux/bug.h> #include <linux/cache.h> +#include <linux/rbtree.h> +#include <linux/socket.h> #include <linux/atomic.h> #include <asm/types.h> @@ -148,6 +150,8 @@ struct net_device; struct scatterlist; struct pipe_inode_info; +struct iov_iter; +struct napi_struct; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -341,7 +345,6 @@ enum { SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ - SKB_FCLONE_FREE, /* this companion fclone skb is available */ }; enum { @@ -370,8 +373,7 @@ enum { SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, - SKB_GSO_MPLS = 1 << 12, - + SKB_GSO_TUNNEL_REMCSUM = 1 << 12, }; #if BITS_PER_LONG > 32 @@ -440,6 +442,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @next: Next buffer in list * @prev: Previous buffer in list * @tstamp: Time we arrived/left + * @rbnode: RB tree node, alternative to next/prev for netem/tcp * @sk: Socket we are owned by * @dev: Device we arrived on/are leaving by * @cb: Control buffer. Free for use by every layer. Put private vars here @@ -504,15 +507,19 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, */ struct sk_buff { - /* These two members must be first. */ - struct sk_buff *next; - struct sk_buff *prev; - union { - ktime_t tstamp; - struct skb_mstamp skb_mstamp; + struct { + /* These two members must be first. 
*/ + struct sk_buff *next; + struct sk_buff *prev; + + union { + ktime_t tstamp; + struct skb_mstamp skb_mstamp; + }; + }; + struct rb_node rbnode; /* used in netem & tcp stack */ }; - struct sock *sk; struct net_device *dev; @@ -597,7 +604,8 @@ struct sk_buff { #endif __u8 ipvs_property:1; __u8 inner_protocol_type:1; - /* 4 or 6 bit hole */ + __u8 remcsum_offload:1; + /* 3 or 5 bit hole */ #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -666,6 +674,7 @@ struct sk_buff { #define SKB_ALLOC_FCLONE 0x01 #define SKB_ALLOC_RX 0x02 +#define SKB_ALLOC_NAPI 0x04 /* Returns true if the skb was allocated from PFMEMALLOC reserves */ static inline bool skb_pfmemalloc(const struct sk_buff *skb) @@ -710,9 +719,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) skb->_skb_refdst = (unsigned long)dst; } -void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, - bool force); - /** * skb_dst_set_noref - sets skb dst, hopefully, without taking reference * @skb: buffer @@ -725,24 +731,8 @@ void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, */ static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) { - __skb_dst_set_noref(skb, dst, false); -} - -/** - * skb_dst_set_noref_force - sets skb dst, without taking reference - * @skb: buffer - * @dst: dst entry - * - * Sets skb dst, assuming a reference was not taken on dst. - * No reference is taken and no dst_release will be called. While for - * cached dsts deferred reclaim is a basic feature, for entries that are - * not cached it is caller's job to guarantee that last dst_release for - * provided dst happens when nobody uses it, eg. after a RCU grace period. - */ -static inline void skb_dst_set_noref_force(struct sk_buff *skb, - struct dst_entry *dst) -{ - __skb_dst_set_noref(skb, dst, true); + WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; } /** @@ -810,7 +800,7 @@ static inline bool skb_fclone_busy(const struct sock *sk, fclones = container_of(skb, struct sk_buff_fclones, skb1); return skb->fclone == SKB_FCLONE_ORIG && - fclones->skb2.fclone == SKB_FCLONE_CLONE && + atomic_read(&fclones->fclone_ref) > 1 && fclones->skb2.sk == sk; } @@ -2176,47 +2166,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); } +void *napi_alloc_frag(unsigned int fragsz); +struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, + unsigned int length, gfp_t gfp_mask); +static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, + unsigned int length) +{ + return __napi_alloc_skb(napi, length, GFP_ATOMIC); +} + /** - * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data - * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX - * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used - * @order: size of the allocation + * __dev_alloc_pages - allocate page for network Rx + * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx + * @order: size of the allocation * - * Allocate a new page. + * Allocate a new page. * - * %NULL is returned if there is no free memory. + * %NULL is returned if there is no free memory. 
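napi_alloc_frag()/napi_alloc_skb() above give drivers an allocator intended for NAPI poll context (see the new SKB_ALLOC_NAPI flag). A sketch of an Rx handler using it — foo_rx() and pkt_len are hypothetical driver names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void foo_rx(struct napi_struct *napi, unsigned int pkt_len)
{
	struct sk_buff *skb;

	/* allocate from NAPI/softirq context */
	skb = napi_alloc_skb(napi, pkt_len);
	if (!skb)
		return;		/* drop: out of memory */

	/* ... copy or attach the received data, set skb->protocol ... */
	napi_gro_receive(napi, skb);
}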
*/ -static inline struct page *__skb_alloc_pages(gfp_t gfp_mask, - struct sk_buff *skb, - unsigned int order) -{ - struct page *page; - - gfp_mask |= __GFP_COLD; - - if (!(gfp_mask & __GFP_NOMEMALLOC)) - gfp_mask |= __GFP_MEMALLOC; +static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, + unsigned int order) +{ + /* This piece of code contains several assumptions. + * 1. This is for device Rx, therefor a cold page is preferred. + * 2. The expectation is the user wants a compound page. + * 3. If requesting a order 0 page it will not be compound + * due to the check to see if order has a value in prep_new_page + * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to + * code in gfp_to_alloc_flags that should be enforcing this. + */ + gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC; - page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); - if (skb && page && page->pfmemalloc) - skb->pfmemalloc = true; + return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); +} - return page; +static inline struct page *dev_alloc_pages(unsigned int order) +{ + return __dev_alloc_pages(GFP_ATOMIC, order); } /** - * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data - * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX - * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used + * __dev_alloc_page - allocate a page for network Rx + * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx * - * Allocate a new page. + * Allocate a new page. * - * %NULL is returned if there is no free memory. + * %NULL is returned if there is no free memory. */ -static inline struct page *__skb_alloc_page(gfp_t gfp_mask, - struct sk_buff *skb) +static inline struct page *__dev_alloc_page(gfp_t gfp_mask) +{ + return __dev_alloc_pages(gfp_mask, 0); +} + +static inline struct page *dev_alloc_page(void) { - return __skb_alloc_pages(gfp_mask, skb, 0); + return __dev_alloc_page(GFP_ATOMIC); } /** @@ -2448,7 +2452,6 @@ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. */ - static inline int skb_padto(struct sk_buff *skb, unsigned int len) { unsigned int size = skb->len; @@ -2457,6 +2460,29 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) return skb_pad(skb, len - size); } +/** + * skb_put_padto - increase size and pad an skbuff up to a minimal size + * @skb: buffer to pad + * @len: minimal length + * + * Pads up a buffer to ensure the trailing bytes exist and are + * blanked. If the buffer already contains sufficient data it + * is untouched. Otherwise it is extended. Returns zero on + * success. The skb is freed on error. 
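The __skb_alloc_page()/__skb_alloc_pages() helpers are replaced by __dev_alloc_page()/__dev_alloc_pages(), which no longer take an skb to propagate pfmemalloc. A sketch of an Rx-ring refill using the new call (foo_refill_rx() is hypothetical):

#include <linux/gfp.h>
#include <linux/skbuff.h>

static struct page *foo_refill_rx(void)
{
	struct page *page;

	/* effectively GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC */
	page = dev_alloc_page();
	if (!page)
		return NULL;

	/* ... map the page for DMA and post it to the Rx ring ... */
	return page;
}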
+ */ +static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +{ + unsigned int size = skb->len; + + if (unlikely(size < len)) { + len -= size; + if (skb_pad(skb, len)) + return -ENOMEM; + __skb_put(skb, len); + } + return 0; +} + static inline int skb_add_data(struct sk_buff *skb, char __user *from, int copy) { @@ -2629,18 +2655,18 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err); unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); -int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, - struct iovec *to, int size); -int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, - struct iovec *iov); -int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, - const struct iovec *from, int from_offset, - int len); -int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm, - int offset, size_t count); -int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, - const struct iovec *to, int to_offset, - int size); +int skb_copy_datagram_iter(const struct sk_buff *from, int offset, + struct iov_iter *to, int size); +static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, + struct msghdr *msg, int size) +{ + return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); +} +int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen, + struct msghdr *msg); +int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, + struct iov_iter *from, int len); +int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); void skb_free_datagram(struct sock *sk, struct sk_buff *skb); void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); @@ -2661,6 +2687,20 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet); unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *skb_vlan_untag(struct sk_buff *skb); +int skb_ensure_writable(struct sk_buff *skb, int write_len); +int skb_vlan_pop(struct sk_buff *skb); +int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci); + +static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len) +{ + /* XXX: stripping const */ + return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len); +} + +static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len) +{ + return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT; +} struct skb_checksum_ops { __wsum (*update)(const void *mem, int len, __wsum wsum); diff --git a/include/linux/slab.h b/include/linux/slab.h index c265bec6a57d..9a139b637069 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -493,7 +493,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) * @memcg: pointer to the memcg this cache belongs to * @list: list_head for the list of all caches in this memcg * @root_cache: pointer to the global, root cache, this cache was derived from - * @nr_pages: number of pages that belongs to this cache. 
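skb_put_padto(), added above, differs from skb_padto() in that it also advances skb->len over the padding, which is what most transmit paths want. A typical xmit-path sketch (foo_xmit() is hypothetical; ETH_ZLEN is the usual minimum Ethernet frame size):

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* pad short frames; on failure the skb has already been freed */
	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* ... hand the (now >= ETH_ZLEN byte) frame to the hardware ... */
	return NETDEV_TX_OK;
}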
*/ struct memcg_cache_params { bool is_root_cache; @@ -506,17 +505,12 @@ struct memcg_cache_params { struct mem_cgroup *memcg; struct list_head list; struct kmem_cache *root_cache; - atomic_t nr_pages; }; }; }; int memcg_update_all_caches(int num_memcgs); -struct seq_file; -int cache_show(struct kmem_cache *s, struct seq_file *m); -void print_slabinfo_header(struct seq_file *m); - /** * kmalloc_array - allocate memory for an array. * @n: number of elements. diff --git a/include/linux/socket.h b/include/linux/socket.h index ec538fc287a6..6e49a14365dc 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -47,16 +47,25 @@ struct linger { struct msghdr { void *msg_name; /* ptr to socket address structure */ int msg_namelen; /* size of socket address structure */ - struct iovec *msg_iov; /* scatter/gather array */ - __kernel_size_t msg_iovlen; /* # elements in msg_iov */ + struct iov_iter msg_iter; /* data */ void *msg_control; /* ancillary data */ __kernel_size_t msg_controllen; /* ancillary data buffer length */ unsigned int msg_flags; /* flags on received message */ }; + +struct user_msghdr { + void __user *msg_name; /* ptr to socket address structure */ + int msg_namelen; /* size of socket address structure */ + struct iovec __user *msg_iov; /* scatter/gather array */ + __kernel_size_t msg_iovlen; /* # elements in msg_iov */ + void __user *msg_control; /* ancillary data */ + __kernel_size_t msg_controllen; /* ancillary data buffer length */ + unsigned int msg_flags; /* flags on received message */ +}; /* For recvmmsg/sendmmsg */ struct mmsghdr { - struct msghdr msg_hdr; + struct user_msghdr msg_hdr; unsigned int msg_len; }; @@ -94,6 +103,10 @@ struct cmsghdr { (cmsg)->cmsg_len <= (unsigned long) \ ((mhdr)->msg_controllen - \ ((char *)(cmsg) - (char *)(mhdr)->msg_control))) +#define for_each_cmsghdr(cmsg, msg) \ + for (cmsg = CMSG_FIRSTHDR(msg); \ + cmsg; \ + cmsg = CMSG_NXTHDR(msg, cmsg)) /* * Get the next cmsg header @@ -256,7 +269,7 @@ struct ucred { #define MSG_EOF MSG_FIN #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ -#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file +#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file descriptor received through SCM_RIGHTS */ #if defined(CONFIG_COMPAT) @@ -312,15 +325,14 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, extern unsigned long iov_pages(const struct iovec *iov, int offset, unsigned long nr_segs); -extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); struct timespec; /* The __sys_...msg variants allow MSG_CMSG_COMPAT */ -extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); -extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); +extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); +extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 46d188a9947c..a6ef2a8e6de4 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -1049,4 +1049,10 @@ spi_unregister_device(struct 
spi_device *spi) extern const struct spi_device_id * spi_get_device_id(const struct spi_device *sdev); +static inline bool +spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer) +{ + return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers); +} + #endif /* __LINUX_SPI_H */ diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 115b570e3bff..669045ab73f3 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -1,6 +1,8 @@ #ifndef __LINUX_STACKTRACE_H #define __LINUX_STACKTRACE_H +#include <linux/types.h> + struct task_struct; struct pt_regs; @@ -20,6 +22,8 @@ extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); extern void print_stack_trace(struct stack_trace *trace, int spaces); +extern int snprint_stack_trace(char *buf, size_t size, + struct stack_trace *trace, int spaces); #ifdef CONFIG_USER_STACKTRACE_SUPPORT extern void save_stack_trace_user(struct stack_trace *trace); @@ -32,6 +36,7 @@ extern void save_stack_trace_user(struct stack_trace *trace); # define save_stack_trace_tsk(tsk, trace) do { } while (0) # define save_stack_trace_user(trace) do { } while (0) # define print_stack_trace(trace, spaces) do { } while (0) +# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0) #endif #endif diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 8e030075fe79..a7cbb570cc5c 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -53,7 +53,7 @@ struct rpc_cred { struct rcu_head cr_rcu; struct rpc_auth * cr_auth; const struct rpc_credops *cr_ops; -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) unsigned long cr_magic; /* 0x0f4aa4f0 */ #endif unsigned long cr_expire; /* when to gc */ diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 70736b98c721..d86acc63b25f 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -63,6 +63,9 @@ struct rpc_clnt { struct rpc_rtt cl_rtt_default; struct rpc_timeout cl_timeout_default; const struct rpc_program *cl_program; +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + struct dentry *cl_debugfs; /* debugfs directory */ +#endif }; /* @@ -176,5 +179,6 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); +const char *rpc_proc_name(const struct rpc_task *task); #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 9385bd74c860..c57d8ea0716c 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -10,22 +10,10 @@ #include <uapi/linux/sunrpc/debug.h> - -/* - * Enable RPC debugging/profiling. - */ -#ifdef CONFIG_SUNRPC_DEBUG -#define RPC_DEBUG -#endif -#ifdef CONFIG_TRACEPOINTS -#define RPC_TRACEPOINTS -#endif -/* #define RPC_PROFILE */ - /* * Debugging macros etc */ -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) extern unsigned int rpc_debug; extern unsigned int nfs_debug; extern unsigned int nfsd_debug; @@ -36,7 +24,7 @@ extern unsigned int nlm_debug; #define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) #undef ifdebug -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) # define dfprintk(fac, args...) 
\ @@ -65,9 +53,55 @@ extern unsigned int nlm_debug; /* * Sysctl interface for RPC debugging */ -#ifdef RPC_DEBUG + +struct rpc_clnt; +struct rpc_xprt; + +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) void rpc_register_sysctl(void); void rpc_unregister_sysctl(void); +int sunrpc_debugfs_init(void); +void sunrpc_debugfs_exit(void); +int rpc_clnt_debugfs_register(struct rpc_clnt *); +void rpc_clnt_debugfs_unregister(struct rpc_clnt *); +int rpc_xprt_debugfs_register(struct rpc_xprt *); +void rpc_xprt_debugfs_unregister(struct rpc_xprt *); +#else +static inline int +sunrpc_debugfs_init(void) +{ + return 0; +} + +static inline void +sunrpc_debugfs_exit(void) +{ + return; +} + +static inline int +rpc_clnt_debugfs_register(struct rpc_clnt *clnt) +{ + return 0; +} + +static inline void +rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt) +{ + return; +} + +static inline int +rpc_xprt_debugfs_register(struct rpc_xprt *xprt) +{ + return 0; +} + +static inline void +rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt) +{ + return; +} #endif #endif /* _LINUX_SUNRPC_DEBUG_H_ */ diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 1565bbe86d51..eecb5a71e6c0 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h @@ -27,10 +27,13 @@ #include <linux/seq_file.h> #include <linux/ktime.h> +#include <linux/spinlock.h> #define RPC_IOSTATS_VERS "1.0" struct rpc_iostats { + spinlock_t om_lock; + /* * These counters give an idea about how many request * transmissions are required, on average, to complete that diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 1a8959944c5f..5f1e6bd4c316 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -79,7 +79,7 @@ struct rpc_task { unsigned short tk_flags; /* misc flags */ unsigned short tk_timeouts; /* maj timeouts */ -#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) unsigned short tk_pid; /* debugging aid */ #endif unsigned char tk_priority : 2,/* Task priority */ @@ -187,7 +187,7 @@ struct rpc_wait_queue { unsigned char nr; /* # tasks remaining for cookie */ unsigned short qlen; /* total # tasks waiting in queue */ struct rpc_timer timer_list; -#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) const char * name; #endif }; @@ -237,7 +237,7 @@ void rpc_free(void *); int rpciod_up(void); void rpciod_down(void); int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct net; void rpc_show_tasks(struct net *); #endif @@ -251,7 +251,7 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task) return __rpc_wait_for_completion_task(task, NULL); } -#if defined(RPC_DEBUG) || defined (RPC_TRACEPOINTS) +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS) static inline const char * rpc_qname(const struct rpc_wait_queue *q) { return ((q && q->name) ? 
q->name : "unknown"); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index cf391eef2e6d..9d27ac45b909 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -239,6 +239,9 @@ struct rpc_xprt { struct net *xprt_net; const char *servername; const char *address_strings[RPC_DISPLAY_MAX]; +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) + struct dentry *debugfs; /* debugfs directory */ +#endif }; #if defined(CONFIG_SUNRPC_BACKCHANNEL) diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 1ad36cc25b2e..7591788e9fbf 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -17,6 +17,65 @@ void cleanup_socket_xprt(void); #define RPC_DEF_MIN_RESVPORT (665U) #define RPC_DEF_MAX_RESVPORT (1023U) +struct sock_xprt { + struct rpc_xprt xprt; + + /* + * Network layer + */ + struct socket * sock; + struct sock * inet; + + /* + * State of TCP reply receive + */ + __be32 tcp_fraghdr, + tcp_xid, + tcp_calldir; + + u32 tcp_offset, + tcp_reclen; + + unsigned long tcp_copied, + tcp_flags; + + /* + * Connection of transports + */ + struct delayed_work connect_worker; + struct sockaddr_storage srcaddr; + unsigned short srcport; + + /* + * UDP socket buffer size parameters + */ + size_t rcvsize, + sndsize; + + /* + * Saved socket callback addresses + */ + void (*old_data_ready)(struct sock *); + void (*old_state_change)(struct sock *); + void (*old_write_space)(struct sock *); + void (*old_error_report)(struct sock *); +}; + +/* + * TCP receive state flags + */ +#define TCP_RCV_LAST_FRAG (1UL << 0) +#define TCP_RCV_COPY_FRAGHDR (1UL << 1) +#define TCP_RCV_COPY_XID (1UL << 2) +#define TCP_RCV_COPY_DATA (1UL << 3) +#define TCP_RCV_READ_CALLDIR (1UL << 4) +#define TCP_RCV_COPY_CALLDIR (1UL << 5) + +/* + * TCP RPC flags + */ +#define TCP_RPC_REPLY (1UL << 6) + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 37a585beef5c..34e8b60ab973 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -102,14 +102,6 @@ union swap_header { } info; }; - /* A swap entry has to fit into a "unsigned long", as - * the entry is hidden in the "index" field of the - * swapper address space. 
- */ -typedef struct { - unsigned long val; -} swp_entry_t; - /* * current->reclaim_state points to one of these when a task is running * memory reclaim diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h new file mode 100644 index 000000000000..145306bdc92f --- /dev/null +++ b/include/linux/swap_cgroup.h @@ -0,0 +1,42 @@ +#ifndef __LINUX_SWAP_CGROUP_H +#define __LINUX_SWAP_CGROUP_H + +#include <linux/swap.h> + +#ifdef CONFIG_MEMCG_SWAP + +extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new); +extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); +extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); +extern int swap_cgroup_swapon(int type, unsigned long max_pages); +extern void swap_cgroup_swapoff(int type); + +#else + +static inline +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) +{ + return 0; +} + +static inline +unsigned short lookup_swap_cgroup_id(swp_entry_t ent) +{ + return 0; +} + +static inline int +swap_cgroup_swapon(int type, unsigned long max_pages) +{ + return 0; +} + +static inline void swap_cgroup_swapoff(int type) +{ + return; +} + +#endif /* CONFIG_MEMCG_SWAP */ + +#endif /* __LINUX_SWAP_CGROUP_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index bda9b81357cc..85893d744901 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -25,7 +25,7 @@ struct linux_dirent64; struct list_head; struct mmap_arg_struct; struct msgbuf; -struct msghdr; +struct user_msghdr; struct mmsghdr; struct msqid_ds; struct new_utsname; @@ -601,13 +601,13 @@ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); asmlinkage long sys_send(int, void __user *, size_t, unsigned); asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, struct sockaddr __user *, int); -asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); +asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags); asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags); asmlinkage long sys_recv(int, void __user *, size_t, unsigned); asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, struct sockaddr __user *, int __user *); -asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); +asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags); asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, unsigned int vlen, unsigned flags, struct timespec __user *timeout); @@ -877,4 +877,9 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, asmlinkage long sys_getrandom(char __user *buf, size_t count, unsigned int flags); asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size); + +asmlinkage long sys_execveat(int dfd, const char __user *filename, + const char __user *const __user *argv, + const char __user *const __user *envp, int flags); + #endif diff --git a/include/linux/syslog.h b/include/linux/syslog.h index 98a3153c0f96..4b7b875a7ce1 100644 --- a/include/linux/syslog.h +++ b/include/linux/syslog.h @@ -49,4 +49,13 @@ int do_syslog(int type, char __user *buf, int count, bool from_file); +#ifdef CONFIG_PRINTK +int check_syslog_permissions(int type, bool from_file); +#else +static inline int check_syslog_permissions(int type, bool from_file) +{ + return 0; +} +#endif + #endif /* _LINUX_SYSLOG_H */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 
c2dee7deefa8..67309ece0772 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -130,7 +130,7 @@ struct tcp_sock { /* inet_connection_sock has to be the first member of tcp_sock */ struct inet_connection_sock inet_conn; u16 tcp_header_len; /* Bytes of tcp header to send */ - u16 xmit_size_goal_segs; /* Goal for segmenting output packets */ + u16 gso_segs; /* Max number of segs per GSO packet */ /* * Header prediction flags @@ -162,7 +162,7 @@ struct tcp_sock { struct { struct sk_buff_head prequeue; struct task_struct *task; - struct iovec *iov; + struct msghdr *msg; int memory; int len; } ucopy; @@ -204,10 +204,10 @@ struct tcp_sock { u16 urg_data; /* Saved octet of OOB data and control flags */ u8 ecn_flags; /* ECN status bits. */ - u8 reordering; /* Packet reordering metric. */ + u8 keepalive_probes; /* num of allowed keep alive probes */ + u32 reordering; /* Packet reordering metric. */ u32 snd_up; /* Urgent pointer */ - u8 keepalive_probes; /* num of allowed keep alive probes */ /* * Options received (usually on last packet, some only on SYN packets). */ diff --git a/include/linux/time.h b/include/linux/time.h index 8c42cf8d2444..203c2ad40d71 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -39,9 +39,20 @@ static inline int timeval_compare(const struct timeval *lhs, const struct timeva return lhs->tv_usec - rhs->tv_usec; } -extern unsigned long mktime(const unsigned int year, const unsigned int mon, - const unsigned int day, const unsigned int hour, - const unsigned int min, const unsigned int sec); +extern time64_t mktime64(const unsigned int year, const unsigned int mon, + const unsigned int day, const unsigned int hour, + const unsigned int min, const unsigned int sec); + +/** + * Deprecated. Use mktime64(). + */ +static inline unsigned long mktime(const unsigned int year, + const unsigned int mon, const unsigned int day, + const unsigned int hour, const unsigned int min, + const unsigned int sec) +{ + return mktime64(year, mon, day, hour, min, sec); +} extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 95640dcd1899..05af9a334893 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -42,6 +42,7 @@ struct tk_read_base { * struct timekeeper - Structure holding internal timekeeping values. 
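In the time.h hunk above, mktime() becomes a wrapper around the new 64-bit mktime64(). A trivial sketch (the date is illustrative):

#include <linux/time.h>

static time64_t foo_build_date(void)
{
	/* seconds since the epoch for 2015-01-01 00:00:00 UTC */
	return mktime64(2015, 1, 1, 0, 0, 0);
}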
* @tkr: The readout base structure * @xtime_sec: Current CLOCK_REALTIME time in seconds + * @ktime_sec: Current CLOCK_MONOTONIC time in seconds * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset * @offs_real: Offset clock monotonic -> clock realtime * @offs_boot: Offset clock monotonic -> clock boottime @@ -77,6 +78,7 @@ struct tk_read_base { struct timekeeper { struct tk_read_base tkr; u64 xtime_sec; + unsigned long ktime_sec; struct timespec64 wall_to_monotonic; ktime_t offs_real; ktime_t offs_boot; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 1caa6b04fdc5..9b63d13ba82b 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -10,7 +10,7 @@ extern int timekeeping_suspended; * Get and set timeofday */ extern void do_gettimeofday(struct timeval *tv); -extern int do_settimeofday(const struct timespec *tv); +extern int do_settimeofday64(const struct timespec64 *ts); extern int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz); @@ -25,14 +25,24 @@ struct timespec __current_kernel_time(void); /* * timespec based interfaces */ -struct timespec get_monotonic_coarse(void); -extern void getrawmonotonic(struct timespec *ts); +struct timespec64 get_monotonic_coarse64(void); +extern void getrawmonotonic64(struct timespec64 *ts); extern void ktime_get_ts64(struct timespec64 *ts); +extern time64_t ktime_get_seconds(void); +extern time64_t ktime_get_real_seconds(void); extern int __getnstimeofday64(struct timespec64 *tv); extern void getnstimeofday64(struct timespec64 *tv); #if BITS_PER_LONG == 64 +/** + * Deprecated. Use do_settimeofday64(). + */ +static inline int do_settimeofday(const struct timespec *ts) +{ + return do_settimeofday64(ts); +} + static inline int __getnstimeofday(struct timespec *ts) { return __getnstimeofday64(ts); @@ -53,7 +63,27 @@ static inline void ktime_get_real_ts(struct timespec *ts) getnstimeofday64(ts); } +static inline void getrawmonotonic(struct timespec *ts) +{ + getrawmonotonic64(ts); +} + +static inline struct timespec get_monotonic_coarse(void) +{ + return get_monotonic_coarse64(); +} #else +/** + * Deprecated. Use do_settimeofday64(). 
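do_settimeofday() is likewise re-expressed on top of do_settimeofday64(); together with the rtc_tm_to_time64() change earlier in this diff, an RTC resume path can set the clock without 32-bit truncation. A sketch, assuming a struct rtc_time already read from the hardware (foo_restore_time() is hypothetical):

#include <linux/timekeeping.h>
#include <linux/rtc.h>

static int foo_restore_time(struct rtc_time *tm)
{
	struct timespec64 ts = {
		.tv_sec  = rtc_tm_to_time64(tm),
		.tv_nsec = 0,
	};

	return do_settimeofday64(&ts);
}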
+ */ +static inline int do_settimeofday(const struct timespec *ts) +{ + struct timespec64 ts64; + + ts64 = timespec_to_timespec64(*ts); + return do_settimeofday64(&ts64); +} + static inline int __getnstimeofday(struct timespec *ts) { struct timespec64 ts64; @@ -86,6 +116,19 @@ static inline void ktime_get_real_ts(struct timespec *ts) getnstimeofday64(&ts64); *ts = timespec64_to_timespec(ts64); } + +static inline void getrawmonotonic(struct timespec *ts) +{ + struct timespec64 ts64; + + getrawmonotonic64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline struct timespec get_monotonic_coarse(void) +{ + return timespec64_to_timespec(get_monotonic_coarse64()); +} #endif extern void getboottime(struct timespec *ts); @@ -182,7 +225,7 @@ static inline void timekeeping_clocktai(struct timespec *ts) /* * RTC specific */ -extern void timekeeping_inject_sleeptime(struct timespec *delta); +extern void timekeeping_inject_sleeptime64(struct timespec64 *delta); /* * PPS accessor diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index ea6c9dea79e3..cfaf5a1d4bad 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -1,7 +1,7 @@ #ifndef _LINUX_TRACE_SEQ_H #define _LINUX_TRACE_SEQ_H -#include <linux/fs.h> +#include <linux/seq_buf.h> #include <asm/page.h> @@ -12,20 +12,36 @@ struct trace_seq { unsigned char buffer[PAGE_SIZE]; - unsigned int len; - unsigned int readpos; + struct seq_buf seq; int full; }; static inline void trace_seq_init(struct trace_seq *s) { - s->len = 0; - s->readpos = 0; + seq_buf_init(&s->seq, s->buffer, PAGE_SIZE); s->full = 0; } /** + * trace_seq_used - amount of actual data written to buffer + * @s: trace sequence descriptor + * + * Returns the amount of data written to the buffer. + * + * IMPORTANT! + * + * Use this instead of @s->seq.len if you need to pass the amount + * of data from the buffer to another buffer (userspace, or what not). + * The @s->seq.len on overflow is bigger than the buffer size and + * using it can cause access to undefined memory. + */ +static inline int trace_seq_used(struct trace_seq *s) +{ + return seq_buf_used(&s->seq); +} + +/** * trace_seq_buffer_ptr - return pointer to next location in buffer * @s: trace sequence descriptor * @@ -37,7 +53,19 @@ trace_seq_init(struct trace_seq *s) static inline unsigned char * trace_seq_buffer_ptr(struct trace_seq *s) { - return s->buffer + s->len; + return s->buffer + seq_buf_used(&s->seq); +} + +/** + * trace_seq_has_overflowed - return true if the trace_seq took too much + * @s: trace sequence descriptor + * + * Returns true if too much data was added to the trace_seq and it is + * now full and will not take anymore. 
+ */ +static inline bool trace_seq_has_overflowed(struct trace_seq *s) +{ + return s->full || seq_buf_has_overflowed(&s->seq); } /* @@ -45,40 +73,37 @@ trace_seq_buffer_ptr(struct trace_seq *s) */ #ifdef CONFIG_TRACING extern __printf(2, 3) -int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); +void trace_seq_printf(struct trace_seq *s, const char *fmt, ...); extern __printf(2, 0) -int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); -extern int +void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); +extern void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt); -extern int trace_seq_puts(struct trace_seq *s, const char *str); -extern int trace_seq_putc(struct trace_seq *s, unsigned char c); -extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); -extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, +extern void trace_seq_puts(struct trace_seq *s, const char *str); +extern void trace_seq_putc(struct trace_seq *s, unsigned char c); +extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); +extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, unsigned int len); extern int trace_seq_path(struct trace_seq *s, const struct path *path); -extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, +extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, int nmaskbits); #else /* CONFIG_TRACING */ -static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) +static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) 
{ - return 0; } -static inline int +static inline void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) { - return 0; } -static inline int +static inline void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, int nmaskbits) { - return 0; } static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) @@ -90,23 +115,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, { return 0; } -static inline int trace_seq_puts(struct trace_seq *s, const char *str) +static inline void trace_seq_puts(struct trace_seq *s, const char *str) { - return 0; } -static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) +static inline void trace_seq_putc(struct trace_seq *s, unsigned char c) { - return 0; } -static inline int +static inline void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) { - return 0; } -static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, +static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, unsigned int len) { - return 0; } static inline int trace_seq_path(struct trace_seq *s, const struct path *path) { diff --git a/include/linux/uio.h b/include/linux/uio.h index 9b1581414cd4..a41e252396c0 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -31,6 +31,7 @@ struct iov_iter { size_t count; union { const struct iovec *iov; + const struct kvec *kvec; const struct bio_vec *bvec; }; unsigned long nr_segs; @@ -82,10 +83,13 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i); size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i); +size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i); size_t iov_iter_zero(size_t bytes, struct iov_iter *); unsigned long iov_iter_alignment(const struct iov_iter *i); void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, unsigned long nr_segs, size_t count); +void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *iov, + unsigned long nr_segs, size_t count); ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start); ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, @@ -123,9 +127,10 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { i->count = count; } +size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); +size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i); int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); -int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, diff --git a/include/linux/usb.h b/include/linux/usb.h index 447a7e2fc19b..f89c24a03bd9 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -637,7 +637,7 @@ static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index) #endif /* USB autosuspend and autoresume */ -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM extern void usb_enable_autosuspend(struct usb_device *udev); extern void usb_disable_autosuspend(struct usb_device *udev); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index cd96a2bc3388..668898e29d0e 100644 --- 
a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -93,7 +93,7 @@ struct usb_hcd { struct timer_list rh_timer; /* drives root-hub polling */ struct urb *status_urb; /* the current status urb */ -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM struct work_struct wakeup_work; /* for remote wakeup */ #endif @@ -625,16 +625,13 @@ extern int usb_find_interface_driver(struct usb_device *dev, extern void usb_root_hub_lost_power(struct usb_device *rhdev); extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); -#endif /* CONFIG_PM */ - -#ifdef CONFIG_PM_RUNTIME extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); #else static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) { return; } -#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index a4c9547aae64..f8e76e08ebe4 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h @@ -15,8 +15,6 @@ #define _LINUX_VEXPRESS_H #include <linux/device.h> -#include <linux/platform_device.h> -#include <linux/reboot.h> #include <linux/regmap.h> #define VEXPRESS_SITE_MB 0 @@ -24,13 +22,6 @@ #define VEXPRESS_SITE_DB2 2 #define VEXPRESS_SITE_MASTER 0xf -#define VEXPRESS_RES_FUNC(_site, _func) \ -{ \ - .start = (_site), \ - .end = (_func), \ - .flags = IORESOURCE_BUS, \ -} - /* Config infrastructure */ void vexpress_config_set_master(u32 site); @@ -58,16 +49,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev); /* Platform control */ -unsigned int vexpress_get_mci_cardin(struct device *dev); -u32 vexpress_get_procid(int site); -void *vexpress_get_24mhz_clock_base(void); void vexpress_flags_set(u32 data); -void vexpress_sysreg_early_init(void __iomem *base); -int vexpress_syscfg_device_register(struct platform_device *pdev); - -/* Clocks */ - -void vexpress_clk_init(void __iomem *sp810_base); - #endif diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 65261a7244fc..d09e0938fd60 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -75,6 +75,9 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq); bool virtqueue_is_broken(struct virtqueue *vq); +void *virtqueue_get_avail(struct virtqueue *vq); +void *virtqueue_get_used(struct virtqueue *vq); + /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus @@ -101,11 +104,12 @@ struct virtio_device { const struct virtio_config_ops *config; const struct vringh_config_ops *vringh_config; struct list_head vqs; - /* Note that this is a Linux set_bit-style bitmap. */ - unsigned long features[1]; + u64 features; void *priv; }; +bool virtio_device_is_legacy_only(struct virtio_device_id id); + static inline struct virtio_device *dev_to_virtio(struct device *_dev) { return container_of(_dev, struct virtio_device, dev); @@ -128,6 +132,8 @@ int virtio_device_restore(struct virtio_device *dev); * @id_table: the ids serviced by this driver. * @feature_table: an array of feature numbers supported by this driver. * @feature_table_size: number of entries in the feature table array. + * @feature_table_legacy: same as feature_table but when working in legacy mode. + * @feature_table_size_legacy: number of entries in feature table legacy array. * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function to call when a device is removed. 
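The new legacy feature table lets a driver advertise a different feature set when it is bound through a legacy (pre-1.0) transport. A hedged sketch of what such a registration might look like; the example_* names and the chosen feature bits are invented, not taken from any in-tree driver:

#include <linux/kernel.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static const unsigned int example_features[] = {
        VIRTIO_F_VERSION_1,     /* modern interface */
        /* ... device-specific feature bits ... */
};

static const unsigned int example_features_legacy[] = {
        VIRTIO_F_ANY_LAYOUT,    /* placeholder legacy-era bit */
};

static struct virtio_driver example_virtio_driver = {
        .driver = {
                .name = "example-virtio",
        },
        .feature_table             = example_features,
        .feature_table_size        = ARRAY_SIZE(example_features),
        .feature_table_legacy      = example_features_legacy,
        .feature_table_size_legacy = ARRAY_SIZE(example_features_legacy),
        /* .id_table, .probe, .remove ... */
};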
* @config_changed: optional function to call when the device configuration @@ -138,6 +144,8 @@ struct virtio_driver { const struct virtio_device_id *id_table; const unsigned int *feature_table; unsigned int feature_table_size; + const unsigned int *feature_table_legacy; + unsigned int feature_table_size_legacy; int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); void (*remove)(struct virtio_device *dev); diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h new file mode 100644 index 000000000000..51865d05b267 --- /dev/null +++ b/include/linux/virtio_byteorder.h @@ -0,0 +1,59 @@ +#ifndef _LINUX_VIRTIO_BYTEORDER_H +#define _LINUX_VIRTIO_BYTEORDER_H +#include <linux/types.h> +#include <uapi/linux/virtio_types.h> + +/* + * Low-level memory accessors for handling virtio in modern little endian and in + * compatibility native endian format. + */ + +static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val) +{ + if (little_endian) + return le16_to_cpu((__force __le16)val); + else + return (__force u16)val; +} + +static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val) +{ + if (little_endian) + return (__force __virtio16)cpu_to_le16(val); + else + return (__force __virtio16)val; +} + +static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val) +{ + if (little_endian) + return le32_to_cpu((__force __le32)val); + else + return (__force u32)val; +} + +static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val) +{ + if (little_endian) + return (__force __virtio32)cpu_to_le32(val); + else + return (__force __virtio32)val; +} + +static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val) +{ + if (little_endian) + return le64_to_cpu((__force __le64)val); + else + return (__force u64)val; +} + +static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val) +{ + if (little_endian) + return (__force __virtio64)cpu_to_le64(val); + else + return (__force __virtio64)val; +} + +#endif /* _LINUX_VIRTIO_BYTEORDER */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 7f4ef66873ef..7979f850e7ac 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -4,6 +4,7 @@ #include <linux/err.h> #include <linux/bug.h> #include <linux/virtio.h> +#include <linux/virtio_byteorder.h> #include <uapi/linux/virtio_config.h> /** @@ -46,6 +47,7 @@ * vdev: the virtio_device * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. + * Returns 0 on success or error status * @bus_name: return the bus name associated with the device * vdev: the virtio_device * This returns a pointer to the bus name a la pci_name from which @@ -66,8 +68,8 @@ struct virtio_config_ops { vq_callback_t *callbacks[], const char *names[]); void (*del_vqs)(struct virtio_device *); - u32 (*get_features)(struct virtio_device *vdev); - void (*finalize_features)(struct virtio_device *vdev); + u64 (*get_features)(struct virtio_device *vdev); + int (*finalize_features)(struct virtio_device *vdev); const char *(*bus_name)(struct virtio_device *vdev); int (*set_vq_affinity)(struct virtqueue *vq, int cpu); }; @@ -77,23 +79,70 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev, unsigned int fbit); /** - * virtio_has_feature - helper to determine if this device has this feature. + * __virtio_test_bit - helper to test feature bits. For use by transports. 
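The helpers in the new virtio_byteorder.h make the endianness decision explicit: fields are little-endian when VIRTIO_F_VERSION_1 has been negotiated and native-endian otherwise. A small sketch of how a transport-private structure might use them; example_ring and its fields are hypothetical:

#include <linux/types.h>
#include <linux/virtio_byteorder.h>

struct example_ring {
        bool little_endian;     /* true once VIRTIO_F_VERSION_1 is negotiated */
        __virtio16 avail_idx;   /* stored in virtio byte order */
};

static u16 example_read_avail_idx(const struct example_ring *r)
{
        return __virtio16_to_cpu(r->little_endian, r->avail_idx);
}

static void example_write_avail_idx(struct example_ring *r, u16 idx)
{
        r->avail_idx = __cpu_to_virtio16(r->little_endian, idx);
}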
+ * Devices should normally use virtio_has_feature, + * which includes more checks. * @vdev: the device * @fbit: the feature bit */ -static inline bool virtio_has_feature(const struct virtio_device *vdev, +static inline bool __virtio_test_bit(const struct virtio_device *vdev, + unsigned int fbit) +{ + /* Did you forget to fix assumptions on max features? */ + if (__builtin_constant_p(fbit)) + BUILD_BUG_ON(fbit >= 64); + else + BUG_ON(fbit >= 64); + + return vdev->features & BIT_ULL(fbit); +} + +/** + * __virtio_set_bit - helper to set feature bits. For use by transports. + * @vdev: the device + * @fbit: the feature bit + */ +static inline void __virtio_set_bit(struct virtio_device *vdev, + unsigned int fbit) +{ + /* Did you forget to fix assumptions on max features? */ + if (__builtin_constant_p(fbit)) + BUILD_BUG_ON(fbit >= 64); + else + BUG_ON(fbit >= 64); + + vdev->features |= BIT_ULL(fbit); +} + +/** + * __virtio_clear_bit - helper to clear feature bits. For use by transports. + * @vdev: the device + * @fbit: the feature bit + */ +static inline void __virtio_clear_bit(struct virtio_device *vdev, unsigned int fbit) { /* Did you forget to fix assumptions on max features? */ if (__builtin_constant_p(fbit)) - BUILD_BUG_ON(fbit >= 32); + BUILD_BUG_ON(fbit >= 64); else - BUG_ON(fbit >= 32); + BUG_ON(fbit >= 64); + vdev->features &= ~BIT_ULL(fbit); +} + +/** + * virtio_has_feature - helper to determine if this device has this feature. + * @vdev: the device + * @fbit: the feature bit + */ +static inline bool virtio_has_feature(const struct virtio_device *vdev, + unsigned int fbit) +{ if (fbit < VIRTIO_TRANSPORT_F_START) virtio_check_driver_offered_feature(vdev, fbit); - return test_bit(fbit, vdev->features); + return __virtio_test_bit(vdev, fbit); } static inline @@ -152,6 +201,37 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu) return 0; } +/* Memory accessors */ +static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val) +{ + return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val) +{ + return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val) +{ + return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val) +{ + return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val) +{ + return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + +static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) +{ + return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val); +} + /* Config space accessors. 
*/ #define virtio_cread(vdev, structname, member, ptr) \ do { \ @@ -239,12 +319,13 @@ static inline u16 virtio_cread16(struct virtio_device *vdev, { u16 ret; vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return ret; + return virtio16_to_cpu(vdev, (__force __virtio16)ret); } static inline void virtio_cwrite16(struct virtio_device *vdev, unsigned int offset, u16 val) { + val = (__force u16)cpu_to_virtio16(vdev, val); vdev->config->set(vdev, offset, &val, sizeof(val)); } @@ -253,12 +334,13 @@ static inline u32 virtio_cread32(struct virtio_device *vdev, { u32 ret; vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return ret; + return virtio32_to_cpu(vdev, (__force __virtio32)ret); } static inline void virtio_cwrite32(struct virtio_device *vdev, unsigned int offset, u32 val) { + val = (__force u32)cpu_to_virtio32(vdev, val); vdev->config->set(vdev, offset, &val, sizeof(val)); } @@ -267,12 +349,13 @@ static inline u64 virtio_cread64(struct virtio_device *vdev, { u64 ret; vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return ret; + return virtio64_to_cpu(vdev, (__force __virtio64)ret); } static inline void virtio_cwrite64(struct virtio_device *vdev, unsigned int offset, u64 val) { + val = (__force u64)cpu_to_virtio64(vdev, val); vdev->config->set(vdev, offset, &val, sizeof(val)); } diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 730334cdf037..9246d32dc973 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -90,6 +90,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_DEBUG_VM_VMACACHE VMACACHE_FIND_CALLS, VMACACHE_FIND_HITS, + VMACACHE_FULL_FLUSHES, #endif NR_VM_EVENT_ITEMS }; diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h index 023430e265fe..5691f752ce8f 100644 --- a/include/linux/vmw_vmci_api.h +++ b/include/linux/vmw_vmci_api.h @@ -24,6 +24,7 @@ #define VMCI_KERNEL_API_VERSION_2 2 #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 +struct msghdr; typedef void (vmci_device_shutdown_fn) (void *device_registration, void *user_data); @@ -75,8 +76,8 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size, ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, void *iov, size_t iov_size, int mode); ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, - void *iov, size_t iov_size, int mode); -ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size, + struct msghdr *msg, size_t iov_size, int mode); +ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size, int mode); #endif /* !__VMW_VMCI_API_H__ */ diff --git a/include/linux/wait.h b/include/linux/wait.h index e4a8eb9312ea..2232ed16635a 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -13,9 +13,12 @@ typedef struct __wait_queue wait_queue_t; typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); +/* __wait_queue::flags */ +#define WQ_FLAG_EXCLUSIVE 0x01 +#define WQ_FLAG_WOKEN 0x02 + struct __wait_queue { unsigned int flags; -#define WQ_FLAG_EXCLUSIVE 0x01 void *private; wait_queue_func_t func; struct list_head task_list; @@ -258,11 +261,37 @@ __out: __ret; \ */ #define wait_event(wq, condition) \ do { \ + might_sleep(); \ if (condition) \ break; \ __wait_event(wq, condition); \ } while (0) +#define __wait_event_freezable(wq, condition) \ + ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ + schedule(); try_to_freeze()) + 
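With __wait_event_freezable() in place, a freezable kernel thread no longer needs an open-coded freezer loop: it sleeps in TASK_INTERRUPTIBLE and calls try_to_freeze() on every wakeup. A minimal sketch, assuming the thread is created with kthread_run() elsewhere; example_wq and example_work_pending are invented:

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/freezer.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_work_pending;

static int example_thread(void *data)
{
        set_freezable();

        while (!kthread_should_stop()) {
                /* Interruptible sleep, so the thread does not add to the
                 * load average, and try_to_freeze() runs after wakeup. */
                wait_event_freezable(example_wq,
                                     example_work_pending ||
                                     kthread_should_stop());

                example_work_pending = false;
                /* ... handle the work ... */
        }

        return 0;
}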
+/** + * wait_event - sleep (or freeze) until a condition gets true + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * + * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute + * to system load) until the @condition evaluates to true. The + * @condition is checked each time the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + */ +#define wait_event_freezable(wq, condition) \ +({ \ + int __ret = 0; \ + might_sleep(); \ + if (!(condition)) \ + __ret = __wait_event_freezable(wq, condition); \ + __ret; \ +}) + #define __wait_event_timeout(wq, condition, timeout) \ ___wait_event(wq, ___wait_cond_timeout(condition), \ TASK_UNINTERRUPTIBLE, 0, timeout, \ @@ -290,11 +319,30 @@ do { \ #define wait_event_timeout(wq, condition, timeout) \ ({ \ long __ret = timeout; \ + might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_timeout(wq, condition, timeout); \ __ret; \ }) +#define __wait_event_freezable_timeout(wq, condition, timeout) \ + ___wait_event(wq, ___wait_cond_timeout(condition), \ + TASK_INTERRUPTIBLE, 0, timeout, \ + __ret = schedule_timeout(__ret); try_to_freeze()) + +/* + * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid + * increasing load and is freezable. + */ +#define wait_event_freezable_timeout(wq, condition, timeout) \ +({ \ + long __ret = timeout; \ + might_sleep(); \ + if (!___wait_cond_timeout(condition)) \ + __ret = __wait_event_freezable_timeout(wq, condition, timeout); \ + __ret; \ +}) + #define __wait_event_cmd(wq, condition, cmd1, cmd2) \ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ cmd1; schedule(); cmd2) @@ -315,6 +363,7 @@ do { \ */ #define wait_event_cmd(wq, condition, cmd1, cmd2) \ do { \ + might_sleep(); \ if (condition) \ break; \ __wait_event_cmd(wq, condition, cmd1, cmd2); \ @@ -342,6 +391,7 @@ do { \ #define wait_event_interruptible(wq, condition) \ ({ \ int __ret = 0; \ + might_sleep(); \ if (!(condition)) \ __ret = __wait_event_interruptible(wq, condition); \ __ret; \ @@ -375,6 +425,7 @@ do { \ #define wait_event_interruptible_timeout(wq, condition, timeout) \ ({ \ long __ret = timeout; \ + might_sleep(); \ if (!___wait_cond_timeout(condition)) \ __ret = __wait_event_interruptible_timeout(wq, \ condition, timeout); \ @@ -425,6 +476,7 @@ do { \ #define wait_event_hrtimeout(wq, condition, timeout) \ ({ \ int __ret = 0; \ + might_sleep(); \ if (!(condition)) \ __ret = __wait_event_hrtimeout(wq, condition, timeout, \ TASK_UNINTERRUPTIBLE); \ @@ -450,6 +502,7 @@ do { \ #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ ({ \ long __ret = 0; \ + might_sleep(); \ if (!(condition)) \ __ret = __wait_event_hrtimeout(wq, condition, timeout, \ TASK_INTERRUPTIBLE); \ @@ -463,12 +516,27 @@ do { \ #define wait_event_interruptible_exclusive(wq, condition) \ ({ \ int __ret = 0; \ + might_sleep(); \ if (!(condition)) \ __ret = __wait_event_interruptible_exclusive(wq, condition);\ __ret; \ }) +#define __wait_event_freezable_exclusive(wq, condition) \ + ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ + schedule(); try_to_freeze()) + +#define wait_event_freezable_exclusive(wq, condition) \ +({ \ + int __ret = 0; \ + might_sleep(); \ + if (!(condition)) \ + __ret = __wait_event_freezable_exclusive(wq, condition);\ + __ret; \ +}) + + #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ ({ \ int __ret = 0; \ @@ 
-637,6 +705,7 @@ do { \ #define wait_event_killable(wq, condition) \ ({ \ int __ret = 0; \ + might_sleep(); \ if (!(condition)) \ __ret = __wait_event_killable(wq, condition); \ __ret; \ @@ -830,6 +899,8 @@ void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int sta long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state); void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key); +long wait_woken(wait_queue_t *wait, unsigned mode, long timeout); +int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); @@ -886,6 +957,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *); static inline int wait_on_bit(void *word, int bit, unsigned mode) { + might_sleep(); if (!test_bit(bit, word)) return 0; return out_of_line_wait_on_bit(word, bit, @@ -910,6 +982,7 @@ wait_on_bit(void *word, int bit, unsigned mode) static inline int wait_on_bit_io(void *word, int bit, unsigned mode) { + might_sleep(); if (!test_bit(bit, word)) return 0; return out_of_line_wait_on_bit(word, bit, @@ -936,6 +1009,7 @@ wait_on_bit_io(void *word, int bit, unsigned mode) static inline int wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) { + might_sleep(); if (!test_bit(bit, word)) return 0; return out_of_line_wait_on_bit(word, bit, action, mode); @@ -963,6 +1037,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode static inline int wait_on_bit_lock(void *word, int bit, unsigned mode) { + might_sleep(); if (!test_and_set_bit(bit, word)) return 0; return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode); @@ -986,6 +1061,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode) static inline int wait_on_bit_lock_io(void *word, int bit, unsigned mode) { + might_sleep(); if (!test_and_set_bit(bit, word)) return 0; return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode); @@ -1011,6 +1087,7 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode) static inline int wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) { + might_sleep(); if (!test_and_set_bit(bit, word)) return 0; return out_of_line_wait_on_bit_lock(word, bit, action, mode); @@ -1029,6 +1106,7 @@ wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned static inline int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode) { + might_sleep(); if (atomic_read(val) == 0) return 0; return out_of_line_wait_on_atomic_t(val, action, mode); diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h index 57585c7004a4..4376beeb28c2 100644 --- a/include/media/davinci/vpbe.h +++ b/include/media/davinci/vpbe.h @@ -63,7 +63,7 @@ struct vpbe_output { * output basis. 
If per mode is needed, we may have to move this to * mode_info structure */ - enum v4l2_mbus_pixelcode if_params; + u32 if_params; }; /* encoder configuration info */ diff --git a/include/media/davinci/vpbe_display.h b/include/media/davinci/vpbe_display.h index 637749a91432..fa0247ad815f 100644 --- a/include/media/davinci/vpbe_display.h +++ b/include/media/davinci/vpbe_display.h @@ -70,8 +70,6 @@ struct vpbe_disp_buffer { /* vpbe display object structure */ struct vpbe_layer { - /* number of buffers in fbuffers */ - unsigned int numbuffers; /* Pointer to the vpbe_display */ struct vpbe_display *disp_dev; /* Pointer pointing to current v4l2_buffer */ @@ -91,10 +89,6 @@ struct vpbe_layer { /* V4l2 specific parameters */ /* Identifies video device for this layer */ struct video_device video_dev; - /* This field keeps track of type of buffer exchange mechanism user - * has selected - */ - enum v4l2_memory memory; /* Used to store pixel format */ struct v4l2_pix_format pix_fmt; enum v4l2_field buf_field; @@ -106,12 +100,8 @@ struct vpbe_layer { unsigned char window_enable; /* number of open instances of the layer */ unsigned int usrs; - /* number of users performing IO */ - unsigned int io_usrs; /* Indicates id of the field which is being displayed */ unsigned int field_id; - /* Indicates whether streaming started */ - unsigned char started; /* Identifies device object */ enum vpbe_display_device_id device_id; /* facilitation of ioctl ops lock by v4l2*/ @@ -131,17 +121,6 @@ struct vpbe_display { struct osd_state *osd_device; }; -/* File handle structure */ -struct vpbe_fh { - struct v4l2_fh fh; - /* vpbe device structure */ - struct vpbe_display *disp_dev; - /* pointer to layer object for opened device */ - struct vpbe_layer *layer; - /* Indicates whether this file handle is doing IO */ - unsigned char io_allowed; -}; - struct buf_config_params { unsigned char min_numbuffers; unsigned char numbuffers[VPBE_DISPLAY_MAX_DEVICES]; diff --git a/include/media/davinci/vpbe_venc.h b/include/media/davinci/vpbe_venc.h index 476fafc2f522..3dbd20026107 100644 --- a/include/media/davinci/vpbe_venc.h +++ b/include/media/davinci/vpbe_venc.h @@ -30,11 +30,10 @@ #define VENC_SECOND_FIELD BIT(2) struct venc_platform_data { - int (*setup_pinmux)(enum v4l2_mbus_pixelcode if_type, - int field); + int (*setup_pinmux)(u32 if_type, int field); int (*setup_clock)(enum vpbe_enc_timings_type type, unsigned int pixclock); - int (*setup_if_config)(enum v4l2_mbus_pixelcode pixcode); + int (*setup_if_config)(u32 pixcode); /* Number of LCD outputs supported */ int num_lcd_outputs; struct vpbe_if_params *lcd_if_params; diff --git a/include/media/exynos-fimc.h b/include/media/exynos-fimc.h index aa44660e2041..69bcd2a07d5c 100644 --- a/include/media/exynos-fimc.h +++ b/include/media/exynos-fimc.h @@ -101,7 +101,7 @@ struct fimc_source_info { * @flags: flags indicating which operation mode format applies to */ struct fimc_fmt { - enum v4l2_mbus_pixelcode mbus_code; + u32 mbus_code; char *name; u32 fourcc; u32 color; diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h index 78f0637ca68d..05e7ad5d2c8b 100644 --- a/include/media/lirc_dev.h +++ b/include/media/lirc_dev.h @@ -29,14 +29,13 @@ struct lirc_buffer { /* Using chunks instead of bytes pretends to simplify boundary checking * And should allow for some performance fine tunning later */ struct kfifo fifo; - u8 fifo_initialized; }; static inline void lirc_buffer_clear(struct lirc_buffer *buf) { unsigned long flags; - if (buf->fifo_initialized) { + if 
(kfifo_initialized(&buf->fifo)) { spin_lock_irqsave(&buf->fifo_lock, flags); kfifo_reset(&buf->fifo); spin_unlock_irqrestore(&buf->fifo_lock, flags); @@ -56,17 +55,14 @@ static inline int lirc_buffer_init(struct lirc_buffer *buf, buf->chunk_size = chunk_size; buf->size = size; ret = kfifo_alloc(&buf->fifo, size * chunk_size, GFP_KERNEL); - if (ret == 0) - buf->fifo_initialized = 1; return ret; } static inline void lirc_buffer_free(struct lirc_buffer *buf) { - if (buf->fifo_initialized) { + if (kfifo_initialized(&buf->fifo)) { kfifo_free(&buf->fifo); - buf->fifo_initialized = 0; } else WARN(1, "calling %s on an uninitialized lirc_buffer\n", __func__); diff --git a/include/media/radio-si4713.h b/include/media/radio-si4713.h deleted file mode 100644 index f6aae29c7741..000000000000 --- a/include/media/radio-si4713.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * include/media/radio-si4713.h - * - * Board related data definitions for Si4713 radio transmitter chip. - * - * Copyright (c) 2009 Nokia Corporation - * Contact: Eduardo Valentin <eduardo.valentin@nokia.com> - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - * - */ - -#ifndef RADIO_SI4713_H -#define RADIO_SI4713_H - -#include <linux/i2c.h> - -#define SI4713_NAME "radio-si4713" - -/* - * Platform dependent definition - */ -struct radio_si4713_platform_data { - int i2c_bus; - struct i2c_board_info *subdev_board_info; -}; - -#endif /* ifndef RADIO_SI4713_H*/ diff --git a/include/media/si4713.h b/include/media/si4713.h index f98a0a7af61c..be4f58e2440b 100644 --- a/include/media/si4713.h +++ b/include/media/si4713.h @@ -23,9 +23,7 @@ * Platform dependent definition */ struct si4713_platform_data { - const char * const *supply_names; - unsigned supplies; - int gpio_reset; /* < 0 if not used */ + bool is_platform_device; }; /* diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h index 865246b00127..2f6261f3e570 100644 --- a/include/media/soc_camera.h +++ b/include/media/soc_camera.h @@ -296,7 +296,7 @@ const struct soc_camera_format_xlate *soc_camera_xlate_by_fourcc( * format setup. 
*/ struct soc_camera_format_xlate { - enum v4l2_mbus_pixelcode code; + u32 code; const struct soc_mbus_pixelfmt *host_fmt; }; diff --git a/include/media/soc_mediabus.h b/include/media/soc_mediabus.h index d33f6d059692..2ff773785fb6 100644 --- a/include/media/soc_mediabus.h +++ b/include/media/soc_mediabus.h @@ -91,16 +91,16 @@ struct soc_mbus_pixelfmt { * @fmt: pixel format description */ struct soc_mbus_lookup { - enum v4l2_mbus_pixelcode code; + u32 code; struct soc_mbus_pixelfmt fmt; }; const struct soc_mbus_pixelfmt *soc_mbus_find_fmtdesc( - enum v4l2_mbus_pixelcode code, + u32 code, const struct soc_mbus_lookup *lookup, int n); const struct soc_mbus_pixelfmt *soc_mbus_get_fmtdesc( - enum v4l2_mbus_pixelcode code); + u32 code); s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf); s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, u32 bytes_per_line, u32 height); diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 48f974866f13..1cc0c5ba16b3 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -80,24 +80,9 @@ /* ------------------------------------------------------------------------- */ -/* Control helper functions */ +/* Control helper function */ -int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl, - const char * const *menu_items); -const char *v4l2_ctrl_get_name(u32 id); -const char * const *v4l2_ctrl_get_menu(u32 id); -const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len); int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def); -int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, - struct v4l2_queryctrl *qctrl, const char * const *menu_items); -#define V4L2_CTRL_MENU_IDS_END (0xffffffff) -int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids); - -/* Note: ctrl_classes points to an array of u32 pointers. Each u32 array is a - 0-terminated array of control IDs. Each array must be sorted low to high - and belong to the same control class. The array of u32 pointers must also - be sorted, from low class IDs to high class IDs. */ -u32 v4l2_ctrl_next(const u32 * const *ctrl_classes, u32 id); /* ------------------------------------------------------------------------- */ diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index b7cd7a665e35..911f3e542834 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -670,6 +670,31 @@ static inline int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, */ void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv); +/** v4l2_ctrl_get_name() - Get the name of the control + * @id: The control ID. + * + * This function returns the name of the given control ID or NULL if it isn't + * a known control. + */ +const char *v4l2_ctrl_get_name(u32 id); + +/** v4l2_ctrl_get_menu() - Get the menu string array of the control + * @id: The control ID. + * + * This function returns the NULL-terminated menu string array name of the + * given control ID or NULL if it isn't a known menu control. + */ +const char * const *v4l2_ctrl_get_menu(u32 id); + +/** v4l2_ctrl_get_int_menu() - Get the integer menu array of the control + * @id: The control ID. + * @len: The size of the integer array. + * + * This function returns the integer array of the given control ID or NULL if it + * if it isn't a known integer menu control. 
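Now that the control-name and menu lookups live next to the control framework, a driver can resolve standard controls without the removed v4l2-common helpers. A rough sketch that dumps a standard menu control; the function name and the pr_info() output format are illustrative only:

#include <linux/kernel.h>
#include <media/v4l2-ctrls.h>

static void example_dump_menu_ctrl(u32 id)
{
        const char * const *menu = v4l2_ctrl_get_menu(id);
        unsigned int i;

        if (!menu)
                return;         /* not a known menu control */

        pr_info("%s:\n", v4l2_ctrl_get_name(id));
        for (i = 0; menu[i]; i++)
                pr_info("  %u: %s\n", i, menu[i]);
}

Called with something like V4L2_CID_POWER_LINE_FREQUENCY, this would print the standard menu entries for that control.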
+ */ +const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len); + /** v4l2_ctrl_g_ctrl() - Helper function to get the control's value from within a driver. * @ctrl: The control. * diff --git a/include/media/v4l2-image-sizes.h b/include/media/v4l2-image-sizes.h index 10daf92ff1ab..a07d7a683bd9 100644 --- a/include/media/v4l2-image-sizes.h +++ b/include/media/v4l2-image-sizes.h @@ -25,10 +25,19 @@ #define QVGA_WIDTH 320 #define QVGA_HEIGHT 240 +#define SVGA_WIDTH 800 +#define SVGA_HEIGHT 600 + #define SXGA_WIDTH 1280 #define SXGA_HEIGHT 1024 #define VGA_WIDTH 640 #define VGA_HEIGHT 480 +#define UXGA_WIDTH 1600 +#define UXGA_HEIGHT 1200 + +#define XGA_WIDTH 1024 +#define XGA_HEIGHT 768 + #endif /* _IMAGE_SIZES_H */ diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index 395c4a95a42a..38d960d8dccd 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -94,16 +94,20 @@ static inline void v4l2_fill_pix_format(struct v4l2_pix_format *pix_fmt, pix_fmt->height = mbus_fmt->height; pix_fmt->field = mbus_fmt->field; pix_fmt->colorspace = mbus_fmt->colorspace; + pix_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; + pix_fmt->quantization = mbus_fmt->quantization; } static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, const struct v4l2_pix_format *pix_fmt, - enum v4l2_mbus_pixelcode code) + u32 code) { mbus_fmt->width = pix_fmt->width; mbus_fmt->height = pix_fmt->height; mbus_fmt->field = pix_fmt->field; mbus_fmt->colorspace = pix_fmt->colorspace; + mbus_fmt->ycbcr_enc = pix_fmt->ycbcr_enc; + mbus_fmt->quantization = pix_fmt->quantization; mbus_fmt->code = code; } diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index d7465725773d..5860292d42eb 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -341,7 +341,7 @@ struct v4l2_subdev_video_ops { int (*query_dv_timings)(struct v4l2_subdev *sd, struct v4l2_dv_timings *timings); int (*enum_mbus_fmt)(struct v4l2_subdev *sd, unsigned int index, - enum v4l2_mbus_pixelcode *code); + u32 *code); int (*enum_mbus_fsizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize); int (*g_mbus_fmt)(struct v4l2_subdev *sd, diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 6ef2d01197da..bd2cec2d6c3d 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -82,19 +82,23 @@ struct vb2_threadio_data; * unmap_dmabuf. */ struct vb2_mem_ops { - void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags); + void *(*alloc)(void *alloc_ctx, unsigned long size, + enum dma_data_direction dma_dir, + gfp_t gfp_flags); void (*put)(void *buf_priv); struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags); void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, - unsigned long size, int write); + unsigned long size, + enum dma_data_direction dma_dir); void (*put_userptr)(void *buf_priv); void (*prepare)(void *buf_priv); void (*finish)(void *buf_priv); void *(*attach_dmabuf)(void *alloc_ctx, struct dma_buf *dbuf, - unsigned long size, int write); + unsigned long size, + enum dma_data_direction dma_dir); void (*detach_dmabuf)(void *buf_priv); int (*map_dmabuf)(void *buf_priv); void (*unmap_dmabuf)(void *buf_priv); @@ -270,22 +274,24 @@ struct vb2_buffer { * queue setup from completing successfully; optional. 
* @buf_prepare: called every time the buffer is queued from userspace * and from the VIDIOC_PREPARE_BUF ioctl; drivers may - * perform any initialization required before each hardware - * operation in this callback; drivers that support - * VIDIOC_CREATE_BUFS must also validate the buffer size; - * if an error is returned, the buffer will not be queued - * in driver; optional. + * perform any initialization required before each + * hardware operation in this callback; drivers can + * access/modify the buffer here as it is still synced for + * the CPU; drivers that support VIDIOC_CREATE_BUFS must + * also validate the buffer size; if an error is returned, + * the buffer will not be queued in driver; optional. * @buf_finish: called before every dequeue of the buffer back to - * userspace; drivers may perform any operations required - * before userspace accesses the buffer; optional. The - * buffer state can be one of the following: DONE and - * ERROR occur while streaming is in progress, and the - * PREPARED state occurs when the queue has been canceled - * and all pending buffers are being returned to their - * default DEQUEUED state. Typically you only have to do - * something if the state is VB2_BUF_STATE_DONE, since in - * all other cases the buffer contents will be ignored - * anyway. + * userspace; the buffer is synced for the CPU, so drivers + * can access/modify the buffer contents; drivers may + * perform any operations required before userspace + * accesses the buffer; optional. The buffer state can be + * one of the following: DONE and ERROR occur while + * streaming is in progress, and the PREPARED state occurs + * when the queue has been canceled and all pending + * buffers are being returned to their default DEQUEUED + * state. Typically you only have to do something if the + * state is VB2_BUF_STATE_DONE, since in all other cases + * the buffer contents will be ignored anyway. * @buf_cleanup: called once before the buffer is freed; drivers may * perform any additional cleanup; optional. 
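Since buf_prepare() and buf_finish() are now guaranteed to run with the buffer synced for the CPU, a driver can touch the payload directly in those callbacks. A sketch under that assumption; EXAMPLE_IMAGE_SIZE and the zero-fill are purely illustrative:

#include <linux/string.h>
#include <linux/errno.h>
#include <media/videobuf2-core.h>

#define EXAMPLE_IMAGE_SIZE      (640 * 480 * 2) /* made-up fixed frame size */

static int example_buf_prepare(struct vb2_buffer *vb)
{
        void *vaddr = vb2_plane_vaddr(vb, 0);

        if (vb2_plane_size(vb, 0) < EXAMPLE_IMAGE_SIZE)
                return -EINVAL;

        /* The core has synced the buffer for CPU access, so pre-filling
         * it here is safe before the hardware takes over. */
        if (vaddr)
                memset(vaddr, 0, EXAMPLE_IMAGE_SIZE);

        vb2_set_plane_payload(vb, 0, EXAMPLE_IMAGE_SIZE);
        return 0;
}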
* @start_streaming: called once to enter 'streaming' state; the driver may diff --git a/include/media/videobuf2-dma-sg.h b/include/media/videobuf2-dma-sg.h index 7b89852779af..14ce3068b642 100644 --- a/include/media/videobuf2-dma-sg.h +++ b/include/media/videobuf2-dma-sg.h @@ -21,6 +21,9 @@ static inline struct sg_table *vb2_dma_sg_plane_desc( return (struct sg_table *)vb2_plane_cookie(vb, plane_no); } +void *vb2_dma_sg_init_ctx(struct device *dev); +void vb2_dma_sg_cleanup_ctx(void *alloc_ctx); + extern const struct vb2_mem_ops vb2_dma_sg_memops; #endif diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index d184df1d0d41..dc03d77ad23b 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -372,12 +372,12 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset) return skb->len + uncomp_header - ret; } -typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev); - -int lowpan_process_data(struct sk_buff *skb, struct net_device *dev, - const u8 *saddr, const u8 saddr_type, const u8 saddr_len, - const u8 *daddr, const u8 daddr_type, const u8 daddr_len, - u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver); +int +lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev, + const u8 *saddr, const u8 saddr_type, + const u8 saddr_len, const u8 *daddr, + const u8 daddr_type, const u8 daddr_len, + u8 iphc0, u8 iphc1); int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, const void *_saddr, unsigned int len); diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h index d9fa68f26c41..2a25dec30211 100644 --- a/include/net/9p/transport.h +++ b/include/net/9p/transport.h @@ -34,7 +34,6 @@ * @list: used to maintain a list of currently available transports * @name: the human-readable name of the transport * @maxsize: transport provided maximum packet size - * @pref: Preferences of this transport * @def: set if this transport should be considered the default * @create: member function to create a new connection on this transport * @close: member function to discard a connection on this transport diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h index 085940f7eeec..7d38e2ffd256 100644 --- a/include/net/af_ieee802154.h +++ b/include/net/af_ieee802154.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Sergey Lapin <slapin@ossfans.org> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index 428277869400..0d87674fb775 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -103,14 +103,14 @@ struct vsock_transport { int (*dgram_dequeue)(struct kiocb *kiocb, struct vsock_sock *vsk, struct msghdr *msg, size_t len, int flags); int (*dgram_enqueue)(struct vsock_sock *, struct sockaddr_vm *, - struct iovec *, size_t len); + struct msghdr *, size_t len); bool (*dgram_allow)(u32 cid, u32 port); /* STREAM. 
*/ /* TODO: stream_bind() */ - ssize_t (*stream_dequeue)(struct vsock_sock *, struct iovec *, + ssize_t (*stream_dequeue)(struct vsock_sock *, struct msghdr *, size_t len, int flags); - ssize_t (*stream_enqueue)(struct vsock_sock *, struct iovec *, + ssize_t (*stream_enqueue)(struct vsock_sock *, struct msghdr *, size_t len); s64 (*stream_has_data)(struct vsock_sock *); s64 (*stream_has_space)(struct vsock_sock *); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 6e8f24967308..40129b3838b2 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -129,6 +129,15 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_INVALID_BDADDR, + + /* When this quirk is set, the duplicate filtering during + * scanning is based on Bluetooth devices addresses. To allow + * RSSI based updates, restart scanning if needed. + * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. + */ + HCI_QUIRK_STRICT_DUPLICATE_FILTER, }; /* HCI device flags */ @@ -154,6 +163,7 @@ enum { enum { HCI_DUT_MODE, HCI_FORCE_SC, + HCI_FORCE_LESC, HCI_FORCE_STATIC_ADDR, }; @@ -265,6 +275,7 @@ enum { /* Low Energy links do not have defined link type. Use invented one */ #define LE_LINK 0x80 #define AMP_LINK 0x81 +#define INVALID_LINK 0xff /* LMP features */ #define LMP_3SLOT 0x01 @@ -332,6 +343,7 @@ enum { #define HCI_LE_ENCRYPTION 0x01 #define HCI_LE_CONN_PARAM_REQ_PROC 0x02 #define HCI_LE_PING 0x10 +#define HCI_LE_EXT_SCAN_POLICY 0x80 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 @@ -401,6 +413,7 @@ enum { /* The core spec defines 127 as the "not available" value */ #define HCI_TX_POWER_INVALID 127 +#define HCI_RSSI_INVALID 127 #define HCI_ROLE_MASTER 0x00 #define HCI_ROLE_SLAVE 0x01 @@ -629,7 +642,7 @@ struct hci_cp_user_passkey_reply { struct hci_cp_remote_oob_data_reply { bdaddr_t bdaddr; __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; #define HCI_OP_REMOTE_OOB_DATA_NEG_REPLY 0x0433 @@ -721,9 +734,9 @@ struct hci_rp_set_csb { struct hci_cp_remote_oob_ext_data_reply { bdaddr_t bdaddr; __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define HCI_OP_SNIFF_MODE 0x0803 @@ -930,7 +943,7 @@ struct hci_cp_write_ssp_mode { struct hci_rp_read_local_oob_data { __u8 status; __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; #define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58 @@ -1014,9 +1027,9 @@ struct hci_cp_write_sc_support { struct hci_rp_read_local_oob_ext_data { __u8 status; __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define HCI_OP_READ_LOCAL_VERSION 0x1001 @@ -1463,6 +1476,11 @@ struct hci_ev_cmd_status { __le16 opcode; } __packed; +#define HCI_EV_HARDWARE_ERROR 0x10 +struct hci_ev_hardware_error { + __u8 code; +} __packed; + #define HCI_EV_ROLE_CHANGE 0x12 struct hci_ev_role_change { __u8 status; @@ -1734,6 +1752,25 @@ struct hci_ev_le_conn_complete { __u8 clk_accurancy; } __packed; +/* Advertising report event types */ +#define LE_ADV_IND 0x00 +#define LE_ADV_DIRECT_IND 0x01 +#define LE_ADV_SCAN_IND 0x02 +#define LE_ADV_NONCONN_IND 0x03 +#define LE_ADV_SCAN_RSP 0x04 + +#define ADDR_LE_DEV_PUBLIC 0x00 +#define ADDR_LE_DEV_RANDOM 0x01 + +#define HCI_EV_LE_ADVERTISING_REPORT 0x02 +struct hci_ev_le_advertising_info { + __u8 evt_type; + __u8 bdaddr_type; + bdaddr_t bdaddr; + __u8 length; + __u8 data[0]; +} 
__packed; + #define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03 struct hci_ev_le_conn_update_complete { __u8 status; @@ -1759,23 +1796,14 @@ struct hci_ev_le_remote_conn_param_req { __le16 timeout; } __packed; -/* Advertising report event types */ -#define LE_ADV_IND 0x00 -#define LE_ADV_DIRECT_IND 0x01 -#define LE_ADV_SCAN_IND 0x02 -#define LE_ADV_NONCONN_IND 0x03 -#define LE_ADV_SCAN_RSP 0x04 - -#define ADDR_LE_DEV_PUBLIC 0x00 -#define ADDR_LE_DEV_RANDOM 0x01 - -#define HCI_EV_LE_ADVERTISING_REPORT 0x02 -struct hci_ev_le_advertising_info { +#define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B +struct hci_ev_le_direct_adv_info { __u8 evt_type; __u8 bdaddr_type; bdaddr_t bdaddr; - __u8 length; - __u8 data[0]; + __u8 direct_addr_type; + bdaddr_t direct_addr; + __s8 rssi; } __packed; /* Internal events generated by Bluetooth stack */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 37ff1aef0845..3c7827005c25 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -75,6 +75,10 @@ struct discovery_state { u32 last_adv_flags; u8 last_adv_data[HCI_MAX_AD_LENGTH]; u8 last_adv_data_len; + bool report_invalid_rssi; + s8 rssi; + u16 uuid_count; + u8 (*uuids)[16]; }; struct hci_conn_hash { @@ -108,6 +112,7 @@ struct smp_csrk { struct smp_ltk { struct list_head list; + struct rcu_head rcu; bdaddr_t bdaddr; u8 bdaddr_type; u8 authenticated; @@ -120,6 +125,7 @@ struct smp_ltk { struct smp_irk { struct list_head list; + struct rcu_head rcu; bdaddr_t rpa; bdaddr_t bdaddr; u8 addr_type; @@ -128,6 +134,7 @@ struct smp_irk { struct link_key { struct list_head list; + struct rcu_head rcu; bdaddr_t bdaddr; u8 type; u8 val[HCI_LINK_KEY_SIZE]; @@ -137,10 +144,11 @@ struct link_key { struct oob_data { struct list_head list; bdaddr_t bdaddr; + u8 bdaddr_type; u8 hash192[16]; - u8 randomizer192[16]; + u8 rand192[16]; u8 hash256[16]; - u8 randomizer256[16]; + u8 rand256[16]; }; #define HCI_MAX_SHORT_NAME_LENGTH 10 @@ -303,6 +311,7 @@ struct hci_dev { __u32 req_result; void *smp_data; + void *smp_bredr_data; struct discovery_state discovery; struct hci_conn_hash conn_hash; @@ -398,6 +407,8 @@ struct hci_conn { __u16 le_conn_interval; __u16 le_conn_latency; __u16 le_supv_timeout; + __u8 le_adv_data[HCI_MAX_AD_LENGTH]; + __u8 le_adv_data_len; __s8 rssi; __s8 tx_power; __s8 max_tx_power; @@ -496,6 +507,17 @@ static inline void discovery_init(struct hci_dev *hdev) INIT_LIST_HEAD(&hdev->discovery.all); INIT_LIST_HEAD(&hdev->discovery.unknown); INIT_LIST_HEAD(&hdev->discovery.resolve); + hdev->discovery.report_invalid_rssi = true; + hdev->discovery.rssi = HCI_RSSI_INVALID; +} + +static inline void hci_discovery_filter_clear(struct hci_dev *hdev) +{ + hdev->discovery.report_invalid_rssi = true; + hdev->discovery.rssi = HCI_RSSI_INVALID; + hdev->discovery.uuid_count = 0; + kfree(hdev->discovery.uuids); + hdev->discovery.uuids = NULL; } bool hci_discovery_active(struct hci_dev *hdev); @@ -553,6 +575,8 @@ enum { HCI_CONN_STK_ENCRYPT, HCI_CONN_AUTH_INITIATOR, HCI_CONN_DROP, + HCI_CONN_PARAM_REMOVAL_PEND, + HCI_CONN_NEW_LINK_KEY, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) @@ -643,6 +667,26 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev) return c->acl_num + c->amp_num + c->sco_num + c->le_num; } +static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + __u8 type = INVALID_LINK; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + 
if (c->handle == handle) { + type = c->type; + break; + } + } + + rcu_read_unlock(); + + return type; +} + static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, __u16 handle) { @@ -853,6 +897,7 @@ int hci_register_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); +int hci_reset_dev(struct hci_dev *hdev); int hci_dev_open(__u16 dev); int hci_dev_close(__u16 dev); int hci_dev_reset(__u16 dev); @@ -894,13 +939,11 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len, bool *persistent); -struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, - u8 role); struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand); -struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 addr_type, u8 role); +struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 role); int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); void hci_smp_ltks_clear(struct hci_dev *hdev); int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); @@ -915,13 +958,12 @@ void hci_smp_irks_clear(struct hci_dev *hdev); void hci_remote_oob_data_clear(struct hci_dev *hdev); struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, - bdaddr_t *bdaddr); + bdaddr_t *bdaddr, u8 bdaddr_type); int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 *hash, u8 *randomizer); -int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 *hash192, u8 *randomizer192, - u8 *hash256, u8 *randomizer256); -int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr); + u8 bdaddr_type, u8 *hash192, u8 *rand192, + u8 *hash256, u8 *rand256); +int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); @@ -972,6 +1014,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ !test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) +#define bredr_sc_enabled(dev) ((lmp_sc_capable(dev) || \ + test_bit(HCI_FORCE_SC, &(dev)->dbg_flags)) && \ + test_bit(HCI_SC_ENABLED, &(dev)->dev_flags)) /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 @@ -1310,9 +1355,8 @@ int mgmt_update_adv_data(struct hci_dev *hdev); void mgmt_discoverable_timeout(struct hci_dev *hdev); void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent); -void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, - u8 addr_type, u32 flags, u8 *name, u8 name_len, - u8 *dev_class); +void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, + u32 flags, u8 *name, u8 name_len); void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected); @@ -1349,8 +1393,8 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status); void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192, - u8 *randomizer192, u8 *hash256, - u8 *randomizer256, u8 status); + u8 *rand192, u8 *hash256, u8 
*rand256, + u8 status); void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len); diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index ead99f032f7a..d1bb342d083f 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -28,6 +28,7 @@ #define __L2CAP_H #include <asm/unaligned.h> +#include <linux/atomic.h> /* L2CAP defaults */ #define L2CAP_DEFAULT_MTU 672 @@ -140,6 +141,7 @@ struct l2cap_conninfo { #define L2CAP_FC_ATT 0x10 #define L2CAP_FC_SIG_LE 0x20 #define L2CAP_FC_SMP_LE 0x40 +#define L2CAP_FC_SMP_BREDR 0x80 /* L2CAP Control Field bit masks */ #define L2CAP_CTRL_SAR 0xC000 @@ -254,6 +256,7 @@ struct l2cap_conn_rsp { #define L2CAP_CID_ATT 0x0004 #define L2CAP_CID_LE_SIGNALING 0x0005 #define L2CAP_CID_SMP 0x0006 +#define L2CAP_CID_SMP_BREDR 0x0007 #define L2CAP_CID_DYN_START 0x0040 #define L2CAP_CID_DYN_END 0xffff #define L2CAP_CID_LE_DYN_END 0x007f @@ -481,6 +484,7 @@ struct l2cap_chan { struct hci_conn *hs_hcon; struct hci_chan *hs_hchan; struct kref kref; + atomic_t nesting; __u8 state; @@ -604,10 +608,6 @@ struct l2cap_ops { struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb); - int (*memcpy_fromiovec) (struct l2cap_chan *chan, - unsigned char *kdata, - struct iovec *iov, - int len); }; struct l2cap_conn { @@ -617,8 +617,8 @@ struct l2cap_conn { unsigned int mtu; __u32 feat_mask; - __u8 fixed_chan_mask; - bool hs_enabled; + __u8 remote_fixed_chan; + __u8 local_fixed_chan; __u8 info_state; __u8 info_ident; @@ -713,6 +713,17 @@ enum { FLAG_HOLD_HCI_CONN, }; +/* Lock nesting levels for L2CAP channels. We need these because lockdep + * otherwise considers all channels equal and will e.g. complain about a + * connection oriented channel triggering SMP procedures or a listening + * channel creating and locking a child channel. 
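A short sketch of how the per-channel nesting level is meant to be used: a listening (parent) channel is locked at L2CAP_NESTING_PARENT while the child it creates is locked at L2CAP_NESTING_NORMAL, so lockdep sees two distinct subclasses rather than a recursive acquisition. The helper new_child_chan() is hypothetical; the enum values follow just below.

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/l2cap.h>

static struct l2cap_chan *new_child_chan(struct l2cap_chan *parent);

static void example_accept_child(struct l2cap_chan *parent)
{
        struct l2cap_chan *child;

        /* parent->nesting was set to L2CAP_NESTING_PARENT when the
         * channel entered the listening state. */
        l2cap_chan_lock(parent);

        child = new_child_chan(parent);
        atomic_set(&child->nesting, L2CAP_NESTING_NORMAL);

        /* Different lockdep subclass, so locking the child while the
         * parent is held does not trigger a false positive. */
        l2cap_chan_lock(child);
        /* ... initialize the child channel ... */
        l2cap_chan_unlock(child);

        l2cap_chan_unlock(parent);
}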
+ */ +enum { + L2CAP_NESTING_SMP, + L2CAP_NESTING_NORMAL, + L2CAP_NESTING_PARENT, +}; + enum { L2CAP_TX_STATE_XMIT, L2CAP_TX_STATE_WAIT_F, @@ -778,7 +789,7 @@ void l2cap_chan_put(struct l2cap_chan *c); static inline void l2cap_chan_lock(struct l2cap_chan *chan) { - mutex_lock(&chan->lock); + mutex_lock_nested(&chan->lock, atomic_read(&chan->nesting)); } static inline void l2cap_chan_unlock(struct l2cap_chan *chan) @@ -890,31 +901,6 @@ static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan) return 0; } -static inline int l2cap_chan_no_memcpy_fromiovec(struct l2cap_chan *chan, - unsigned char *kdata, - struct iovec *iov, - int len) -{ - /* Following is safe since for compiler definitions of kvec and - * iovec are identical, yielding the same in-core layout and alignment - */ - struct kvec *vec = (struct kvec *)iov; - - while (len > 0) { - if (vec->iov_len) { - int copy = min_t(unsigned int, len, vec->iov_len); - memcpy(kdata, vec->iov_base, copy); - len -= copy; - kdata += copy; - vec->iov_base += copy; - vec->iov_len -= copy; - } - vec++; - } - - return 0; -} - extern bool disable_ertm; int l2cap_init_sockets(void); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 414cd2f9a437..95c34d5180fa 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -184,6 +184,9 @@ struct mgmt_cp_load_link_keys { #define MGMT_LTK_UNAUTHENTICATED 0x00 #define MGMT_LTK_AUTHENTICATED 0x01 +#define MGMT_LTK_P256_UNAUTH 0x02 +#define MGMT_LTK_P256_AUTH 0x03 +#define MGMT_LTK_P256_DEBUG 0x04 struct mgmt_ltk_info { struct mgmt_addr_info addr; @@ -299,28 +302,28 @@ struct mgmt_cp_user_passkey_neg_reply { #define MGMT_READ_LOCAL_OOB_DATA_SIZE 0 struct mgmt_rp_read_local_oob_data { __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; struct mgmt_rp_read_local_oob_ext_data { __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define MGMT_OP_ADD_REMOTE_OOB_DATA 0x0021 struct mgmt_cp_add_remote_oob_data { struct mgmt_addr_info addr; __u8 hash[16]; - __u8 randomizer[16]; + __u8 rand[16]; } __packed; #define MGMT_ADD_REMOTE_OOB_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 32) struct mgmt_cp_add_remote_oob_ext_data { struct mgmt_addr_info addr; __u8 hash192[16]; - __u8 randomizer192[16]; + __u8 rand192[16]; __u8 hash256[16]; - __u8 randomizer256[16]; + __u8 rand256[16]; } __packed; #define MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 64) @@ -495,6 +498,15 @@ struct mgmt_cp_set_public_address { } __packed; #define MGMT_SET_PUBLIC_ADDRESS_SIZE 6 +#define MGMT_OP_START_SERVICE_DISCOVERY 0x003A +struct mgmt_cp_start_service_discovery { + __u8 type; + __s8 rssi; + __le16 uuid_count; + __u8 uuids[0][16]; +} __packed; +#define MGMT_START_SERVICE_DISCOVERY_SIZE 4 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h new file mode 100644 index 000000000000..e01d903633ef --- /dev/null +++ b/include/net/bond_3ad.h @@ -0,0 +1,283 @@ +/* + * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + */ + +#ifndef _NET_BOND_3AD_H +#define _NET_BOND_3AD_H + +#include <asm/byteorder.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/if_ether.h> + +/* General definitions */ +#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW) +#define AD_TIMER_INTERVAL 100 /*msec*/ + +#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} + +#define AD_LACP_SLOW 0 +#define AD_LACP_FAST 1 + +typedef struct mac_addr { + u8 mac_addr_value[ETH_ALEN]; +} __packed mac_addr_t; + +enum { + BOND_AD_STABLE = 0, + BOND_AD_BANDWIDTH = 1, + BOND_AD_COUNT = 2, +}; + +/* rx machine states(43.4.11 in the 802.3ad standard) */ +typedef enum { + AD_RX_DUMMY, + AD_RX_INITIALIZE, /* rx Machine */ + AD_RX_PORT_DISABLED, /* rx Machine */ + AD_RX_LACP_DISABLED, /* rx Machine */ + AD_RX_EXPIRED, /* rx Machine */ + AD_RX_DEFAULTED, /* rx Machine */ + AD_RX_CURRENT /* rx Machine */ +} rx_states_t; + +/* periodic machine states(43.4.12 in the 802.3ad standard) */ +typedef enum { + AD_PERIODIC_DUMMY, + AD_NO_PERIODIC, /* periodic machine */ + AD_FAST_PERIODIC, /* periodic machine */ + AD_SLOW_PERIODIC, /* periodic machine */ + AD_PERIODIC_TX /* periodic machine */ +} periodic_states_t; + +/* mux machine states(43.4.13 in the 802.3ad standard) */ +typedef enum { + AD_MUX_DUMMY, + AD_MUX_DETACHED, /* mux machine */ + AD_MUX_WAITING, /* mux machine */ + AD_MUX_ATTACHED, /* mux machine */ + AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */ +} mux_states_t; + +/* tx machine states(43.4.15 in the 802.3ad standard) */ +typedef enum { + AD_TX_DUMMY, + AD_TRANSMIT /* tx Machine */ +} tx_states_t; + +/* rx indication types */ +typedef enum { + AD_TYPE_LACPDU = 1, /* type lacpdu */ + AD_TYPE_MARKER /* type marker */ +} pdu_type_t; + +/* rx marker indication types */ +typedef enum { + AD_MARKER_INFORMATION_SUBTYPE = 1, /* marker imformation subtype */ + AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */ +} bond_marker_subtype_t; + +/* timers types(43.4.9 in the 802.3ad standard) */ +typedef enum { + AD_CURRENT_WHILE_TIMER, + AD_ACTOR_CHURN_TIMER, + AD_PERIODIC_TIMER, + AD_PARTNER_CHURN_TIMER, + AD_WAIT_WHILE_TIMER +} ad_timers_t; + +#pragma pack(1) + +/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */ +typedef struct lacpdu { + u8 subtype; /* = LACP(= 0x01) */ + u8 version_number; + u8 tlv_type_actor_info; /* = actor information(type/length/value) */ + u8 actor_information_length; /* = 20 */ + __be16 actor_system_priority; + struct mac_addr actor_system; + __be16 actor_key; + __be16 actor_port_priority; + __be16 actor_port; + u8 actor_state; + u8 reserved_3_1[3]; /* = 0 */ + u8 tlv_type_partner_info; /* = partner information */ + u8 partner_information_length; /* = 20 */ + __be16 partner_system_priority; + struct mac_addr partner_system; + __be16 partner_key; + __be16 partner_port_priority; + __be16 partner_port; + u8 partner_state; + u8 reserved_3_2[3]; /* = 0 */ + u8 
tlv_type_collector_info; /* = collector information */ + u8 collector_information_length;/* = 16 */ + __be16 collector_max_delay; + u8 reserved_12[12]; + u8 tlv_type_terminator; /* = terminator */ + u8 terminator_length; /* = 0 */ + u8 reserved_50[50]; /* = 0 */ +} __packed lacpdu_t; + +typedef struct lacpdu_header { + struct ethhdr hdr; + struct lacpdu lacpdu; +} __packed lacpdu_header_t; + +/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */ +typedef struct bond_marker { + u8 subtype; /* = 0x02 (marker PDU) */ + u8 version_number; /* = 0x01 */ + u8 tlv_type; /* = 0x01 (marker information) */ + /* = 0x02 (marker response information) */ + u8 marker_length; /* = 0x16 */ + u16 requester_port; /* The number assigned to the port by the requester */ + struct mac_addr requester_system; /* The requester's system id */ + u32 requester_transaction_id; /* The transaction id allocated by the requester, */ + u16 pad; /* = 0 */ + u8 tlv_type_terminator; /* = 0x00 */ + u8 terminator_length; /* = 0x00 */ + u8 reserved_90[90]; /* = 0 */ +} __packed bond_marker_t; + +typedef struct bond_marker_header { + struct ethhdr hdr; + struct bond_marker marker; +} __packed bond_marker_header_t; + +#pragma pack() + +struct slave; +struct bonding; +struct ad_info; +struct port; + +#ifdef __ia64__ +#pragma pack(8) +#endif + +/* aggregator structure(43.4.5 in the 802.3ad standard) */ +typedef struct aggregator { + struct mac_addr aggregator_mac_address; + u16 aggregator_identifier; + bool is_individual; + u16 actor_admin_aggregator_key; + u16 actor_oper_aggregator_key; + struct mac_addr partner_system; + u16 partner_system_priority; + u16 partner_oper_aggregator_key; + u16 receive_state; /* BOOLEAN */ + u16 transmit_state; /* BOOLEAN */ + struct port *lag_ports; + /* ****** PRIVATE PARAMETERS ****** */ + struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */ + u16 is_active; /* BOOLEAN. 
Indicates if this aggregator is active */ + u16 num_of_ports; +} aggregator_t; + +struct port_params { + struct mac_addr system; + u16 system_priority; + u16 key; + u16 port_number; + u16 port_priority; + u16 port_state; +}; + +/* port structure(43.4.6 in the 802.3ad standard) */ +typedef struct port { + u16 actor_port_number; + u16 actor_port_priority; + struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */ + u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */ + u16 actor_port_aggregator_identifier; + bool ntt; + u16 actor_admin_port_key; + u16 actor_oper_port_key; + u8 actor_admin_port_state; + u8 actor_oper_port_state; + + struct port_params partner_admin; + struct port_params partner_oper; + + bool is_enabled; + + /* ****** PRIVATE PARAMETERS ****** */ + u16 sm_vars; /* all state machines variables for this port */ + rx_states_t sm_rx_state; /* state machine rx state */ + u16 sm_rx_timer_counter; /* state machine rx timer counter */ + periodic_states_t sm_periodic_state; /* state machine periodic state */ + u16 sm_periodic_timer_counter; /* state machine periodic timer counter */ + mux_states_t sm_mux_state; /* state machine mux state */ + u16 sm_mux_timer_counter; /* state machine mux timer counter */ + tx_states_t sm_tx_state; /* state machine tx state */ + u16 sm_tx_timer_counter; /* state machine tx timer counter(allways on - enter to transmit state 3 time per second) */ + struct slave *slave; /* pointer to the bond slave that this port belongs to */ + struct aggregator *aggregator; /* pointer to an aggregator that this port related to */ + struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */ + u32 transaction_id; /* continuous number for identification of Marker PDU's; */ + struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */ +} port_t; + +/* system structure */ +struct ad_system { + u16 sys_priority; + struct mac_addr sys_mac_addr; +}; + +#ifdef __ia64__ +#pragma pack() +#endif + +/* ========== AD Exported structures to the main bonding code ========== */ +#define BOND_AD_INFO(bond) ((bond)->ad_info) +#define SLAVE_AD_INFO(slave) ((slave)->ad_info) + +struct ad_bond_info { + struct ad_system system; /* 802.3ad system structure */ + u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */ + u16 aggregator_identifier; +}; + +struct ad_slave_info { + struct aggregator aggregator; /* 802.3ad aggregator structure */ + struct port port; /* 802.3ad port structure */ + u16 id; +}; + +/* ========== AD Exported functions to the main bonding code ========== */ +void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution); +void bond_3ad_bind_slave(struct slave *slave); +void bond_3ad_unbind_slave(struct slave *slave); +void bond_3ad_state_machine_handler(struct work_struct *); +void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout); +void bond_3ad_adapter_speed_changed(struct slave *slave); +void bond_3ad_adapter_duplex_changed(struct slave *slave); +void bond_3ad_handle_link_change(struct slave *slave, char link); +int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); +int __bond_3ad_get_active_agg_info(struct bonding *bond, + struct ad_info *ad_info); +int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); +int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding 
*bond, + struct slave *slave); +int bond_3ad_set_carrier(struct bonding *bond); +void bond_3ad_update_lacp_rate(struct bonding *bond); +#endif /* _NET_BOND_3AD_H */ + diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h new file mode 100644 index 000000000000..313a8d3b3069 --- /dev/null +++ b/include/net/bond_alb.h @@ -0,0 +1,181 @@ +/* + * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + */ + +#ifndef _NET_BOND_ALB_H +#define _NET_BOND_ALB_H + +#include <linux/if_ether.h> + +struct bonding; +struct slave; + +#define BOND_ALB_INFO(bond) ((bond)->alb_info) +#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info) + +#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */ +#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing. + * Used for division - never set + * to zero !!! + */ +#define BOND_ALB_DEFAULT_LP_INTERVAL 1 +#define BOND_ALB_LP_INTERVAL(bond) (bond->params.lp_interval) /* In seconds, periodic send of + * learning packets to the switch + */ + +#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \ + * ALB_TIMER_TICKS_PER_SEC) + +#define BOND_ALB_LP_TICKS(bond) (BOND_ALB_LP_INTERVAL(bond) \ + * ALB_TIMER_TICKS_PER_SEC) + +#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table. + * Note that this value MUST NOT be smaller + * because the key hash table is BYTE wide ! + */ + + +#define TLB_NULL_INDEX 0xffffffff + +/* rlb defs */ +#define RLB_HASH_TABLE_SIZE 256 +#define RLB_NULL_INDEX 0xffffffff +#define RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC) /* 2 seconds */ +#define RLB_ARP_BURST_SIZE 2 +#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb + * rebalance interval (5 min). + */ +/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is + * promiscuous after failover + */ +#define RLB_PROMISC_TIMEOUT (10*ALB_TIMER_TICKS_PER_SEC) + + +struct tlb_client_info { + struct slave *tx_slave; /* A pointer to slave used for transmiting + * packets to a Client that the Hash function + * gave this entry index. + */ + u32 tx_bytes; /* Each Client accumulates the BytesTx that + * were transmitted to it, and after each + * CallBack the LoadHistory is divided + * by the balance interval + */ + u32 load_history; /* This field contains the amount of Bytes + * that were transmitted to this client by + * the server on the previous balance + * interval in Bps. + */ + u32 next; /* The next Hash table entry index, assigned + * to use the same adapter for transmit. + */ + u32 prev; /* The previous Hash table entry index, + * assigned to use the same + */ +}; + +/* ------------------------------------------------------------------------- + * struct rlb_client_info contains all info related to a specific rx client + * connection. 
This is the Clients Hash Table entry struct. + * Note that this is not a proper hash table; if a new client's IP address + * hash collides with an existing client entry, the old entry is replaced. + * + * There is a linked list (linked by the used_next and used_prev members) + * linking all the used entries of the hash table. This allows updating + * all the clients without walking over all the unused elements of the table. + * + * There are also linked lists of entries with identical hash(ip_src). These + * allow cleaning up the table from ip_src<->mac_src associations that have + * become outdated and would cause sending out invalid ARP updates to the + * network. These are linked by the (src_next and src_prev members). + * ------------------------------------------------------------------------- + */ +struct rlb_client_info { + __be32 ip_src; /* the server IP address */ + __be32 ip_dst; /* the client IP address */ + u8 mac_src[ETH_ALEN]; /* the server MAC address */ + u8 mac_dst[ETH_ALEN]; /* the client MAC address */ + + /* list of used hash table entries, starting at rx_hashtbl_used_head */ + u32 used_next; + u32 used_prev; + + /* ip_src based hashing */ + u32 src_next; /* next entry with same hash(ip_src) */ + u32 src_prev; /* prev entry with same hash(ip_src) */ + u32 src_first; /* first entry with hash(ip_src) == this entry's index */ + + u8 assigned; /* checking whether this entry is assigned */ + u8 ntt; /* flag - need to transmit client info */ + struct slave *slave; /* the slave assigned to this client */ + unsigned short vlan_id; /* VLAN tag associated with IP address */ +}; + +struct tlb_slave_info { + u32 head; /* Index to the head of the bi-directional clients + * hash table entries list. The entries in the list + * are the entries that were assigned to use this + * slave for transmit. 
+ */ + u32 load; /* Each slave sums the loadHistory of all clients + * assigned to it + */ +}; + +struct alb_bond_info { + struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ + u32 unbalanced_load; + int tx_rebalance_counter; + int lp_counter; + /* -------- rlb parameters -------- */ + int rlb_enabled; + struct rlb_client_info *rx_hashtbl; /* Receive hash table */ + u32 rx_hashtbl_used_head; + u8 rx_ntt; /* flag - need to transmit + * to all rx clients + */ + struct slave *rx_slave;/* last slave to xmit from */ + u8 primary_is_promisc; /* boolean */ + u32 rlb_promisc_timeout_counter;/* counts primary + * promiscuity time + */ + u32 rlb_update_delay_counter; + u32 rlb_update_retry_counter;/* counter of retries + * of client update + */ + u8 rlb_rebalance; /* flag - indicates that the + * rx traffic should be + * rebalanced + */ +}; + +int bond_alb_initialize(struct bonding *bond, int rlb_enabled); +void bond_alb_deinitialize(struct bonding *bond); +int bond_alb_init_slave(struct bonding *bond, struct slave *slave); +void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave); +void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link); +void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave); +int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev); +int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev); +void bond_alb_monitor(struct work_struct *); +int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr); +void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id); +#endif /* _NET_BOND_ALB_H */ + diff --git a/include/net/bond_options.h b/include/net/bond_options.h new file mode 100644 index 000000000000..ea6546d2c946 --- /dev/null +++ b/include/net/bond_options.h @@ -0,0 +1,130 @@ +/* + * drivers/net/bond/bond_options.h - bonding options + * Copyright (c) 2013 Nikolay Aleksandrov <nikolay@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _NET_BOND_OPTIONS_H +#define _NET_BOND_OPTIONS_H + +#define BOND_OPT_MAX_NAMELEN 32 +#define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST) +#define BOND_MODE_ALL_EX(x) (~(x)) + +/* Option flags: + * BOND_OPTFLAG_NOSLAVES - check if the bond device is empty before setting + * BOND_OPTFLAG_IFDOWN - check if the bond device is down before setting + * BOND_OPTFLAG_RAWVAL - the option parses the value itself + */ +enum { + BOND_OPTFLAG_NOSLAVES = BIT(0), + BOND_OPTFLAG_IFDOWN = BIT(1), + BOND_OPTFLAG_RAWVAL = BIT(2) +}; + +/* Value type flags: + * BOND_VALFLAG_DEFAULT - mark the value as default + * BOND_VALFLAG_(MIN|MAX) - mark the value as min/max + */ +enum { + BOND_VALFLAG_DEFAULT = BIT(0), + BOND_VALFLAG_MIN = BIT(1), + BOND_VALFLAG_MAX = BIT(2) +}; + +/* Option IDs, their bit positions correspond to their IDs */ +enum { + BOND_OPT_MODE, + BOND_OPT_PACKETS_PER_SLAVE, + BOND_OPT_XMIT_HASH, + BOND_OPT_ARP_VALIDATE, + BOND_OPT_ARP_ALL_TARGETS, + BOND_OPT_FAIL_OVER_MAC, + BOND_OPT_ARP_INTERVAL, + BOND_OPT_ARP_TARGETS, + BOND_OPT_DOWNDELAY, + BOND_OPT_UPDELAY, + BOND_OPT_LACP_RATE, + BOND_OPT_MINLINKS, + BOND_OPT_AD_SELECT, + BOND_OPT_NUM_PEER_NOTIF, + BOND_OPT_MIIMON, + BOND_OPT_PRIMARY, + BOND_OPT_PRIMARY_RESELECT, + BOND_OPT_USE_CARRIER, + BOND_OPT_ACTIVE_SLAVE, + BOND_OPT_QUEUE_ID, + BOND_OPT_ALL_SLAVES_ACTIVE, + BOND_OPT_RESEND_IGMP, + BOND_OPT_LP_INTERVAL, + BOND_OPT_SLAVES, + BOND_OPT_TLB_DYNAMIC_LB, + BOND_OPT_LAST +}; + +/* This structure is used for storing option values and for passing option + * values when changing an option. The logic when used as an arg is as follows: + * - if string != NULL -> parse it, if the opt is RAW type then return it, else + * return the parse result + * - if string == NULL -> parse value + */ +struct bond_opt_value { + char *string; + u64 value; + u32 flags; +}; + +struct bonding; + +struct bond_option { + int id; + const char *name; + const char *desc; + u32 flags; + + /* unsuppmodes is used to denote modes in which the option isn't + * supported. + */ + unsigned long unsuppmodes; + /* supported values which this option can have, can be a subset of + * BOND_OPTVAL_RANGE's value range + */ + const struct bond_opt_value *values; + + int (*set)(struct bonding *bond, const struct bond_opt_value *val); +}; + +int __bond_opt_set(struct bonding *bond, unsigned int option, + struct bond_opt_value *val); +int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf); + +const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt, + struct bond_opt_value *val); +const struct bond_option *bond_opt_get(unsigned int option); +const struct bond_option *bond_opt_get_by_name(const char *name); +const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val); + +/* This helper is used to initialize a bond_opt_value structure for parameter + * passing. There should be either a valid string or value, but not both. + * When value is ULLONG_MAX then string will be used. 
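A brief usage sketch, not part of the diff, of how these declarations fit together with the bond_opt_initval()/bond_opt_initstr() helpers defined just below when changing an option. The function name is hypothetical, __bond_opt_set() is assumed to be called with RTNL already held (bond_opt_tryset_rtnl() takes RTNL itself), and the option IDs and values are purely illustrative.

#include <net/bonding.h>

/* Illustrative only: set miimon numerically, then the mode by name. */
static int example_tune_bond(struct bonding *bond)
{
	struct bond_opt_value newval;
	char mode[] = "802.3ad";
	int err;

	bond_opt_initval(&newval, 100);		/* numeric value */
	err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
	if (err)
		return err;

	return bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, mode);
}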
+ */ +static inline void __bond_opt_init(struct bond_opt_value *optval, + char *string, u64 value) +{ + memset(optval, 0, sizeof(*optval)); + optval->value = ULLONG_MAX; + if (value == ULLONG_MAX) + optval->string = string; + else + optval->value = value; +} +#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value) +#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX) + +void bond_option_arp_ip_targets_clear(struct bonding *bond); + +#endif /* _NET_BOND_OPTIONS_H */ diff --git a/include/net/bonding.h b/include/net/bonding.h new file mode 100644 index 000000000000..983a94b86b95 --- /dev/null +++ b/include/net/bonding.h @@ -0,0 +1,654 @@ +/* + * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'. + * + * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes + * NCM: Network and Communications Management, Inc. + * + * BUT, I'm the one who modified it for ethernet, so: + * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov + * + * This software may be used and distributed according to the terms + * of the GNU Public License, incorporated herein by reference. + * + */ + +#ifndef _NET_BONDING_H +#define _NET_BONDING_H + +#include <linux/timer.h> +#include <linux/proc_fs.h> +#include <linux/if_bonding.h> +#include <linux/cpumask.h> +#include <linux/in6.h> +#include <linux/netpoll.h> +#include <linux/inetdevice.h> +#include <linux/etherdevice.h> +#include <linux/reciprocal_div.h> +#include <linux/if_link.h> + +#include <net/bond_3ad.h> +#include <net/bond_alb.h> +#include <net/bond_options.h> + +#define DRV_VERSION "3.7.1" +#define DRV_RELDATE "April 27, 2011" +#define DRV_NAME "bonding" +#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" + +#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" + +#define BOND_MAX_ARP_TARGETS 16 + +#define BOND_DEFAULT_MIIMON 100 + +/* + * Less bad way to call ioctl from within the kernel; this needs to be + * done some other way to get the call out of interrupt context. + * Needs "ioctl" variable to be supplied by calling context. + */ +#define IOCTL(dev, arg, cmd) ({ \ + int res = 0; \ + mm_segment_t fs = get_fs(); \ + set_fs(get_ds()); \ + res = ioctl(dev, arg, cmd); \ + set_fs(fs); \ + res; }) + +#define BOND_MODE(bond) ((bond)->params.mode) + +/* slave list primitives */ +#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower) + +#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond)) + +/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */ +#define bond_first_slave(bond) \ + (bond_has_slaves(bond) ? \ + netdev_adjacent_get_private(bond_slave_list(bond)->next) : \ + NULL) +#define bond_last_slave(bond) \ + (bond_has_slaves(bond) ? 
\ + netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \ + NULL) + +/* Caller must have rcu_read_lock */ +#define bond_first_slave_rcu(bond) \ + netdev_lower_get_first_private_rcu(bond->dev) + +#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond)) +#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond)) + +/** + * bond_for_each_slave - iterate over all slaves + * @bond: the bond holding this list + * @pos: current slave + * @iter: list_head * iterator + * + * Caller must hold RTNL + */ +#define bond_for_each_slave(bond, pos, iter) \ + netdev_for_each_lower_private((bond)->dev, pos, iter) + +/* Caller must have rcu_read_lock */ +#define bond_for_each_slave_rcu(bond, pos, iter) \ + netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) + +#ifdef CONFIG_NET_POLL_CONTROLLER +extern atomic_t netpoll_block_tx; + +static inline void block_netpoll_tx(void) +{ + atomic_inc(&netpoll_block_tx); +} + +static inline void unblock_netpoll_tx(void) +{ + atomic_dec(&netpoll_block_tx); +} + +static inline int is_netpoll_tx_blocked(struct net_device *dev) +{ + if (unlikely(netpoll_tx_running(dev))) + return atomic_read(&netpoll_block_tx); + return 0; +} +#else +#define block_netpoll_tx() +#define unblock_netpoll_tx() +#define is_netpoll_tx_blocked(dev) (0) +#endif + +struct bond_params { + int mode; + int xmit_policy; + int miimon; + u8 num_peer_notif; + int arp_interval; + int arp_validate; + int arp_all_targets; + int use_carrier; + int fail_over_mac; + int updelay; + int downdelay; + int lacp_fast; + unsigned int min_links; + int ad_select; + char primary[IFNAMSIZ]; + int primary_reselect; + __be32 arp_targets[BOND_MAX_ARP_TARGETS]; + int tx_queues; + int all_slaves_active; + int resend_igmp; + int lp_interval; + int packets_per_slave; + int tlb_dynamic_lb; + struct reciprocal_value reciprocal_packets_per_slave; +}; + +struct bond_parm_tbl { + char *modename; + int mode; +}; + +struct slave { + struct net_device *dev; /* first - useful for panic debug */ + struct bonding *bond; /* our master */ + int delay; + /* all three in jiffies */ + unsigned long last_link_up; + unsigned long last_rx; + unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS]; + s8 link; /* one of BOND_LINK_XXXX */ + s8 new_link; + u8 backup:1, /* indicates backup slave. Value corresponds with + BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ + inactive:1, /* indicates inactive slave */ + should_notify:1; /* indicateds whether the state changed */ + u8 duplex; + u32 original_mtu; + u32 link_failure_count; + u32 speed; + u16 queue_id; + u8 perm_hwaddr[ETH_ALEN]; + struct ad_slave_info *ad_info; + struct tlb_slave_info tlb_info; +#ifdef CONFIG_NET_POLL_CONTROLLER + struct netpoll *np; +#endif + struct kobject kobj; + struct rtnl_link_stats64 slave_stats; +}; + +struct bond_up_slave { + unsigned int count; + struct rcu_head rcu; + struct slave *arr[0]; +}; + +/* + * Link pseudo-state only used internally by monitors + */ +#define BOND_LINK_NOCHANGE -1 + +/* + * Here are the locking policies for the two bonding locks: + * Get rcu_read_lock when reading or RTNL when writing slave list. 
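A short sketch, not part of the diff, of the read-side locking policy described above: the slave list is walked under rcu_read_lock() using the iterator macros from earlier in this header. The function and the counting it performs are purely illustrative.

#include <net/bonding.h>

/* Illustrative reader: count slaves whose link is currently up. */
static int example_count_up_slaves(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;
	int up = 0;

	rcu_read_lock();
	bond_for_each_slave_rcu(bond, slave, iter)
		if (slave->link == BOND_LINK_UP)
			up++;
	rcu_read_unlock();

	return up;
}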
+ */ +struct bonding { + struct net_device *dev; /* first - useful for panic debug */ + struct slave __rcu *curr_active_slave; + struct slave __rcu *current_arp_slave; + struct slave __rcu *primary_slave; + struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */ + bool force_primary; + s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ + int (*recv_probe)(const struct sk_buff *, struct bonding *, + struct slave *); + /* mode_lock is used for mode-specific locking needs, currently used by: + * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and + * bond_3ad_state_machine_handler() concurrently and also + * the access to the state machine shared variables. + * TLB mode (5) - to sync the use and modifications of its hash table + * ALB mode (6) - to sync the use and modifications of its hash table + */ + spinlock_t mode_lock; + u8 send_peer_notif; + u8 igmp_retrans; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_entry; + char proc_file_name[IFNAMSIZ]; +#endif /* CONFIG_PROC_FS */ + struct list_head bond_list; + u32 rr_tx_counter; + struct ad_bond_info ad_info; + struct alb_bond_info alb_info; + struct bond_params params; + struct workqueue_struct *wq; + struct delayed_work mii_work; + struct delayed_work arp_work; + struct delayed_work alb_work; + struct delayed_work ad_work; + struct delayed_work mcast_work; + struct delayed_work slave_arr_work; +#ifdef CONFIG_DEBUG_FS + /* debugging support via debugfs */ + struct dentry *debug_dir; +#endif /* CONFIG_DEBUG_FS */ + struct rtnl_link_stats64 bond_stats; +}; + +#define bond_slave_get_rcu(dev) \ + ((struct slave *) rcu_dereference(dev->rx_handler_data)) + +#define bond_slave_get_rtnl(dev) \ + ((struct slave *) rtnl_dereference(dev->rx_handler_data)) + +struct bond_vlan_tag { + __be16 vlan_proto; + unsigned short vlan_id; +}; + +/** + * Returns NULL if the net_device does not belong to any of the bond's slaves + * + * Caller must hold bond lock for read + */ +static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, + struct net_device *slave_dev) +{ + return netdev_lower_dev_get_private(bond->dev, slave_dev); +} + +static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) +{ + return slave->bond; +} + +static inline bool bond_should_override_tx_queue(struct bonding *bond) +{ + return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || + BOND_MODE(bond) == BOND_MODE_ROUNDROBIN; +} + +static inline bool bond_is_lb(const struct bonding *bond) +{ + return BOND_MODE(bond) == BOND_MODE_TLB || + BOND_MODE(bond) == BOND_MODE_ALB; +} + +static inline bool bond_is_nondyn_tlb(const struct bonding *bond) +{ + return (BOND_MODE(bond) == BOND_MODE_TLB) && + (bond->params.tlb_dynamic_lb == 0); +} + +static inline bool bond_mode_uses_xmit_hash(const struct bonding *bond) +{ + return (BOND_MODE(bond) == BOND_MODE_8023AD || + BOND_MODE(bond) == BOND_MODE_XOR || + bond_is_nondyn_tlb(bond)); +} + +static inline bool bond_mode_uses_arp(int mode) +{ + return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB && + mode != BOND_MODE_ALB; +} + +static inline bool bond_mode_uses_primary(int mode) +{ + return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB || + mode == BOND_MODE_ALB; +} + +static inline bool bond_uses_primary(struct bonding *bond) +{ + return bond_mode_uses_primary(BOND_MODE(bond)); +} + +static inline bool bond_slave_is_up(struct slave *slave) +{ + return netif_running(slave->dev) && netif_carrier_ok(slave->dev); +} + +static inline void 
bond_set_active_slave(struct slave *slave) +{ + if (slave->backup) { + slave->backup = 0; + rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); + } +} + +static inline void bond_set_backup_slave(struct slave *slave) +{ + if (!slave->backup) { + slave->backup = 1; + rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); + } +} + +static inline void bond_set_slave_state(struct slave *slave, + int slave_state, bool notify) +{ + if (slave->backup == slave_state) + return; + + slave->backup = slave_state; + if (notify) { + rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC); + slave->should_notify = 0; + } else { + if (slave->should_notify) + slave->should_notify = 0; + else + slave->should_notify = 1; + } +} + +static inline void bond_slave_state_change(struct bonding *bond) +{ + struct list_head *iter; + struct slave *tmp; + + bond_for_each_slave(bond, tmp, iter) { + if (tmp->link == BOND_LINK_UP) + bond_set_active_slave(tmp); + else if (tmp->link == BOND_LINK_DOWN) + bond_set_backup_slave(tmp); + } +} + +static inline void bond_slave_state_notify(struct bonding *bond) +{ + struct list_head *iter; + struct slave *tmp; + + bond_for_each_slave(bond, tmp, iter) { + if (tmp->should_notify) { + rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC); + tmp->should_notify = 0; + } + } +} + +static inline int bond_slave_state(struct slave *slave) +{ + return slave->backup; +} + +static inline bool bond_is_active_slave(struct slave *slave) +{ + return !bond_slave_state(slave); +} + +static inline bool bond_slave_can_tx(struct slave *slave) +{ + return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP && + bond_is_active_slave(slave); +} + +#define BOND_PRI_RESELECT_ALWAYS 0 +#define BOND_PRI_RESELECT_BETTER 1 +#define BOND_PRI_RESELECT_FAILURE 2 + +#define BOND_FOM_NONE 0 +#define BOND_FOM_ACTIVE 1 +#define BOND_FOM_FOLLOW 2 + +#define BOND_ARP_TARGETS_ANY 0 +#define BOND_ARP_TARGETS_ALL 1 + +#define BOND_ARP_VALIDATE_NONE 0 +#define BOND_ARP_VALIDATE_ACTIVE (1 << BOND_STATE_ACTIVE) +#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP) +#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \ + BOND_ARP_VALIDATE_BACKUP) +#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1) +#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \ + BOND_ARP_FILTER) +#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \ + BOND_ARP_FILTER) + +#define BOND_SLAVE_NOTIFY_NOW true +#define BOND_SLAVE_NOTIFY_LATER false + +static inline int slave_do_arp_validate(struct bonding *bond, + struct slave *slave) +{ + return bond->params.arp_validate & (1 << bond_slave_state(slave)); +} + +static inline int slave_do_arp_validate_only(struct bonding *bond) +{ + return bond->params.arp_validate & BOND_ARP_FILTER; +} + +static inline int bond_is_ip_target_ok(__be32 addr) +{ + return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr); +} + +/* Get the oldest arp which we've received on this slave for bond's + * arp_targets. 
+ */ +static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond, + struct slave *slave) +{ + int i = 1; + unsigned long ret = slave->target_last_arp_rx[0]; + + for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) + if (time_before(slave->target_last_arp_rx[i], ret)) + ret = slave->target_last_arp_rx[i]; + + return ret; +} + +static inline unsigned long slave_last_rx(struct bonding *bond, + struct slave *slave) +{ + if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL) + return slave_oldest_target_arp_rx(bond, slave); + + return slave->last_rx; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static inline void bond_netpoll_send_skb(const struct slave *slave, + struct sk_buff *skb) +{ + struct netpoll *np = slave->np; + + if (np) + netpoll_send_skb(np, skb); +} +#else +static inline void bond_netpoll_send_skb(const struct slave *slave, + struct sk_buff *skb) +{ +} +#endif + +static inline void bond_set_slave_inactive_flags(struct slave *slave, + bool notify) +{ + if (!bond_is_lb(slave->bond)) + bond_set_slave_state(slave, BOND_STATE_BACKUP, notify); + if (!slave->bond->params.all_slaves_active) + slave->inactive = 1; +} + +static inline void bond_set_slave_active_flags(struct slave *slave, + bool notify) +{ + bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify); + slave->inactive = 0; +} + +static inline bool bond_is_slave_inactive(struct slave *slave) +{ + return slave->inactive; +} + +static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local) +{ + struct in_device *in_dev; + __be32 addr = 0; + + rcu_read_lock(); + in_dev = __in_dev_get_rcu(dev); + + if (in_dev) + addr = inet_confirm_addr(dev_net(dev), in_dev, dst, local, + RT_SCOPE_HOST); + rcu_read_unlock(); + return addr; +} + +struct bond_net { + struct net *net; /* Associated network namespace */ + struct list_head dev_list; +#ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_dir; +#endif + struct class_attribute class_attr_bonding_masters; +}; + +int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); +void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); +int bond_create(struct net *net, const char *name); +int bond_create_sysfs(struct bond_net *net); +void bond_destroy_sysfs(struct bond_net *net); +void bond_prepare_sysfs_group(struct bonding *bond); +int bond_sysfs_slave_add(struct slave *slave); +void bond_sysfs_slave_del(struct slave *slave); +int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); +int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); +u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb); +void bond_select_active_slave(struct bonding *bond); +void bond_change_active_slave(struct bonding *bond, struct slave *new_active); +void bond_create_debugfs(void); +void bond_destroy_debugfs(void); +void bond_debug_register(struct bonding *bond); +void bond_debug_unregister(struct bonding *bond); +void bond_debug_reregister(struct bonding *bond); +const char *bond_mode_name(int mode); +void bond_setup(struct net_device *bond_dev); +unsigned int bond_get_num_tx_queues(void); +int bond_netlink_init(void); +void bond_netlink_fini(void); +struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond); +const char *bond_slave_link_status(s8 link); +struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev, + struct net_device *end_dev, + int level); +int bond_update_slave_arr(struct bonding *bond, struct slave 
*skipslave); +void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay); + +#ifdef CONFIG_PROC_FS +void bond_create_proc_entry(struct bonding *bond); +void bond_remove_proc_entry(struct bonding *bond); +void bond_create_proc_dir(struct bond_net *bn); +void bond_destroy_proc_dir(struct bond_net *bn); +#else +static inline void bond_create_proc_entry(struct bonding *bond) +{ +} + +static inline void bond_remove_proc_entry(struct bonding *bond) +{ +} + +static inline void bond_create_proc_dir(struct bond_net *bn) +{ +} + +static inline void bond_destroy_proc_dir(struct bond_net *bn) +{ +} +#endif + +static inline struct slave *bond_slave_has_mac(struct bonding *bond, + const u8 *mac) +{ + struct list_head *iter; + struct slave *tmp; + + bond_for_each_slave(bond, tmp, iter) + if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) + return tmp; + + return NULL; +} + +/* Caller must hold rcu_read_lock() for read */ +static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond, + const u8 *mac) +{ + struct list_head *iter; + struct slave *tmp; + + bond_for_each_slave_rcu(bond, tmp, iter) + if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) + return tmp; + + return NULL; +} + +/* Caller must hold rcu_read_lock() for read */ +static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac) +{ + struct list_head *iter; + struct slave *tmp; + struct netdev_hw_addr *ha; + + bond_for_each_slave_rcu(bond, tmp, iter) + if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) + return true; + + if (netdev_uc_empty(bond->dev)) + return false; + + netdev_for_each_uc_addr(ha, bond->dev) + if (ether_addr_equal_64bits(mac, ha->addr)) + return true; + + return false; +} + +/* Check if the ip is present in arp ip list, or first free slot if ip == 0 + * Returns -1 if not found, index if found + */ +static inline int bond_get_targets_ip(__be32 *targets, __be32 ip) +{ + int i; + + for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) + if (targets[i] == ip) + return i; + else if (targets[i] == 0) + break; + + return -1; +} + +/* exported from bond_main.c */ +extern int bond_net_id; +extern const struct bond_parm_tbl bond_lacp_tbl[]; +extern const struct bond_parm_tbl xmit_hashtype_tbl[]; +extern const struct bond_parm_tbl arp_validate_tbl[]; +extern const struct bond_parm_tbl arp_all_targets_tbl[]; +extern const struct bond_parm_tbl fail_over_mac_tbl[]; +extern const struct bond_parm_tbl pri_reselect_tbl[]; +extern struct bond_parm_tbl ad_select_tbl[]; + +/* exported from bond_netlink.c */ +extern struct rtnl_link_ops bond_link_ops; + +static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb) +{ + atomic_long_inc(&dev->tx_dropped); + dev_kfree_skb_any(skb); +} + +#endif /* _NET_BONDING_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index a2ddcf2398fd..4ebb816241fa 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -319,9 +319,12 @@ struct ieee80211_supported_band { /** * struct vif_params - describes virtual interface parameters * @use_4addr: use 4-address frames - * @macaddr: address to use for this virtual interface. This will only - * be used for non-netdevice interfaces. If this parameter is set - * to zero address the driver may determine the address as needed. + * @macaddr: address to use for this virtual interface. + * If this parameter is set to zero address the driver may + * determine the address as needed. + * This feature is only fully supported by drivers that enable the + * %NL80211_FEATURE_MAC_ON_CREATE flag. 
Others may support creating + ** only p2p devices with specified MAC. */ struct vif_params { int use_4addr; @@ -799,6 +802,22 @@ struct station_parameters { }; /** + * struct station_del_parameters - station deletion parameters + * + * Used to delete a station entry (or all stations). + * + * @mac: MAC address of the station to remove or NULL to remove all stations + * @subtype: Management frame subtype to use for indicating removal + * (10 = Disassociation, 12 = Deauthentication) + * @reason_code: Reason code for the Disassociation/Deauthentication frame + */ +struct station_del_parameters { + const u8 *mac; + u8 subtype; + u16 reason_code; +}; + +/** * enum cfg80211_station_type - the type of station being modified * @CFG80211_STA_AP_CLIENT: client of an AP interface * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has @@ -1340,6 +1359,16 @@ struct mesh_setup { }; /** + * struct ocb_setup - 802.11p OCB mode setup configuration + * @chandef: defines the channel to use + * + * These parameters are fixed when connecting to the network + */ +struct ocb_setup { + struct cfg80211_chan_def chandef; +}; + +/** * struct ieee80211_txq_params - TX queue parameters * @ac: AC identifier * @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled @@ -1408,6 +1437,10 @@ struct cfg80211_ssid { * @aborted: (internal) scan request was notified as aborted * @notified: (internal) scan request was notified as done or aborted * @no_cck: used to send probe requests at non CCK rate in 2GHz band + * @mac_addr: MAC address used with randomisation + * @mac_addr_mask: MAC address mask used with randomisation, bits that + * are 0 in the mask should be randomised, bits that are 1 should + * be taken from the @mac_addr */ struct cfg80211_scan_request { struct cfg80211_ssid *ssids; @@ -1422,6 +1455,9 @@ struct cfg80211_scan_request { struct wireless_dev *wdev; + u8 mac_addr[ETH_ALEN] __aligned(2); + u8 mac_addr_mask[ETH_ALEN] __aligned(2); + /* internal */ struct wiphy *wiphy; unsigned long scan_start; @@ -1432,6 +1468,17 @@ struct cfg80211_scan_request { struct ieee80211_channel *channels[0]; }; +static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask) +{ + int i; + + get_random_bytes(buf, ETH_ALEN); + for (i = 0; i < ETH_ALEN; i++) { + buf[i] &= ~mask[i]; + buf[i] |= addr[i] & mask[i]; + } +} + /** * struct cfg80211_match_set - sets of attributes to match * @@ -1465,6 +1512,10 @@ struct cfg80211_match_set { * @channels: channels to scan * @min_rssi_thold: for drivers only supporting a single threshold, this * contains the minimum over all matchsets + * @mac_addr: MAC address used with randomisation + * @mac_addr_mask: MAC address mask used with randomisation, bits that + * are 0 in the mask should be randomised, bits that are 1 should + * be taken from the @mac_addr */ struct cfg80211_sched_scan_request { struct cfg80211_ssid *ssids; @@ -1479,6 +1530,9 @@ struct cfg80211_sched_scan_request { int n_match_sets; s32 min_rssi_thold; + u8 mac_addr[ETH_ALEN] __aligned(2); + u8 mac_addr_mask[ETH_ALEN] __aligned(2); + /* internal */ struct wiphy *wiphy; struct net_device *dev; @@ -1911,6 +1965,7 @@ struct cfg80211_wowlan_tcp { * @rfkill_release: wake up when rfkill is released * @tcp: TCP connection establishment/wakeup parameters, see nl80211.h. * NULL if not configured. + * @nd_config: configuration for the scan to be used for net detect wake. 
*/ struct cfg80211_wowlan { bool any, disconnect, magic_pkt, gtk_rekey_failure, @@ -1919,6 +1974,7 @@ struct cfg80211_wowlan { struct cfg80211_pkt_pattern *patterns; struct cfg80211_wowlan_tcp *tcp; int n_patterns; + struct cfg80211_sched_scan_request *nd_config; }; /** @@ -1951,6 +2007,35 @@ struct cfg80211_coalesce { }; /** + * struct cfg80211_wowlan_nd_match - information about the match + * + * @ssid: SSID of the match that triggered the wake up + * @n_channels: Number of channels where the match occurred. This + * value may be zero if the driver can't report the channels. + * @channels: center frequencies of the channels where a match + * occurred (in MHz) + */ +struct cfg80211_wowlan_nd_match { + struct cfg80211_ssid ssid; + int n_channels; + u32 channels[]; +}; + +/** + * struct cfg80211_wowlan_nd_info - net detect wake up information + * + * @n_matches: Number of match information instances provided in + * @matches. This value may be zero if the driver can't provide + * match information. + * @matches: Array of pointers to matches containing information about + * the matches that triggered the wake up. + */ +struct cfg80211_wowlan_nd_info { + int n_matches; + struct cfg80211_wowlan_nd_match *matches[]; +}; + +/** * struct cfg80211_wowlan_wakeup - wakeup report * @disconnect: woke up by getting disconnected * @magic_pkt: woke up by receiving magic packet @@ -1969,6 +2054,7 @@ struct cfg80211_coalesce { * @tcp_match: TCP wakeup packet received * @tcp_connlost: TCP connection lost or failed to establish * @tcp_nomoretokens: TCP data ran out of tokens + * @net_detect: if not %NULL, woke up because of net detect */ struct cfg80211_wowlan_wakeup { bool disconnect, magic_pkt, gtk_rekey_failure, @@ -1978,6 +2064,7 @@ struct cfg80211_wowlan_wakeup { s32 pattern_idx; u32 packet_present_len, packet_len; const void *packet; + struct cfg80211_wowlan_nd_info *net_detect; }; /** @@ -2132,7 +2219,7 @@ struct cfg80211_qos_map { * @stop_ap: Stop being an AP, including stopping beaconing. * * @add_station: Add a new station. - * @del_station: Remove a station; @mac may be NULL to remove all stations. + * @del_station: Remove a station * @change_station: Modify a given station. Note that flags changes are not much * validated in cfg80211, in particular the auth/assoc/authorized flags * might come to the driver in invalid combinations -- make sure to check @@ -2146,6 +2233,8 @@ struct cfg80211_qos_map { * @change_mpath: change a given mesh path * @get_mpath: get a mesh path for the given parameters * @dump_mpath: dump mesh path callback -- resume dump at index @idx + * @get_mpp: get a mesh proxy path for the given parameters + * @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx * @join_mesh: join the mesh network with the specified parameters * (invoked with the wireless_dev mutex held) * @leave_mesh: leave the current mesh network @@ -2331,6 +2420,17 @@ struct cfg80211_qos_map { * with the peer followed by immediate teardown when the addition is later * rejected) * @del_tx_ts: remove an existing TX TS + * + * @join_ocb: join the OCB network with the specified parameters + * (invoked with the wireless_dev mutex held) + * @leave_ocb: leave the current OCB network + * (invoked with the wireless_dev mutex held) + * + * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver + * is responsible for continually initiating channel-switching operations + * and returning to the base channel for communication with the AP. 
+ * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both + * peers must be on the base channel when the call completes. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -2376,7 +2476,7 @@ struct cfg80211_ops { const u8 *mac, struct station_parameters *params); int (*del_station)(struct wiphy *wiphy, struct net_device *dev, - const u8 *mac); + struct station_del_parameters *params); int (*change_station)(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params); @@ -2396,6 +2496,11 @@ struct cfg80211_ops { int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo); + int (*get_mpp)(struct wiphy *wiphy, struct net_device *dev, + u8 *dst, u8 *mpp, struct mpath_info *pinfo); + int (*dump_mpp)(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *dst, u8 *mpp, + struct mpath_info *pinfo); int (*get_mesh_config)(struct wiphy *wiphy, struct net_device *dev, struct mesh_config *conf); @@ -2407,6 +2512,10 @@ struct cfg80211_ops { const struct mesh_setup *setup); int (*leave_mesh)(struct wiphy *wiphy, struct net_device *dev); + int (*join_ocb)(struct wiphy *wiphy, struct net_device *dev, + struct ocb_setup *setup); + int (*leave_ocb)(struct wiphy *wiphy, struct net_device *dev); + int (*change_bss)(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params); @@ -2577,6 +2686,14 @@ struct cfg80211_ops { u16 admitted_time); int (*del_tx_ts)(struct wiphy *wiphy, struct net_device *dev, u8 tsid, const u8 *peer); + + int (*tdls_channel_switch)(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr, u8 oper_class, + struct cfg80211_chan_def *chandef); + void (*tdls_cancel_channel_switch)(struct wiphy *wiphy, + struct net_device *dev, + const u8 *addr); }; /* @@ -2623,13 +2740,9 @@ struct cfg80211_ops { * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels. * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in * beaconing mode (AP, IBSS, Mesh, ...). - * @WIPHY_FLAG_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM - * TSPEC sessions (TID aka TSID 0-7) with the NL80211_CMD_ADD_TX_TS - * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it - * needs to be able to handle Block-Ack agreements and other things. */ enum wiphy_flags { - WIPHY_FLAG_SUPPORTS_WMM_ADMISSION = BIT(0), + /* use hole at 0 */ /* use hole at 1 */ /* use hole at 2 */ WIPHY_FLAG_NETNS_OK = BIT(3), @@ -2755,6 +2868,7 @@ struct ieee80211_txrx_stypes { * @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request * @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure * @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release + * @WIPHY_WOWLAN_NET_DETECT: supports wakeup on network detection */ enum wiphy_wowlan_support_flags { WIPHY_WOWLAN_ANY = BIT(0), @@ -2765,6 +2879,7 @@ enum wiphy_wowlan_support_flags { WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5), WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6), WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7), + WIPHY_WOWLAN_NET_DETECT = BIT(8), }; struct wiphy_wowlan_tcp_support { @@ -2783,6 +2898,11 @@ struct wiphy_wowlan_tcp_support { * @pattern_max_len: maximum length of each pattern * @pattern_min_len: minimum length of each pattern * @max_pkt_offset: maximum Rx packet offset + * @max_nd_match_sets: maximum number of matchsets for net-detect, + * similar, but not necessarily identical, to max_match_sets for + * scheduled scans. 
+ * See &struct cfg80211_sched_scan_request.@match_sets for more + * details. * @tcp: TCP wakeup support information */ struct wiphy_wowlan_support { @@ -2791,6 +2911,7 @@ struct wiphy_wowlan_support { int pattern_max_len; int pattern_min_len; int max_pkt_offset; + int max_nd_match_sets; const struct wiphy_wowlan_tcp_support *tcp; }; @@ -3166,6 +3287,23 @@ static inline const char *wiphy_name(const struct wiphy *wiphy) } /** + * wiphy_new_nm - create a new wiphy for use with cfg80211 + * + * @ops: The configuration operations for this device + * @sizeof_priv: The size of the private area to allocate + * @requested_name: Request a particular name. + * NULL is valid value, and means use the default phy%d naming. + * + * Create a new wiphy and associate the given operations with it. + * @sizeof_priv bytes are allocated for private use. + * + * Return: A pointer to the new wiphy. This pointer must be + * assigned to each netdev's ieee80211_ptr for proper operation. + */ +struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, + const char *requested_name); + +/** * wiphy_new - create a new wiphy for use with cfg80211 * * @ops: The configuration operations for this device @@ -3177,7 +3315,11 @@ static inline const char *wiphy_name(const struct wiphy *wiphy) * Return: A pointer to the new wiphy. This pointer must be * assigned to each netdev's ieee80211_ptr for proper operation. */ -struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv); +static inline struct wiphy *wiphy_new(const struct cfg80211_ops *ops, + int sizeof_priv) +{ + return wiphy_new_nm(ops, sizeof_priv, NULL); +} /** * wiphy_register - register a wiphy with cfg80211 @@ -4501,33 +4643,6 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, gfp_t gfp); /** - * cfg80211_radar_event - radar detection event - * @wiphy: the wiphy - * @chandef: chandef for the current channel - * @gfp: context flags - * - * This function is called when a radar is detected on the current chanenl. - */ -void cfg80211_radar_event(struct wiphy *wiphy, - struct cfg80211_chan_def *chandef, gfp_t gfp); - -/** - * cfg80211_cac_event - Channel availability check (CAC) event - * @netdev: network device - * @chandef: chandef for the current channel - * @event: type of event - * @gfp: context flags - * - * This function is called when a Channel availability check (CAC) is finished - * or aborted. This must be called to notify the completion of a CAC process, - * also by full-MAC drivers. - */ -void cfg80211_cac_event(struct net_device *netdev, - const struct cfg80211_chan_def *chandef, - enum nl80211_radar_event event, gfp_t gfp); - - -/** * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer * @dev: network device * @peer: peer's MAC address @@ -4555,6 +4670,42 @@ void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer, u32 num_packets, u32 rate, u32 intvl, gfp_t gfp); /** + * cfg80211_cqm_beacon_loss_notify - beacon loss event + * @dev: network device + * @gfp: context flags + * + * Notify userspace about beacon loss from the connected AP. + */ +void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp); + +/** + * cfg80211_radar_event - radar detection event + * @wiphy: the wiphy + * @chandef: chandef for the current channel + * @gfp: context flags + * + * This function is called when a radar is detected on the current chanenl. 
+ */ +void cfg80211_radar_event(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, gfp_t gfp); + +/** + * cfg80211_cac_event - Channel availability check (CAC) event + * @netdev: network device + * @chandef: chandef for the current channel + * @event: type of event + * @gfp: context flags + * + * This function is called when a Channel availability check (CAC) is finished + * or aborted. This must be called to notify the completion of a CAC process, + * also by full-MAC drivers. + */ +void cfg80211_cac_event(struct net_device *netdev, + const struct cfg80211_chan_def *chandef, + enum nl80211_radar_event event, gfp_t gfp); + + +/** * cfg80211_gtk_rekey_notify - notify userspace about driver rekeying * @dev: network device * @bssid: BSSID of AP (to avoid races) @@ -4657,6 +4808,20 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, void cfg80211_ch_switch_notify(struct net_device *dev, struct cfg80211_chan_def *chandef); +/* + * cfg80211_ch_switch_started_notify - notify channel switch start + * @dev: the device on which the channel switch started + * @chandef: the future channel definition + * @count: the number of TBTTs until the channel switch happens + * + * Inform the userspace about the channel switch that has just + * started, so that it can take appropriate actions (eg. starting + * channel switch on other vifs), if necessary. + */ +void cfg80211_ch_switch_started_notify(struct net_device *dev, + struct cfg80211_chan_def *chandef, + u8 count); + /** * ieee80211_operating_class_to_band - convert operating class to band * diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h new file mode 100644 index 000000000000..7f713acfa106 --- /dev/null +++ b/include/net/cfg802154.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Written by: + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + */ + +#ifndef __NET_CFG802154_H +#define __NET_CFG802154_H + +#include <linux/ieee802154.h> +#include <linux/netdevice.h> +#include <linux/mutex.h> +#include <linux/bug.h> + +#include <net/nl802154.h> + +struct wpan_phy; + +struct cfg802154_ops { + struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, + const char *name, + int type); + void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, + struct net_device *dev); + int (*add_virtual_intf)(struct wpan_phy *wpan_phy, + const char *name, + enum nl802154_iftype type, + __le64 extended_addr); + int (*del_virtual_intf)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev); + int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel); + int (*set_pan_id)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, __le16 pan_id); + int (*set_short_addr)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, __le16 short_addr); + int (*set_backoff_exponent)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, u8 min_be, + u8 max_be); + int (*set_max_csma_backoffs)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + u8 max_csma_backoffs); + int (*set_max_frame_retries)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, + s8 max_frame_retries); + int (*set_lbt_mode)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, bool mode); +}; + +struct wpan_phy { + struct mutex pib_lock; + + /* If multiple wpan_phys are registered and you're handed e.g. + * a regular netdev with assigned ieee802154_ptr, you won't + * know whether it points to a wpan_phy your driver has registered + * or not. Assign this to something global to your driver to + * help determine whether you own this wpan_phy or not. + */ + const void *privid; + + /* + * This is a PIB according to 802.15.4-2011. 
+ * We do not provide timing-related variables, as they + * aren't used outside of driver + */ + u8 current_channel; + u8 current_page; + u32 channels_supported[IEEE802154_MAX_PAGE + 1]; + s8 transmit_power; + u8 cca_mode; + + __le64 perm_extended_addr; + + s32 cca_ed_level; + + /* PHY depended MAC PIB values */ + + /* 802.15.4 acronym: Tdsym in usec */ + u8 symbol_duration; + /* lifs and sifs periods timing */ + u16 lifs_period; + u16 sifs_period; + + struct device dev; + + char priv[0] __aligned(NETDEV_ALIGN); +}; + +struct wpan_dev { + struct wpan_phy *wpan_phy; + int iftype; + + /* the remainder of this struct should be private to cfg802154 */ + struct list_head list; + struct net_device *netdev; + + u32 identifier; + + /* MAC PIB */ + __le16 pan_id; + __le16 short_addr; + __le64 extended_addr; + + /* MAC BSN field */ + u8 bsn; + /* MAC DSN field */ + u8 dsn; + + u8 min_be; + u8 max_be; + u8 csma_retries; + s8 frame_retries; + + bool lbt; + + bool promiscuous_mode; +}; + +#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) + +struct wpan_phy * +wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size); +static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev) +{ + phy->dev.parent = dev; +} + +int wpan_phy_register(struct wpan_phy *phy); +void wpan_phy_unregister(struct wpan_phy *phy); +void wpan_phy_free(struct wpan_phy *phy); +/* Same semantics as for class_for_each_device */ +int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data); + +static inline void *wpan_phy_priv(struct wpan_phy *phy) +{ + BUG_ON(!phy); + return &phy->priv; +} + +struct wpan_phy *wpan_phy_find(const char *str); + +static inline void wpan_phy_put(struct wpan_phy *phy) +{ + put_device(&phy->dev); +} + +static inline const char *wpan_phy_name(struct wpan_phy *phy) +{ + return dev_name(&phy->dev); +} + +#endif /* __NET_CFG802154_H */ diff --git a/include/net/checksum.h b/include/net/checksum.h index 6465bae80a4f..e339a9513e29 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -151,4 +151,20 @@ static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, (__force __be32)to, pseudohdr); } +static inline __wsum remcsum_adjust(void *ptr, __wsum csum, + int start, int offset) +{ + __sum16 *psum = (__sum16 *)(ptr + offset); + __wsum delta; + + /* Subtract out checksum up to start */ + csum = csum_sub(csum, csum_partial(ptr, start, 0)); + + /* Set derived checksum in packet */ + delta = csum_sub(csum_fold(csum), *psum); + *psum = csum_fold(csum); + + return delta; +} + #endif diff --git a/include/net/compat.h b/include/net/compat.h index 3b603b199c01..42a9c8431177 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -40,9 +40,8 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *); #define compat_mmsghdr mmsghdr #endif /* defined(CONFIG_COMPAT) */ -int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *); -int verify_compat_iovec(struct msghdr *, struct iovec *, - struct sockaddr_storage *, int); +ssize_t get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, + struct sockaddr __user **, struct iovec **); asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, unsigned int); asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, diff --git a/include/net/dsa.h b/include/net/dsa.h index b76559293535..ed3c34bbb67a 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -38,6 +38,9 @@ struct dsa_chip_data { struct device *host_dev; int 
sw_addr; + /* set to size of eeprom if supported by the switch */ + int eeprom_len; + /* Device tree node pointer for this specific switch chip * used during switch setup in case additional properties * and resources needs to be used @@ -139,6 +142,14 @@ struct dsa_switch { */ struct device *master_dev; +#ifdef CONFIG_NET_DSA_HWMON + /* + * Hardware monitoring information + */ + char hwmon_name[IFNAMSIZ + 8]; + struct device *hwmon_dev; +#endif + /* * Slave mii_bus and devices for the individual ports. */ @@ -242,6 +253,28 @@ struct dsa_switch_driver { struct ethtool_eee *e); int (*get_eee)(struct dsa_switch *ds, int port, struct ethtool_eee *e); + +#ifdef CONFIG_NET_DSA_HWMON + /* Hardware monitoring */ + int (*get_temp)(struct dsa_switch *ds, int *temp); + int (*get_temp_limit)(struct dsa_switch *ds, int *temp); + int (*set_temp_limit)(struct dsa_switch *ds, int temp); + int (*get_temp_alarm)(struct dsa_switch *ds, bool *alarm); +#endif + + /* EEPROM access */ + int (*get_eeprom_len)(struct dsa_switch *ds); + int (*get_eeprom)(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data); + int (*set_eeprom)(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data); + + /* + * Register access. + */ + int (*get_regs_len)(struct dsa_switch *ds, int port); + void (*get_regs)(struct dsa_switch *ds, int port, + struct ethtool_regs *regs, void *p); }; void register_switch_driver(struct dsa_switch_driver *type); diff --git a/include/net/fou.h b/include/net/fou.h new file mode 100644 index 000000000000..19b8a0c62a98 --- /dev/null +++ b/include/net/fou.h @@ -0,0 +1,19 @@ +#ifndef __NET_FOU_H +#define __NET_FOU_H + +#include <linux/skbuff.h> + +#include <net/flow.h> +#include <net/gue.h> +#include <net/ip_tunnels.h> +#include <net/udp.h> + +size_t fou_encap_hlen(struct ip_tunnel_encap *e); +static size_t gue_encap_hlen(struct ip_tunnel_encap *e); + +int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4); +int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4); + +#endif diff --git a/include/net/gue.h b/include/net/gue.h index b6c332788084..3f28ec7f1c7f 100644 --- a/include/net/gue.h +++ b/include/net/gue.h @@ -1,23 +1,116 @@ #ifndef __NET_GUE_H #define __NET_GUE_H +/* Definitions for the GUE header, standard and private flags, lengths + * of optional fields are below. + * + * Diagram of GUE header: + * + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |Ver|C| Hlen | Proto/ctype | Standard flags |P| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * ~ Fields (optional) ~ + * | | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Private flags (optional, P bit is set) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * ~ Private fields (optional) ~ + * | | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * C bit indicates contol message when set, data message when unset. + * For a control message, proto/ctype is interpreted as a type of + * control message. For data messages, proto/ctype is the IP protocol + * of the next header. + * + * P bit indicates private flags field is present. The private flags + * may refer to options placed after this field. 
+ */ + struct guehdr { union { struct { #if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 hlen:4, - version:4; + __u8 hlen:5, + control:1, + version:2; #elif defined (__BIG_ENDIAN_BITFIELD) - __u8 version:4, - hlen:4; + __u8 version:2, + control:1, + hlen:5; #else #error "Please fix <asm/byteorder.h>" #endif - __u8 next_hdr; + __u8 proto_ctype; __u16 flags; }; __u32 word; }; }; +/* Standard flags in GUE header */ + +#define GUE_FLAG_PRIV htons(1<<0) /* Private flags are in options */ +#define GUE_LEN_PRIV 4 + +#define GUE_FLAGS_ALL (GUE_FLAG_PRIV) + +/* Private flags in the private option extension */ + +#define GUE_PFLAG_REMCSUM htonl(1 << 31) +#define GUE_PLEN_REMCSUM 4 + +#define GUE_PFLAGS_ALL (GUE_PFLAG_REMCSUM) + +/* Functions to compute options length corresponding to flags. + * If we ever have a lot of flags this can potentially be + * converted to a more optimized algorithm (table lookup + * for instance). + */ +static inline size_t guehdr_flags_len(__be16 flags) +{ + return ((flags & GUE_FLAG_PRIV) ? GUE_LEN_PRIV : 0); +} + +static inline size_t guehdr_priv_flags_len(__be32 flags) +{ + return 0; +} + +/* Validate standard and private flags. Returns non-zero (meaning invalid) + * if there are unknown standard or private flags, or the options length for + * the flags exceeds the options length specified in hlen of the GUE header. + */ +static inline int validate_gue_flags(struct guehdr *guehdr, + size_t optlen) +{ + size_t len; + __be32 flags = guehdr->flags; + + if (flags & ~GUE_FLAGS_ALL) + return 1; + + len = guehdr_flags_len(flags); + if (len > optlen) + return 1; + + if (flags & GUE_FLAG_PRIV) { + /* Private flags are last four bytes accounted in + * guehdr_flags_len + */ + flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV); + + if (flags & ~GUE_PFLAGS_ALL) + return 1; + + len += guehdr_priv_flags_len(flags); + if (len > optlen) + return 1; + } + + return 0; +} + #endif diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 3b53c8e405e4..83bb8a73d23c 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -12,10 +12,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * * Written by: * Pavel Smolenskiy <pavel.smolenskiy@gmail.com> * Maxim Gorbachyov <maxim.gorbachev@siemens.com> @@ -27,10 +23,10 @@ #ifndef IEEE802154_NETDEVICE_H #define IEEE802154_NETDEVICE_H -#include <net/ieee802154.h> #include <net/af_ieee802154.h> #include <linux/netdevice.h> #include <linux/skbuff.h> +#include <linux/ieee802154.h> struct ieee802154_sechdr { #if defined(__LITTLE_ENDIAN_BITFIELD) @@ -427,8 +423,6 @@ struct ieee802154_mlme_ops { /* The fields below are required. */ - struct wpan_phy *(*get_phy)(const struct net_device *dev); - /* * FIXME: these should become the part of PIB/MIB interface.
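/* Illustrative sketch (not part of the diff above): a minimal receive path
 * using the validate_gue_flags() helper added to include/net/gue.h. The
 * function name, the error codes and the use of udp_hdr() are assumptions
 * for illustration; net/ipv4/fou.c is the in-tree consumer of this header.
 */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/gue.h>

static int example_parse_gue(struct sk_buff *skb)
{
	size_t len = sizeof(struct udphdr) + sizeof(struct guehdr);
	struct guehdr *guehdr;
	size_t optlen;

	if (!pskb_may_pull(skb, len))
		return -EINVAL;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
	optlen = guehdr->hlen << 2;	/* hlen counts 32-bit words of options */

	if (!pskb_may_pull(skb, len + optlen))
		return -EINVAL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];	/* data may have moved */

	if (guehdr->version != 0)
		return -EOPNOTSUPP;

	/* Rejects unknown standard/private flags and inconsistent option lengths. */
	if (validate_gue_flags(guehdr, optlen))
		return -EINVAL;

	/* For data messages, proto_ctype is the inner IP protocol number. */
	return guehdr->control ? -EOPNOTSUPP : guehdr->proto_ctype;
}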
* However we still don't have IB interface of any kind @@ -438,16 +432,6 @@ struct ieee802154_mlme_ops { u8 (*get_dsn)(const struct net_device *dev); }; -/* The IEEE 802.15.4 standard defines 2 type of the devices: - * - FFD - full functionality device - * - RFD - reduce functionality device - * - * So 2 sets of mlme operations are needed - */ -struct ieee802154_reduced_mlme_ops { - struct wpan_phy *(*get_phy)(const struct net_device *dev); -}; - static inline struct ieee802154_mlme_ops * ieee802154_mlme_ops(const struct net_device *dev) { diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index d1d272843b3b..9201afe083fa 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -99,4 +99,14 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, const struct in6_addr *daddr, const __be16 dport, const int dif); #endif /* IS_ENABLED(CONFIG_IPV6) */ + +#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_portpair == (__ports)) && \ + ((__sk)->sk_family == AF_INET6) && \ + ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ + ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ + (!(__sk)->sk_bound_dev_if || \ + ((__sk)->sk_bound_dev_if == (__dif))) && \ + net_eq(sock_net(__sk), (__net))) + #endif /* _INET6_HASHTABLES_H */ diff --git a/include/net/inet_common.h b/include/net/inet_common.h index fe7994c48b75..b2828a06a5a6 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -37,6 +37,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int inet_ctl_sock_create(struct sock **sk, unsigned short family, unsigned short type, unsigned char protocol, struct net *net); +int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, + int *addr_len); static inline void inet_ctl_sock_destroy(struct sock *sk) { diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index a5593dab6af7..9326c41c2d7f 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -65,7 +65,8 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t); void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); -int ip6_tnl_xmit_ctl(struct ip6_tnl *t); +int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, + const struct in6_addr *raddr); __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw); __u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, const struct in6_addr *raddr); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index dc9d2a27c315..09a819ee2151 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -201,8 +201,8 @@ void fib_free_table(struct fib_table *tb); #ifndef CONFIG_IP_MULTIPLE_TABLES -#define TABLE_LOCAL_INDEX 0 -#define TABLE_MAIN_INDEX 1 +#define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1)) +#define TABLE_MAIN_INDEX (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1)) static inline struct fib_table *fib_get_table(struct net *net, u32 id) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 5bc6edeb7143..25a59eb388a6 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -117,6 +117,22 @@ struct ip_tunnel_net { struct hlist_head tunnels[IP_TNL_HASH_SIZE]; }; +struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *e); + int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 
*fl4); +}; + +#define MAX_IPTUN_ENCAP_OPS 8 + +extern const struct ip_tunnel_encap_ops __rcu * + iptun_encaps[MAX_IPTUN_ENCAP_OPS]; + +int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op, + unsigned int num); +int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, + unsigned int num); + #ifdef CONFIG_INET int ip_tunnel_init(struct net_device *dev); diff --git a/include/net/ipx.h b/include/net/ipx.h index 0143180fecc9..e5cff6811b30 100644 --- a/include/net/ipx.h +++ b/include/net/ipx.h @@ -42,6 +42,9 @@ struct ipxhdr { struct ipx_address ipx_source __packed; }; +/* From af_ipx.c */ +extern int sysctl_ipx_pprop_broadcasting; + static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) { return (struct ipxhdr *)skb_transport_header(skb); @@ -147,7 +150,7 @@ int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc, unsigned char *node); void ipxrtr_del_routes(struct ipx_interface *intrfc); int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx, - struct iovec *iov, size_t len, int noblock); + struct msghdr *msg, size_t len, int noblock); int ipxrtr_route_skb(struct sk_buff *skb); struct ipx_route *ipxrtr_lookup(__be32 net); int ipxrtr_ioctl(unsigned int cmd, void __user *arg); diff --git a/include/net/irda/irda.h b/include/net/irda/irda.h index a059465101ff..92c8fb575213 100644 --- a/include/net/irda/irda.h +++ b/include/net/irda/irda.h @@ -55,16 +55,6 @@ typedef __u32 magic_t; #endif #ifdef CONFIG_IRDA_DEBUG - -extern unsigned int irda_debug; - -/* use 0 for production, 1 for verification, >2 for debug */ -#define IRDA_DEBUG_LEVEL 0 - -#define IRDA_DEBUG(n, args...) \ -do { if (irda_debug >= (n)) \ - printk(KERN_DEBUG args); \ -} while (0) #define IRDA_ASSERT(expr, func) \ do { if(!(expr)) { \ printk( "Assertion failed! %s:%s:%d %s\n", \ @@ -72,15 +62,10 @@ do { if(!(expr)) { \ func } } while (0) #define IRDA_ASSERT_LABEL(label) label #else -#define IRDA_DEBUG(n, args...) do { } while (0) #define IRDA_ASSERT(expr, func) do { (void)(expr); } while (0) #define IRDA_ASSERT_LABEL(label) #endif /* CONFIG_IRDA_DEBUG */ -#define IRDA_WARNING(args...) do { if (net_ratelimit()) printk(KERN_WARNING args); } while (0) -#define IRDA_MESSAGE(args...) do { if (net_ratelimit()) printk(KERN_INFO args); } while (0) -#define IRDA_ERROR(args...) do { if (net_ratelimit()) printk(KERN_ERR args); } while (0) - /* * Magic numbers used by Linux-IrDA. 
Random numbers which must be unique to * give the best protection diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h index fb4b76d5d7f1..6f23e820618c 100644 --- a/include/net/irda/irlap.h +++ b/include/net/irda/irlap.h @@ -303,7 +303,7 @@ static inline void irlap_next_state(struct irlap_cb *self, IRLAP_STATE state) if (!self || self->magic != LAP_MAGIC) return; - IRDA_DEBUG(4, "next LAP state = %s\n", irlap_state[state]); + pr_debug("next LAP state = %s\n", irlap_state[state]); */ self->state = state; } diff --git a/include/net/irda/parameters.h b/include/net/irda/parameters.h index 42713c931d1f..2d9cd0007cba 100644 --- a/include/net/irda/parameters.h +++ b/include/net/irda/parameters.h @@ -71,17 +71,17 @@ typedef int (*PV_HANDLER)(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func); typedef struct { - PI_HANDLER func; /* Handler for this parameter identifier */ + const PI_HANDLER func; /* Handler for this parameter identifier */ PV_TYPE type; /* Data type for this parameter */ } pi_minor_info_t; typedef struct { - pi_minor_info_t *pi_minor_call_table; + const pi_minor_info_t *pi_minor_call_table; int len; } pi_major_info_t; typedef struct { - pi_major_info_t *tables; + const pi_major_info_t *tables; int len; __u8 pi_mask; int pi_major_offset; diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h index 0e79cfba4b3b..48f3f891b2f9 100644 --- a/include/net/llc_c_st.h +++ b/include/net/llc_c_st.h @@ -35,8 +35,8 @@ struct llc_conn_state_trans { llc_conn_ev_t ev; u8 next_state; - llc_conn_ev_qfyr_t *ev_qualifiers; - llc_conn_action_t *ev_actions; + const llc_conn_ev_qfyr_t *ev_qualifiers; + const llc_conn_action_t *ev_actions; }; struct llc_conn_state { diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h index 567c681f1f3e..c4359e203013 100644 --- a/include/net/llc_s_st.h +++ b/include/net/llc_s_st.h @@ -19,7 +19,7 @@ struct llc_sap_state_trans { llc_sap_ev_t ev; u8 next_state; - llc_sap_action_t *ev_actions; + const llc_sap_action_t *ev_actions; }; struct llc_sap_state { diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 0ad1f47d2dc7..58d719ddaa60 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -263,6 +263,7 @@ struct ieee80211_vif_chanctx_switch { * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed, * note that this is only called when it changes after the channel * context had been assigned. + * @BSS_CHANGED_OCB: OCB join status changed */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -287,6 +288,7 @@ enum ieee80211_bss_change { BSS_CHANGED_P2P_PS = 1<<19, BSS_CHANGED_BEACON_INFO = 1<<20, BSS_CHANGED_BANDWIDTH = 1<<21, + BSS_CHANGED_OCB = 1<<22, /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -739,7 +741,8 @@ struct ieee80211_tx_info { u8 ampdu_ack_len; u8 ampdu_len; u8 antenna; - void *status_driver_data[21 / sizeof(void *)]; + u16 tx_time; + void *status_driver_data[19 / sizeof(void *)]; } status; struct { struct ieee80211_tx_rate driver_rates[ @@ -879,6 +882,9 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * subframes share the same sequence number. Reported subframes can be * either regular MSDU or singly A-MSDUs. Subframes must not be * interleaved with other frames. + * @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific + * radiotap data in the skb->data (before the frame) as described by + * the &struct ieee80211_vendor_radiotap. 
*/ enum mac80211_rx_flags { RX_FLAG_MMIC_ERROR = BIT(0), @@ -908,6 +914,7 @@ enum mac80211_rx_flags { RX_FLAG_10MHZ = BIT(28), RX_FLAG_5MHZ = BIT(29), RX_FLAG_AMSDU_MORE = BIT(30), + RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31), }; #define RX_FLAG_STBC_SHIFT 26 @@ -979,6 +986,39 @@ struct ieee80211_rx_status { }; /** + * struct ieee80211_vendor_radiotap - vendor radiotap data information + * @present: presence bitmap for this vendor namespace + * (this could be extended in the future if any vendor needs more + * bits, the radiotap spec does allow for that) + * @align: radiotap vendor namespace alignment. This defines the needed + * alignment for the @data field below, not for the vendor namespace + * description itself (which has a fixed 2-byte alignment) + * Must be a power of two, and be set to at least 1! + * @oui: radiotap vendor namespace OUI + * @subns: radiotap vendor sub namespace + * @len: radiotap vendor sub namespace skip length, if alignment is done + * then that's added to this, i.e. this is only the length of the + * @data field. + * @pad: number of bytes of padding after the @data, this exists so that + * the skb data alignment can be preserved even if the data has odd + * length + * @data: the actual vendor namespace data + * + * This struct, including the vendor data, goes into the skb->data before + * the 802.11 header. It's split up in mac80211 using the align/oui/subns + * data. + */ +struct ieee80211_vendor_radiotap { + u32 present; + u8 align; + u8 oui[3]; + u8 subns; + u8 pad; + u16 len; + u8 data[]; +} __packed; + +/** * enum ieee80211_conf_flags - configuration flags * * Flags to define PHY configuration options @@ -1117,6 +1157,8 @@ struct ieee80211_conf { * Function (TSF) timer when the frame containing the channel switch * announcement was received. This is simply the rx.mactime parameter * the driver passed into mac80211. + * @device_timestamp: arbitrary timestamp for the device, this is the + * rx.device_timestamp parameter the driver passed to mac80211. * @block_tx: Indicates whether transmission must be blocked before the * scheduled channel switch, as indicated by the AP. * @chandef: the new channel to switch to @@ -1124,6 +1166,7 @@ struct ieee80211_conf { */ struct ieee80211_channel_switch { u64 timestamp; + u32 device_timestamp; bool block_tx; struct cfg80211_chan_def chandef; u8 count; @@ -1423,6 +1466,8 @@ struct ieee80211_sta_rates { * @smps_mode: current SMPS mode (off, static or dynamic) * @rates: rate control selection table * @tdls: indicates whether the STA is a TDLS peer + * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only + * valid if the STA is a TDLS peer in the first place. */ struct ieee80211_sta { u32 supp_rates[IEEE80211_NUM_BANDS]; @@ -1438,6 +1483,7 @@ struct ieee80211_sta { enum ieee80211_smps_mode smps_mode; struct ieee80211_sta_rates __rcu *rates; bool tdls; + bool tdls_initiator; /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); @@ -1576,6 +1622,10 @@ struct ieee80211_tx_control { * a virtual monitor interface when monitor interfaces are the only * active interfaces. * + * @IEEE80211_HW_NO_AUTO_VIF: The driver would like for no wlanX to + * be created. It is expected user-space will create vifs as + * desired (and thus have them named as desired). + * * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface * queue mapping in order to use different queues (not just one per AC) * for different virtual interfaces. 
See the doc section on HW queue @@ -1622,7 +1672,8 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, IEEE80211_HW_MFP_CAPABLE = 1<<13, IEEE80211_HW_WANT_MONITOR_VIF = 1<<14, - /* free slots */ + IEEE80211_HW_NO_AUTO_VIF = 1<<15, + /* free slot */ IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, IEEE80211_HW_CONNECTION_MONITOR = 1<<19, @@ -1776,6 +1827,31 @@ struct ieee80211_scan_request { }; /** + * struct ieee80211_tdls_ch_sw_params - TDLS channel switch parameters + * + * @sta: peer this TDLS channel-switch request/response came from + * @chandef: channel referenced in a TDLS channel-switch request + * @action_code: see &enum ieee80211_tdls_actioncode + * @status: channel-switch response status + * @timestamp: time at which the frame was received + * @switch_time: switch-timing parameter received in the frame + * @switch_timeout: switch-timing parameter received in the frame + * @tmpl_skb: TDLS switch-channel response template + * @ch_sw_tm_ie: offset of the channel-switch timing IE inside @tmpl_skb + */ +struct ieee80211_tdls_ch_sw_params { + struct ieee80211_sta *sta; + struct cfg80211_chan_def *chandef; + u8 action_code; + u32 status; + u32 timestamp; + u16 switch_time; + u16 switch_timeout; + struct sk_buff *tmpl_skb; + u32 ch_sw_tm_ie; +}; + +/** * wiphy_to_ieee80211_hw - return a mac80211 driver hw struct from a wiphy * * @wiphy: the &struct wiphy which we want to query @@ -2375,6 +2451,22 @@ enum ieee80211_roc_type { }; /** + * enum ieee80211_reconfig_complete_type - reconfig type + * + * This enum is used by the reconfig_complete() callback to indicate what + * reconfiguration type was completed. + * + * @IEEE80211_RECONFIG_TYPE_RESTART: hw restart type + * (also due to resume() callback returning 1) + * @IEEE80211_RECONFIG_TYPE_SUSPEND: suspend type (regardless + * of wowlan configuration) + */ +enum ieee80211_reconfig_type { + IEEE80211_RECONFIG_TYPE_RESTART, + IEEE80211_RECONFIG_TYPE_SUSPEND, +}; + +/** * struct ieee80211_ops - callbacks from mac80211 to the driver * * This structure contains various callbacks that the driver may @@ -2530,7 +2622,9 @@ enum ieee80211_roc_type { * * @sw_scan_start: Notifier function that is called just before a software scan * is started. Can be NULL, if the driver doesn't need this notification. - * The callback can sleep. + * The mac_addr parameter allows supporting NL80211_SCAN_FLAG_RANDOM_ADDR, + * the driver may set the NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR flag if it + * can use this parameter. The callback can sleep. * * @sw_scan_complete: Notifier function that is called just after a * software scan finished. Can be NULL, if the driver doesn't need @@ -2601,6 +2695,9 @@ enum ieee80211_roc_type { * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since * otherwise the rate control algorithm is notified directly. * Must be atomic. + * @sta_rate_tbl_update: Notifies the driver that the rate table changed. This + * is only used if the configured rate control algorithm actually uses + * the new rate table API, and is therefore optional. Must be atomic. * * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), * bursting) for a hardware TX queue. @@ -2809,11 +2906,11 @@ enum ieee80211_roc_type { * disabled/enabled via @bss_info_changed. * @stop_ap: Stop operation on the AP interface. * - * @restart_complete: Called after a call to ieee80211_restart_hw(), when the - * reconfiguration has completed. This can help the driver implement the - * reconfiguration step. 
Also called when reconfiguring because the - * driver's resume function returned 1, as this is just like an "inline" - * hardware restart. This callback may sleep. + * @reconfig_complete: Called after a call to ieee80211_restart_hw() and + * during resume, when the reconfiguration has completed. + * This can help the driver implement the reconfiguration step (and + * indicate mac80211 is ready to receive frames). + * This callback may sleep. * * @ipv6_addr_change: IPv6 address assignment on the given interface changed. * Currently, this is only called for managed or P2P client interfaces. @@ -2829,6 +2926,13 @@ enum ieee80211_roc_type { * transmitted and then call ieee80211_csa_finish(). * If the CSA count starts as zero or 1, this function will not be called, * since there won't be any time to beacon before the switch anyway. + * @pre_channel_switch: This is an optional callback that is called + * before a channel switch procedure is started (ie. when a STA + * gets a CSA or an userspace initiated channel-switch), allowing + * the driver to prepare for the channel switch. + * @post_channel_switch: This is an optional callback that is called + * after a channel switch procedure is completed, allowing the + * driver to go back to a normal configuration. * * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all * information in bss_conf is set up and the beacon can be retrieved. A @@ -2838,6 +2942,26 @@ enum ieee80211_roc_type { * @get_expected_throughput: extract the expected throughput towards the * specified station. The returned value is expressed in Kbps. It returns 0 * if the RC algorithm does not have proper data to provide. + * + * @get_txpower: get current maximum tx power (in dBm) based on configuration + * and hardware limits. + * + * @tdls_channel_switch: Start channel-switching with a TDLS peer. The driver + * is responsible for continually initiating channel-switching operations + * and returning to the base channel for communication with the AP. The + * driver receives a channel-switch request template and the location of + * the switch-timing IE within the template as part of the invocation. + * The template is valid only within the call, and the driver can + * optionally copy the skb for further re-use. + * @tdls_cancel_channel_switch: Stop channel-switching with a TDLS peer. Both + * peers must be on the base channel when the call completes. + * @tdls_recv_channel_switch: a TDLS channel-switch related frame (request or + * response) has been received from a remote peer. The driver gets + * parameters parsed from the incoming frame and may use them to continue + * an ongoing channel-switch operation. In addition, a channel-switch + * response template is provided, together with the location of the + * switch-timing IE within the template. The skb can only be used within + * the function call. 
*/ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -2897,8 +3021,11 @@ struct ieee80211_ops { struct ieee80211_scan_ies *ies); int (*sched_scan_stop)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); - void (*sw_scan_start)(struct ieee80211_hw *hw); - void (*sw_scan_complete)(struct ieee80211_hw *hw); + void (*sw_scan_start)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr); + void (*sw_scan_complete)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); int (*get_stats)(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats); void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx, @@ -2932,6 +3059,9 @@ struct ieee80211_ops { struct ieee80211_vif *vif, struct ieee80211_sta *sta, u32 changed); + void (*sta_rate_tbl_update)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); int (*conf_tx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params); @@ -2959,6 +3089,7 @@ struct ieee80211_ops { void (*flush)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop); void (*channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, struct ieee80211_channel_switch *ch_switch); int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); @@ -3025,7 +3156,8 @@ struct ieee80211_ops { int n_vifs, enum ieee80211_chanctx_switch_mode mode); - void (*restart_complete)(struct ieee80211_hw *hw); + void (*reconfig_complete)(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type); #if IS_ENABLED(CONFIG_IPV6) void (*ipv6_addr_change)(struct ieee80211_hw *hw, @@ -3035,14 +3167,54 @@ struct ieee80211_ops { void (*channel_switch_beacon)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_chan_def *chandef); + int (*pre_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *ch_switch); + + int (*post_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); u32 (*get_expected_throughput)(struct ieee80211_sta *sta); + int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int *dbm); + + int (*tdls_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u8 oper_class, + struct cfg80211_chan_def *chandef, + struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie); + void (*tdls_cancel_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + void (*tdls_recv_channel_switch)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_tdls_ch_sw_params *params); }; /** - * ieee80211_alloc_hw - Allocate a new hardware device + * ieee80211_alloc_hw_nm - Allocate a new hardware device + * + * This must be called once for each hardware device. The returned pointer + * must be used to refer to this device when calling other functions. + * mac80211 allocates a private data area for the driver pointed to by + * @priv in &struct ieee80211_hw, the size of this area is given as + * @priv_data_len. + * + * @priv_data_len: length of private data + * @ops: callbacks for this device + * @requested_name: Requested name for this device. 
+ * NULL is valid value, and means use the default naming (phy%d) + * + * Return: A pointer to the new hardware device, or %NULL on error. + */ +struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, + const struct ieee80211_ops *ops, + const char *requested_name); + +/** + * ieee80211_alloc_hw - Allocate a new hardware device * * This must be called once for each hardware device. The returned pointer * must be used to refer to this device when calling other functions. @@ -3055,8 +3227,12 @@ struct ieee80211_ops { * * Return: A pointer to the new hardware device, or %NULL on error. */ +static inline struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, - const struct ieee80211_ops *ops); + const struct ieee80211_ops *ops) +{ + return ieee80211_alloc_hw_nm(priv_data_len, ops, NULL); +} /** * ieee80211_register_hw - Register hardware device @@ -3443,6 +3619,26 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb); /** + * ieee80211_tx_status_noskb - transmit status callback without skb + * + * This function can be used as a replacement for ieee80211_tx_status + * in drivers that cannot reliably map tx status information back to + * specific skbs. + * + * Calls to this function for a single hardware must be synchronized + * against each other. Calls to this function, ieee80211_tx_status_ni() + * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware. + * + * @hw: the hardware the frame was transmitted by + * @sta: the receiver station to which this packet is sent + * (NULL for multicast packets) + * @info: tx status information + */ +void ieee80211_tx_status_noskb(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct ieee80211_tx_info *info); + +/** * ieee80211_tx_status_ni - transmit status callback (in process context) * * Like ieee80211_tx_status() but can be called in process context. @@ -3655,7 +3851,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, /** * ieee80211_probereq_get - retrieve a Probe Request template * @hw: pointer obtained from ieee80211_alloc_hw(). - * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @src_addr: source MAC address * @ssid: SSID buffer * @ssid_len: length of SSID * @tailroom: tailroom to reserve at end of SKB for IEs @@ -3666,7 +3862,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw, * Return: The Probe Request template. %NULL on error. */ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, + const u8 *src_addr, const u8 *ssid, size_t ssid_len, size_t tailroom); @@ -4172,6 +4368,22 @@ void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw, void *data); /** + * ieee80211_iterate_stations_atomic - iterate stations + * + * This function iterates over all stations associated with a given + * hardware that are currently uploaded to the driver and calls the callback + * function for them. + * This function requires the iterator callback function to be atomic, + * + * @hw: the hardware struct of which the interfaces should be iterated over + * @iterator: the iterator function to call, cannot sleep + * @data: first argument of the iterator function + */ +void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw, + void (*iterator)(void *data, + struct ieee80211_sta *sta), + void *data); +/** * ieee80211_queue_work - add work onto the mac80211 workqueue * * Drivers and mac80211 use this to add work onto the mac80211 workqueue. 
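/* Illustrative sketch (not part of the diff above): a probe path using the
 * new ieee80211_alloc_hw_nm() to honour a user-requested phy name; passing
 * NULL keeps the default phy%d naming, which is exactly what the
 * ieee80211_alloc_hw() wrapper above does. All example_* names are
 * assumptions, and the mandatory callbacks are omitted from the ops.
 */
#include <net/mac80211.h>

struct example_drv {
	void *fw_state;
};

static const struct ieee80211_ops example_ops = {
	/* .tx, .start, .stop, .add_interface, ... would be filled in here */
};

static struct ieee80211_hw *example_alloc(const char *requested_name)
{
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw_nm(sizeof(struct example_drv),
				   &example_ops, requested_name);
	if (!hw)
		return NULL;

	/* wiphy setup, band registration and ieee80211_register_hw() follow */
	return hw;
}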
@@ -4480,6 +4692,14 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, gfp_t gfp); /** + * ieee80211_cqm_beacon_loss_notify - inform CQM of beacon loss + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @gfp: context flags + */ +void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp); + +/** * ieee80211_radar_detected - inform that a radar was detected * * @hw: pointer as obtained from ieee80211_alloc_hw() @@ -4637,6 +4857,10 @@ struct rate_control_ops { void (*free_sta)(void *priv, struct ieee80211_sta *sta, void *priv_sta); + void (*tx_status_noskb)(void *priv, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_info *info); void (*tx_status)(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, struct sk_buff *skb); @@ -4888,4 +5112,69 @@ void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf); void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code, gfp_t gfp); + +/** + * ieee80211_reserve_tid - request to reserve a specific TID + * + * There is sometimes a need (such as in TDLS) for blocking the driver from + * using a specific TID so that the FW can use it for certain operations such + * as sending PTI requests. To make sure that the driver doesn't use that TID, + * this function must be called as it flushes out packets on this TID and marks + * it as blocked, so that any transmit for the station on this TID will be + * redirected to the alternative TID in the same AC. + * + * Note that this function blocks and may call back into the driver, so it + * should be called without driver locks held. Also note this function should + * only be called from the driver's @sta_state callback. + * + * @sta: the station to reserve the TID for + * @tid: the TID to reserve + * + * Returns: 0 on success, else on failure + */ +int ieee80211_reserve_tid(struct ieee80211_sta *sta, u8 tid); + +/** + * ieee80211_unreserve_tid - request to unreserve a specific TID + * + * Once there is no longer any need for reserving a certain TID, this function + * should be called, and no longer will packets have their TID modified for + * preventing use of this TID in the driver. + * + * Note that this function blocks and acquires a lock, so it should be called + * without driver locks held. Also note this function should only be called + * from the driver's @sta_state callback. + * + * @sta: the station + * @tid: the TID to unreserve + */ +void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); + +/** + * ieee80211_ie_split - split an IE buffer according to ordering + * + * @ies: the IE buffer + * @ielen: the length of the IE buffer + * @ids: an array with element IDs that are allowed before + * the split + * @n_ids: the size of the element ID array + * @offset: offset where to start splitting in the buffer + * + * This function splits an IE buffer by updating the @offset + * variable to point to the location where the buffer should be + * split. + * + * It assumes that the given IE buffer is well-formed, this + * has to be guaranteed by the caller! + * + * It also assumes that the IEs in the buffer are ordered + * correctly, if not the result of using this function will not + * be ordered correctly either, i.e. it does no reordering. 
+ * + * The function returns the offset where the next part of the + * buffer starts, which may be @ielen if the entire (remainder) + * of the buffer should be used. + */ +size_t ieee80211_ie_split(const u8 *ies, size_t ielen, + const u8 *ids, int n_ids, size_t offset); #endif /* MAC80211_H */ diff --git a/include/net/mac802154.h b/include/net/mac802154.h index 2e67cdd19cdc..c823d910b46c 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -12,14 +12,12 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef NET_MAC802154_H #define NET_MAC802154_H #include <net/af_ieee802154.h> +#include <linux/ieee802154.h> #include <linux/skbuff.h> /* General MAC frame format: @@ -35,13 +33,13 @@ */ /* indicates that the Short Address changed */ -#define IEEE802515_AFILT_SADDR_CHANGED 0x00000001 +#define IEEE802154_AFILT_SADDR_CHANGED 0x00000001 /* indicates that the IEEE Address changed */ -#define IEEE802515_AFILT_IEEEADDR_CHANGED 0x00000002 +#define IEEE802154_AFILT_IEEEADDR_CHANGED 0x00000002 /* indicates that the PAN ID changed */ -#define IEEE802515_AFILT_PANID_CHANGED 0x00000004 +#define IEEE802154_AFILT_PANID_CHANGED 0x00000004 /* indicates that PAN Coordinator status changed */ -#define IEEE802515_AFILT_PANC_CHANGED 0x00000008 +#define IEEE802154_AFILT_PANC_CHANGED 0x00000008 struct ieee802154_hw_addr_filt { __le16 pan_id; /* Each independent PAN selects a unique @@ -55,7 +53,14 @@ struct ieee802154_hw_addr_filt { u8 pan_coord; }; -struct ieee802154_dev { +struct ieee802154_vif { + int type; + + /* must be last */ + u8 drv_priv[0] __aligned(sizeof(void *)); +}; + +struct ieee802154_hw { /* filled by the driver */ int extra_tx_headroom; u32 flags; @@ -65,6 +70,7 @@ struct ieee802154_dev { struct ieee802154_hw_addr_filt hw_filt; void *priv; struct wpan_phy *phy; + size_t vif_data_size; }; /* Checksum is in hardware and is omitted from a packet @@ -76,28 +82,43 @@ struct ieee802154_dev { * however, so you are advised to review these flags carefully. */ -/* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */ -#define IEEE802154_HW_OMIT_CKSUM 0x00000001 +/* Indicates that xmitter will add FCS on it's own. */ +#define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001 /* Indicates that receiver will autorespond with ACK frames. */ -#define IEEE802154_HW_AACK 0x00000002 +#define IEEE802154_HW_AACK 0x00000002 /* Indicates that transceiver will support transmit power setting. */ -#define IEEE802154_HW_TXPOWER 0x00000004 +#define IEEE802154_HW_TXPOWER 0x00000004 /* Indicates that transceiver will support listen before transmit. */ -#define IEEE802154_HW_LBT 0x00000008 +#define IEEE802154_HW_LBT 0x00000008 /* Indicates that transceiver will support cca mode setting. */ -#define IEEE802154_HW_CCA_MODE 0x00000010 +#define IEEE802154_HW_CCA_MODE 0x00000010 /* Indicates that transceiver will support cca ed level setting. */ -#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020 +#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020 /* Indicates that transceiver will support csma (max_be, min_be, csma retries) * settings. */ -#define IEEE802154_HW_CSMA_PARAMS 0x00000040 +#define IEEE802154_HW_CSMA_PARAMS 0x00000040 /* Indicates that transceiver will support ARET frame retries setting. 
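/* Illustrative sketch (not part of the diff above): using the newly
 * exported ieee80211_ie_split() to decide how many caller-supplied IEs can
 * be placed before a stack-generated element (here: HT capabilities). The
 * element-ID list is only an example.
 */
#include <linux/kernel.h>
#include <linux/ieee80211.h>
#include <net/mac80211.h>

static size_t example_split_before_ht(const u8 *ies, size_t ielen)
{
	static const u8 before_ht[] = {
		WLAN_EID_SSID,
		WLAN_EID_SUPP_RATES,
		WLAN_EID_EXT_SUPP_RATES,
		WLAN_EID_DS_PARAMS,
	};

	/* ies[0..split) may go before the HT element, ies[split..ielen)
	 * must follow it; the buffer must already be well-formed and ordered.
	 */
	return ieee80211_ie_split(ies, ielen, before_ht,
				  ARRAY_SIZE(before_ht), 0);
}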
 */ -#define IEEE802154_HW_FRAME_RETRIES 0x00000080 +#define IEEE802154_HW_FRAME_RETRIES 0x00000080 +/* Indicates that transceiver will support hardware address filter setting. */ +#define IEEE802154_HW_AFILT 0x00000100 +/* Indicates that transceiver will support promiscuous mode setting. */ +#define IEEE802154_HW_PROMISCUOUS 0x00000200 +/* Indicates that receiver omits FCS. */ +#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400 +/* Indicates that receiver will not filter frames with bad checksum. */ +#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800 + +/* Indicates that receiver omits FCS and xmitter will add FCS on its own. */ +#define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \ + IEEE802154_HW_RX_OMIT_CKSUM) /* This groups the most common CSMA support fields into one. */ #define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \ IEEE802154_HW_CCA_ED_LEVEL | \ - IEEE802154_HW_CSMA_PARAMS | \ + IEEE802154_HW_CSMA_PARAMS) + +/* This groups the most common ARET support fields into one. */ +#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \ IEEE802154_HW_FRAME_RETRIES) /* struct ieee802154_ops - callbacks from mac802154 to the driver @@ -112,12 +133,24 @@ struct ieee802154_dev { * stop: Handler that 802.15.4 module calls for device cleanup. * This function is called after the last interface is removed. * - * xmit: Handler that 802.15.4 module calls for each transmitted frame. + * xmit_sync: + * Handler that 802.15.4 module calls for each transmitted frame. + * skb contains the buffer starting from the IEEE 802.15.4 header. + * The low-level driver should send the frame based on available + * configuration. This is called by a workqueue and useful for + * synchronous 802.15.4 drivers. + * This function should return zero or negative errno. + * + * WARNING: + * This will be deprecated soon. We don't accept synced xmit callbacks from + * drivers anymore. + * + * xmit_async: + * Handler that 802.15.4 module calls for each transmitted frame. * skb contains the buffer starting from the IEEE 802.15.4 header. * The low-level driver should send the frame based on available * configuration. - * This function should return zero or negative errno. Called with - * pib_lock held. + * This function should return zero or negative errno. * * ed: Handler that 802.15.4 module calls for Energy Detection. * This function should place the value for detected energy @@ -159,40 +192,75 @@ struct ieee802154_dev { * set_frame_retries * Sets the retransmission attempt limit. Called with pib_lock held. * Returns either zero, or negative errno. + * + * set_promiscuous_mode + * Enables or disables promiscuous mode.
 */ struct ieee802154_ops { struct module *owner; - int (*start)(struct ieee802154_dev *dev); - void (*stop)(struct ieee802154_dev *dev); - int (*xmit)(struct ieee802154_dev *dev, - struct sk_buff *skb); - int (*ed)(struct ieee802154_dev *dev, u8 *level); - int (*set_channel)(struct ieee802154_dev *dev, - int page, - int channel); - int (*set_hw_addr_filt)(struct ieee802154_dev *dev, - struct ieee802154_hw_addr_filt *filt, + int (*start)(struct ieee802154_hw *hw); + void (*stop)(struct ieee802154_hw *hw); + int (*xmit_sync)(struct ieee802154_hw *hw, + struct sk_buff *skb); + int (*xmit_async)(struct ieee802154_hw *hw, + struct sk_buff *skb); + int (*ed)(struct ieee802154_hw *hw, u8 *level); + int (*set_channel)(struct ieee802154_hw *hw, u8 page, + u8 channel); + int (*set_hw_addr_filt)(struct ieee802154_hw *hw, + struct ieee802154_hw_addr_filt *filt, unsigned long changed); - int (*ieee_addr)(struct ieee802154_dev *dev, __le64 addr); - int (*set_txpower)(struct ieee802154_dev *dev, int db); - int (*set_lbt)(struct ieee802154_dev *dev, bool on); - int (*set_cca_mode)(struct ieee802154_dev *dev, u8 mode); - int (*set_cca_ed_level)(struct ieee802154_dev *dev, + int (*set_txpower)(struct ieee802154_hw *hw, int db); + int (*set_lbt)(struct ieee802154_hw *hw, bool on); + int (*set_cca_mode)(struct ieee802154_hw *hw, u8 mode); + int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 level); - int (*set_csma_params)(struct ieee802154_dev *dev, + int (*set_csma_params)(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries); - int (*set_frame_retries)(struct ieee802154_dev *dev, + int (*set_frame_retries)(struct ieee802154_hw *hw, s8 retries); + int (*set_promiscuous_mode)(struct ieee802154_hw *hw, + const bool on); }; -/* Basic interface to register ieee802154 device */ -struct ieee802154_dev * -ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops); -void ieee802154_free_device(struct ieee802154_dev *dev); -int ieee802154_register_device(struct ieee802154_dev *dev); -void ieee802154_unregister_device(struct ieee802154_dev *dev); +/** + * ieee802154_be64_to_le64 - copies and converts be64 to le64 + * @le64_dst: le64 destination pointer + * @be64_src: be64 source pointer + */ +static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) +{ + __le64 tmp = (__force __le64)swab64p(be64_src); + + memcpy(le64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN); +} -void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, +/** + * ieee802154_le64_to_be64 - copies and converts le64 to be64 + * @be64_dst: be64 destination pointer + * @le64_src: le64 source pointer + */ +static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) +{ + __be64 tmp = (__force __be64)swab64p(le64_src); + + memcpy(be64_dst, &tmp, IEEE802154_EXTENDED_ADDR_LEN); +} + +/* Basic interface to register ieee802154 hardware */ +struct ieee802154_hw * +ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops); +void ieee802154_free_hw(struct ieee802154_hw *hw); +int ieee802154_register_hw(struct ieee802154_hw *hw); +void ieee802154_unregister_hw(struct ieee802154_hw *hw); + +void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb); +void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi); +void ieee802154_wake_queue(struct ieee802154_hw *hw); +void ieee802154_stop_queue(struct ieee802154_hw *hw); +void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, + bool ifs_handling); + #endif /* NET_MAC802154_H
*/ diff --git a/include/net/mpls.h b/include/net/mpls.h new file mode 100644 index 000000000000..5b3b5addfb08 --- /dev/null +++ b/include/net/mpls.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2014 Nicira, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _NET_MPLS_H +#define _NET_MPLS_H 1 + +#include <linux/if_ether.h> +#include <linux/netdevice.h> + +#define MPLS_HLEN 4 + +static inline bool eth_p_mpls(__be16 eth_type) +{ + return eth_type == htons(ETH_P_MPLS_UC) || + eth_type == htons(ETH_P_MPLS_MC); +} + +/* + * For non-MPLS skbs this will correspond to the network header. + * For MPLS skbs it will be before the network_header as the MPLS + * label stack lies between the end of the mac header and the network + * header. That is, for MPLS skbs the end of the mac header + * is the top of the MPLS label stack. + */ +static inline unsigned char *skb_mpls_header(struct sk_buff *skb) +{ + return skb_mac_header(skb) + skb->mac_len; +} +#endif diff --git a/include/net/neighbour.h b/include/net/neighbour.h index f60558d0254c..eb070b3674a1 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -69,7 +69,7 @@ struct neigh_parms { struct net *net; #endif struct net_device *dev; - struct neigh_parms *next; + struct list_head list; int (*neigh_setup)(struct neighbour *); void (*neigh_cleanup)(struct neighbour *); struct neigh_table *tbl; @@ -203,6 +203,7 @@ struct neigh_table { void (*proxy_redo)(struct sk_buff *skb); char *id; struct neigh_parms parms; + struct list_head parms_list; int gc_interval; int gc_thresh1; int gc_thresh2; @@ -219,6 +220,13 @@ struct neigh_table { struct pneigh_entry **phash_buckets; }; +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_DN_TABLE = 2, + NEIGH_NR_TABLES, +}; + static inline int neigh_parms_family(struct neigh_parms *p) { return p->tbl->family; @@ -239,8 +247,8 @@ static inline void *neighbour_priv(const struct neighbour *n) #define NEIGH_UPDATE_F_ISROUTER 0x40000000 #define NEIGH_UPDATE_F_ADMIN 0x80000000 -void neigh_table_init(struct neigh_table *tbl); -int neigh_table_clear(struct neigh_table *tbl); +void neigh_table_init(int index, struct neigh_table *tbl); +int neigh_table_clear(int index, struct neigh_table *tbl); struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev); struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index c8a7db605e03..f0daed2b54d1 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -92,12 +92,18 @@ struct nf_conn { /* Have we seen traffic both ways yet? (bitset) */ unsigned long status; - /* If we were expected by an expectation, this will be it */ - struct nf_conn *master; - /* Timer function; drops refcnt when it goes off. 
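/* Illustrative sketch (not part of the diff above): reading the top MPLS
 * label stack entry with the new eth_p_mpls() and skb_mpls_header()
 * helpers from include/net/mpls.h. The function name is an assumption.
 */
#include <linux/skbuff.h>
#include <net/mpls.h>

static u32 example_top_mpls_label(struct sk_buff *skb, __be16 eth_type)
{
	__be32 lse;

	if (!eth_p_mpls(eth_type))
		return 0;

	/* The label stack sits between the end of the mac header and the
	 * network header, so skb_mpls_header() points at its first entry.
	 */
	memcpy(&lse, skb_mpls_header(skb), MPLS_HLEN);

	return (ntohl(lse) >> 12) & 0xfffff;	/* 20-bit Label field */
}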
*/ struct timer_list timeout; +#ifdef CONFIG_NET_NS + struct net *ct_net; +#endif + /* all members below initialized via memset */ + u8 __nfct_init_offset[0]; + + /* If we were expected by an expectation, this will be it */ + struct nf_conn *master; + #if defined(CONFIG_NF_CONNTRACK_MARK) u_int32_t mark; #endif @@ -108,9 +114,6 @@ struct nf_conn { /* Extensions */ struct nf_ct_ext *ext; -#ifdef CONFIG_NET_NS - struct net *ct_net; -#endif /* Storage reserved for other modules, must be the last member */ union nf_conntrack_proto proto; diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index cc0c18827602..f2f0fa3bb150 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -72,7 +72,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb) return ret; } -int +void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *proto); diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index adc1fa3dd7ab..cdc920b4c4c2 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -38,8 +38,8 @@ struct nf_conntrack_l3proto { const struct nf_conntrack_tuple *orig); /* Print out the per-protocol part of the tuple. */ - int (*print_tuple)(struct seq_file *s, - const struct nf_conntrack_tuple *); + void (*print_tuple)(struct seq_file *s, + const struct nf_conntrack_tuple *); /* * Called before tracking. diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 4c8d573830b7..1f7061313d54 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -56,11 +56,11 @@ struct nf_conntrack_l4proto { u_int8_t pf, unsigned int hooknum); /* Print out the per-protocol part of the tuple. Return like seq_* */ - int (*print_tuple)(struct seq_file *s, - const struct nf_conntrack_tuple *); + void (*print_tuple)(struct seq_file *s, + const struct nf_conntrack_tuple *); /* Print out the private part of the conntrack. */ - int (*print_conntrack)(struct seq_file *s, struct nf_conn *); + void (*print_conntrack)(struct seq_file *s, struct nf_conn *); /* Return the array of timeouts for this protocol. 
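/* Illustrative sketch (not part of the diff above): what an l4proto
 * ->print_tuple() implementation looks like after the int -> void
 * conversion; it simply writes into the seq_file and no longer returns a
 * value. Modelled on the in-tree UDP tracker.
 */
#include <linux/seq_file.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static void example_udp_print_tuple(struct seq_file *s,
				    const struct nf_conntrack_tuple *tuple)
{
	seq_printf(s, "sport=%hu dport=%hu ",
		   ntohs(tuple->src.u.udp.port),
		   ntohs(tuple->dst.u.udp.port));
}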
*/ unsigned int *(*get_timeouts)(struct net *net); diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h new file mode 100644 index 000000000000..73b729543309 --- /dev/null +++ b/include/net/netfilter/nf_nat_redirect.h @@ -0,0 +1,12 @@ +#ifndef _NF_NAT_REDIRECT_H_ +#define _NF_NAT_REDIRECT_H_ + +unsigned int +nf_nat_redirect_ipv4(struct sk_buff *skb, + const struct nf_nat_ipv4_multi_range_compat *mr, + unsigned int hooknum); +unsigned int +nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, + unsigned int hooknum); + +#endif /* _NF_NAT_REDIRECT_H_ */ diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 845c596bf594..3ae969e3acf0 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -396,14 +396,12 @@ struct nft_rule { /** * struct nft_trans - nf_tables object update in transaction * - * @rcu_head: rcu head to defer release of transaction data * @list: used internally * @msg_type: message type * @ctx: transaction context * @data: internal information related to the transaction */ struct nft_trans { - struct rcu_head rcu_head; struct list_head list; int msg_type; struct nft_ctx ctx; diff --git a/include/net/netfilter/nf_tables_bridge.h b/include/net/netfilter/nf_tables_bridge.h new file mode 100644 index 000000000000..511fb79f6dad --- /dev/null +++ b/include/net/netfilter/nf_tables_bridge.h @@ -0,0 +1,7 @@ +#ifndef _NET_NF_TABLES_BRIDGE_H +#define _NET_NF_TABLES_BRIDGE_H + +int nft_bridge_iphdr_validate(struct sk_buff *skb); +int nft_bridge_ip6hdr_validate(struct sk_buff *skb); + +#endif /* _NET_NF_TABLES_BRIDGE_H */ diff --git a/include/net/netfilter/nft_redir.h b/include/net/netfilter/nft_redir.h new file mode 100644 index 000000000000..a2d67546afab --- /dev/null +++ b/include/net/netfilter/nft_redir.h @@ -0,0 +1,21 @@ +#ifndef _NFT_REDIR_H_ +#define _NFT_REDIR_H_ + +struct nft_redir { + enum nft_registers sreg_proto_min:8; + enum nft_registers sreg_proto_max:8; + u16 flags; +}; + +extern const struct nla_policy nft_redir_policy[]; + +int nft_redir_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_redir_dump(struct sk_buff *skb, const struct nft_expr *expr); + +int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, + const struct nft_data **data); + +#endif /* _NFT_REDIR_H_ */ diff --git a/include/net/netlink.h b/include/net/netlink.h index 7b903e1bdbbb..64158353ecb2 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -1185,4 +1185,14 @@ static inline int nla_validate_nested(const struct nlattr *start, int maxtype, #define nla_for_each_nested(pos, nla, rem) \ nla_for_each_attr(pos, nla_data(nla), nla_len(nla), rem) +/** + * nla_is_last - Test if attribute is last in stream + * @nla: attribute to test + * @rem: bytes remaining in stream + */ +static inline bool nla_is_last(const struct nlattr *nla, int rem) +{ + return nla->nla_len == rem; +} + #endif diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 9da798256f0e..730d82ad6ee5 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -50,8 +50,8 @@ struct netns_xfrm { struct list_head policy_all; struct hlist_head *policy_byidx; unsigned int policy_idx_hmask; - struct hlist_head policy_inexact[XFRM_POLICY_MAX * 2]; - struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2]; + struct hlist_head policy_inexact[XFRM_POLICY_MAX]; + struct xfrm_policy_hash 
policy_bydst[XFRM_POLICY_MAX]; unsigned int policy_count[XFRM_POLICY_MAX * 2]; struct work_struct policy_hash_work; struct xfrm_policy_hthresh policy_hthresh; diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h index d9a5cf7ac1c4..0ae101eef0f4 100644 --- a/include/net/nfc/digital.h +++ b/include/net/nfc/digital.h @@ -225,6 +225,19 @@ struct nfc_digital_dev { u8 curr_protocol; u8 curr_rf_tech; u8 curr_nfc_dep_pni; + u8 did; + + u8 local_payload_max; + u8 remote_payload_max; + + struct sk_buff *chaining_skb; + struct digital_data_exch *data_exch; + + int atn_count; + int nack_count; + + struct sk_buff *saved_skb; + unsigned int saved_skb_len; u16 target_fsc; diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index 7ee8f4cc610b..14bd0e1c47fa 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h @@ -57,10 +57,14 @@ struct nfc_hci_ops { int (*discover_se)(struct nfc_hci_dev *dev); int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx); int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx); + int (*se_io)(struct nfc_hci_dev *dev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context); }; /* Pipes */ #define NFC_HCI_INVALID_PIPE 0x80 +#define NFC_HCI_DO_NOT_CREATE_PIPE 0x81 #define NFC_HCI_LINK_MGMT_PIPE 0x00 #define NFC_HCI_ADMIN_PIPE 0x01 diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h index 9eca9ae2280c..e7257a4653b4 100644 --- a/include/net/nfc/nci.h +++ b/include/net/nfc/nci.h @@ -28,6 +28,8 @@ #ifndef __NCI_H #define __NCI_H +#include <net/nfc/nfc.h> + /* NCI constants */ #define NCI_MAX_NUM_MAPPING_CONFIGS 10 #define NCI_MAX_NUM_RF_CONFIGS 10 @@ -73,6 +75,8 @@ #define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83 #define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85 +#define NCI_RF_TECH_MODE_LISTEN_MASK 0x80 + /* NCI RF Technologies */ #define NCI_NFC_RF_TECHNOLOGY_A 0x00 #define NCI_NFC_RF_TECHNOLOGY_B 0x01 @@ -106,6 +110,17 @@ /* NCI Configuration Parameter Tags */ #define NCI_PN_ATR_REQ_GEN_BYTES 0x29 +#define NCI_LN_ATR_RES_GEN_BYTES 0x61 +#define NCI_LA_SEL_INFO 0x32 +#define NCI_LF_PROTOCOL_TYPE 0x50 +#define NCI_LF_CON_BITR_F 0x54 + +/* NCI Configuration Parameters masks */ +#define NCI_LA_SEL_INFO_ISO_DEP_MASK 0x20 +#define NCI_LA_SEL_INFO_NFC_DEP_MASK 0x40 +#define NCI_LF_PROTOCOL_TYPE_NFC_DEP_MASK 0x02 +#define NCI_LF_CON_BITR_F_212 0x02 +#define NCI_LF_CON_BITR_F_424 0x04 /* NCI Reset types */ #define NCI_RESET_TYPE_KEEP_CONFIG 0x00 @@ -314,26 +329,31 @@ struct nci_core_intf_error_ntf { struct rf_tech_specific_params_nfca_poll { __u16 sens_res; __u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */ - __u8 nfcid1[10]; + __u8 nfcid1[NFC_NFCID1_MAXSIZE]; __u8 sel_res_len; /* 0 or 1 Bytes */ __u8 sel_res; } __packed; struct rf_tech_specific_params_nfcb_poll { __u8 sensb_res_len; - __u8 sensb_res[12]; /* 11 or 12 Bytes */ + __u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; /* 11 or 12 Bytes */ } __packed; struct rf_tech_specific_params_nfcf_poll { __u8 bit_rate; __u8 sensf_res_len; - __u8 sensf_res[18]; /* 16 or 18 Bytes */ + __u8 sensf_res[NFC_SENSF_RES_MAXSIZE]; /* 16 or 18 Bytes */ } __packed; struct rf_tech_specific_params_nfcv_poll { __u8 res_flags; __u8 dsfid; - __u8 uid[8]; /* 8 Bytes */ + __u8 uid[NFC_ISO15693_UID_MAXSIZE]; /* 8 Bytes */ +} __packed; + +struct rf_tech_specific_params_nfcf_listen { + __u8 local_nfcid2_len; + __u8 local_nfcid2[NFC_NFCID2_MAXSIZE]; /* 0 or 8 Bytes */ } __packed; struct nci_rf_discover_ntf { @@ -365,7 +385,12 @@ struct activation_params_nfcb_poll_iso_dep { struct activation_params_poll_nfc_dep { __u8 atr_res_len; - __u8 
atr_res[63]; + __u8 atr_res[NFC_ATR_RES_MAXSIZE - 2]; /* ATR_RES from byte 3 */ +}; + +struct activation_params_listen_nfc_dep { + __u8 atr_req_len; + __u8 atr_req[NFC_ATR_REQ_MAXSIZE - 2]; /* ATR_REQ from byte 3 */ }; struct nci_rf_intf_activated_ntf { @@ -382,6 +407,7 @@ struct nci_rf_intf_activated_ntf { struct rf_tech_specific_params_nfcb_poll nfcb_poll; struct rf_tech_specific_params_nfcf_poll nfcf_poll; struct rf_tech_specific_params_nfcv_poll nfcv_poll; + struct rf_tech_specific_params_nfcf_listen nfcf_listen; } rf_tech_specific_params; __u8 data_exch_rf_tech_and_mode; @@ -393,6 +419,7 @@ struct nci_rf_intf_activated_ntf { struct activation_params_nfca_poll_iso_dep nfca_poll_iso_dep; struct activation_params_nfcb_poll_iso_dep nfcb_poll_iso_dep; struct activation_params_poll_nfc_dep poll_nfc_dep; + struct activation_params_listen_nfc_dep listen_nfc_dep; } activation_params; } __packed; diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index 75d10e625c49..9e51bb4d841e 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -4,6 +4,7 @@ * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2013 Intel Corporation. All rights reserved. + * Copyright (C) 2014 Marvell International Ltd. * * Written by Ilan Elias <ilane@ti.com> * @@ -49,6 +50,8 @@ enum nci_state { NCI_W4_ALL_DISCOVERIES, NCI_W4_HOST_SELECT, NCI_POLL_ACTIVE, + NCI_LISTEN_ACTIVE, + NCI_LISTEN_SLEEP, }; /* NCI timeouts */ @@ -69,6 +72,12 @@ struct nci_ops { int (*send)(struct nci_dev *ndev, struct sk_buff *skb); int (*setup)(struct nci_dev *ndev); __u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol); + int (*discover_se)(struct nci_dev *ndev); + int (*disable_se)(struct nci_dev *ndev, u32 se_idx); + int (*enable_se)(struct nci_dev *ndev, u32 se_idx); + int (*se_io)(struct nci_dev *ndev, u32 se_idx, + u8 *apdu, size_t apdu_length, + se_io_cb_t cb, void *cb_context); }; #define NCI_MAX_SUPPORTED_RF_INTERFACES 4 diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 6c583e244de2..12adb817c27a 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -1,5 +1,6 @@ /* * Copyright (C) 2011 Instituto Nokia de Tecnologia + * Copyright (C) 2014 Marvell International Ltd. * * Authors: * Lauro Ramos Venancio <lauro.venancio@openbossa.org> @@ -87,6 +88,7 @@ struct nfc_ops { #define NFC_TARGET_IDX_ANY -1 #define NFC_MAX_GT_LEN 48 #define NFC_ATR_RES_GT_OFFSET 15 +#define NFC_ATR_REQ_GT_OFFSET 14 /** * struct nfc_target - NFC target descriptiom diff --git a/include/net/nl802154.h b/include/net/nl802154.h index b23548e04098..6dbd406ca41b 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -1,126 +1,122 @@ +#ifndef __NL802154_H +#define __NL802154_H /* - * nl802154.h + * 802.15.4 netlink interface public header * - * Copyright (C) 2007, 2008, 2009 Siemens AG + * Copyright 2014 Alexander Aring <aar@pengutronix.de> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ -#ifndef IEEE802154_NL_H -#define IEEE802154_NL_H +#define NL802154_GENL_NAME "nl802154" -struct net_device; -struct ieee802154_addr; +enum nl802154_commands { +/* don't change the order or add anything between, this is ABI! */ +/* currently we don't shipping this file via uapi, ignore the above one */ + NL802154_CMD_UNSPEC, -/** - * ieee802154_nl_assoc_indic - Notify userland of an association request. - * @dev: The network device on which this association request was - * received. - * @addr: The address of the device requesting association. - * @cap: The capability information field from the device. - * - * This informs a userland coordinator of a device requesting to - * associate with the PAN controlled by the coordinator. - * - * Note: This is in section 7.3.1 of the IEEE 802.15.4-2006 document. - */ -int ieee802154_nl_assoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, u8 cap); - -/** - * ieee802154_nl_assoc_confirm - Notify userland of association. - * @dev: The device which has completed association. - * @short_addr: The short address assigned to the device. - * @status: The status of the association. - * - * Inform userland of the result of an association request. If the - * association request included asking the coordinator to allocate - * a short address then it is returned in @short_addr. - * - * Note: This is in section 7.3.2 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_assoc_confirm(struct net_device *dev, - __le16 short_addr, u8 status); - -/** - * ieee802154_nl_disassoc_indic - Notify userland of disassociation. - * @dev: The device on which disassociation was indicated. - * @addr: The device which is disassociating. - * @reason: The reason for the disassociation. - * - * Inform userland that a device has disassociated from the network. - * - * Note: This is in section 7.3.3 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_disassoc_indic(struct net_device *dev, - struct ieee802154_addr *addr, u8 reason); - -/** - * ieee802154_nl_disassoc_confirm - Notify userland of disassociation - * completion. - * @dev: The device on which disassociation was ordered. - * @status: The result of the disassociation. - * - * Inform userland of the result of requesting that a device - * disassociate, or the result of requesting that we disassociate from - * a PAN managed by another coordinator. - * - * Note: This is in section 7.1.4.3 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_disassoc_confirm(struct net_device *dev, - u8 status); - -/** - * ieee802154_nl_scan_confirm - Notify userland of completion of scan. - * @dev: The device which was instructed to scan. - * @status: The status of the scan operation. - * @scan_type: What type of scan was performed. 
- * @unscanned: Any channels that the device was unable to scan. - * @edl: The energy levels (if a passive scan). - * - * - * Note: This is in section 7.1.11 of the IEEE 802.15.4 document. - * Note: This API does not permit the return of an active scan result. - */ -int ieee802154_nl_scan_confirm(struct net_device *dev, - u8 status, u8 scan_type, u32 unscanned, u8 page, - u8 *edl/*, struct list_head *pan_desc_list */); - -/** - * ieee802154_nl_beacon_indic - Notify userland of a received beacon. - * @dev: The device on which a beacon was received. - * @panid: The PAN of the coordinator. - * @coord_addr: The short address of the coordinator on that PAN. - * - * Note: This is in section 7.1.5 of the IEEE 802.15.4 document. - * Note: This API does not provide extended information such as what - * channel the PAN is on or what the LQI of the beacon frame was on - * receipt. - * Note: This API cannot indicate a beacon frame for a coordinator - * operating in long addressing mode. - */ -int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid, - __le16 coord_addr); + NL802154_CMD_GET_WPAN_PHY, /* can dump */ + NL802154_CMD_SET_WPAN_PHY, + NL802154_CMD_NEW_WPAN_PHY, + NL802154_CMD_DEL_WPAN_PHY, -/** - * ieee802154_nl_start_confirm - Notify userland of completion of start. - * @dev: The device which was instructed to scan. - * @status: The status of the scan operation. - * - * Note: This is in section 7.1.14 of the IEEE 802.15.4 document. - */ -int ieee802154_nl_start_confirm(struct net_device *dev, u8 status); + NL802154_CMD_GET_INTERFACE, /* can dump */ + NL802154_CMD_SET_INTERFACE, + NL802154_CMD_NEW_INTERFACE, + NL802154_CMD_DEL_INTERFACE, + + NL802154_CMD_SET_CHANNEL, + + NL802154_CMD_SET_PAN_ID, + NL802154_CMD_SET_SHORT_ADDR, + + NL802154_CMD_SET_TX_POWER, + NL802154_CMD_SET_CCA_MODE, + NL802154_CMD_SET_CCA_ED_LEVEL, + + NL802154_CMD_SET_MAX_FRAME_RETRIES, + + NL802154_CMD_SET_BACKOFF_EXPONENT, + NL802154_CMD_SET_MAX_CSMA_BACKOFFS, + + NL802154_CMD_SET_LBT_MODE, + + /* add new commands above here */ + + /* used to define NL802154_CMD_MAX below */ + __NL802154_CMD_AFTER_LAST, + NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1 +}; + +enum nl802154_attrs { +/* don't change the order or add anything between, this is ABI! 
*/ +/* currently we don't shipping this file via uapi, ignore the above one */ + NL802154_ATTR_UNSPEC, + + NL802154_ATTR_WPAN_PHY, + NL802154_ATTR_WPAN_PHY_NAME, + + NL802154_ATTR_IFINDEX, + NL802154_ATTR_IFNAME, + NL802154_ATTR_IFTYPE, + + NL802154_ATTR_WPAN_DEV, + + NL802154_ATTR_PAGE, + NL802154_ATTR_CHANNEL, + + NL802154_ATTR_PAN_ID, + NL802154_ATTR_SHORT_ADDR, + + NL802154_ATTR_TX_POWER, + + NL802154_ATTR_CCA_MODE, + NL802154_ATTR_CCA_MODE3_AND, + NL802154_ATTR_CCA_ED_LEVEL, + + NL802154_ATTR_MAX_FRAME_RETRIES, + + NL802154_ATTR_MAX_BE, + NL802154_ATTR_MIN_BE, + NL802154_ATTR_MAX_CSMA_BACKOFFS, + + NL802154_ATTR_LBT_MODE, + + NL802154_ATTR_GENERATION, + + NL802154_ATTR_CHANNELS_SUPPORTED, + NL802154_ATTR_SUPPORTED_CHANNEL, + + NL802154_ATTR_EXTENDED_ADDR, + + /* add attributes here, update the policy in nl802154.c */ + + __NL802154_ATTR_AFTER_LAST, + NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1 +}; + +enum nl802154_iftype { + /* for backwards compatibility TODO */ + NL802154_IFTYPE_UNSPEC = -1, + + NL802154_IFTYPE_NODE, + NL802154_IFTYPE_MONITOR, + NL802154_IFTYPE_COORD, + + /* keep last */ + NUM_NL802154_IFTYPES, + NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1 +}; -#endif +#endif /* __NL802154_H */ diff --git a/include/net/ping.h b/include/net/ping.h index 026479b61a2d..f074060bc5de 100644 --- a/include/net/ping.h +++ b/include/net/ping.h @@ -82,7 +82,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len); int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); -void ping_rcv(struct sk_buff *skb); +bool ping_rcv(struct sk_buff *skb); #ifdef CONFIG_PROC_FS struct ping_seq_afinfo { diff --git a/include/net/regulatory.h b/include/net/regulatory.h index dad7ab20a8cb..b776d72d84be 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -136,6 +136,17 @@ struct regulatory_request { * otherwise initiating radiation is not allowed. This will enable the * relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration * option + * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure + * all interfaces on this wiphy reside on allowed channels. If this flag + * is not set, upon a regdomain change, the interfaces are given a grace + * period (currently 60 seconds) to disconnect or move to an allowed + * channel. Interfaces on forbidden channels are forcibly disconnected. + * Currently these types of interfaces are supported for enforcement: + * NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP, + * NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR, + * NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO, + * NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device + * includes any modes unsupported for enforcement checking. 
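[Editorial aside, not part of the patch: the REGULATORY_IGNORE_STALE_KICKOFF flag documented above lets a wiphy that polices channels itself opt out of the 60-second kickoff enforcement. A hedged sketch; the driver function name is made up.]

#include <net/cfg80211.h>

/* Hypothetical driver probe path: opt out of stale-kickoff enforcement
 * because the driver handles forbidden-channel disconnects itself. */
static struct wiphy *example_alloc_wiphy(const struct cfg80211_ops *ops)
{
	struct wiphy *wiphy = wiphy_new(ops, 0);

	if (!wiphy)
		return NULL;
	wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
	return wiphy;
}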
*/ enum ieee80211_regulatory_flags { REGULATORY_CUSTOM_REG = BIT(0), @@ -144,6 +155,7 @@ enum ieee80211_regulatory_flags { REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3), REGULATORY_COUNTRY_IE_IGNORE = BIT(4), REGULATORY_ENABLE_RELAX_NO_IR = BIT(5), + REGULATORY_IGNORE_STALE_KICKOFF = BIT(6), }; struct ieee80211_freq_range { diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index d17ed6fb2f70..3d282cbb66bf 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -219,7 +219,6 @@ struct tcf_proto_ops { void (*destroy)(struct tcf_proto*); unsigned long (*get)(struct tcf_proto*, u32 handle); - void (*put)(struct tcf_proto*, unsigned long); int (*change)(struct net *net, struct sk_buff *, struct tcf_proto*, unsigned long, u32 handle, struct nlattr **, diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 72a31db47ded..487ef34bbd63 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -219,7 +219,7 @@ struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *, const struct sctp_chunk *, __u32 tsn); struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, - const struct msghdr *, size_t msg_len); + struct msghdr *, size_t msg_len); struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, const struct sctp_chunk *, const __u8 *, diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 4ff3f67be62c..2bb2fcf5b11f 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -531,7 +531,7 @@ struct sctp_datamsg { struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *, struct sctp_sndrcvinfo *, - struct msghdr *, int len); + struct iov_iter *); void sctp_datamsg_free(struct sctp_datamsg *); void sctp_datamsg_put(struct sctp_datamsg *); void sctp_chunk_fail(struct sctp_chunk *, int error); @@ -647,8 +647,8 @@ struct sctp_chunk { void sctp_chunk_hold(struct sctp_chunk *); void sctp_chunk_put(struct sctp_chunk *); -int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, - struct iovec *data); +int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len, + struct iov_iter *from); void sctp_chunk_free(struct sctp_chunk *); void *sctp_addto_chunk(struct sctp_chunk *, int len, const void *data); struct sctp_chunk *sctp_chunkify(struct sk_buff *, @@ -1116,7 +1116,6 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len, sctp_scope_t sctp_scope(const union sctp_addr *); int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope); int sctp_is_any(struct sock *sk, const union sctp_addr *addr); -int sctp_addr_is_valid(const union sctp_addr *addr); int sctp_is_ep_boundall(struct sock *sk); diff --git a/include/net/sock.h b/include/net/sock.h index 7db3db112baa..2210fec65669 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -54,8 +54,8 @@ #include <linux/security.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include <linux/page_counter.h> #include <linux/memcontrol.h> -#include <linux/res_counter.h> #include <linux/static_key.h> #include <linux/aio.h> #include <linux/sched.h> @@ -273,6 +273,7 @@ struct cg_proto; * @sk_rcvtimeo: %SO_RCVTIMEO setting * @sk_sndtimeo: %SO_SNDTIMEO setting * @sk_rxhash: flow hash received from netif layer + * @sk_incoming_cpu: record cpu processing incoming packets * @sk_txhash: computed flow hash for use on transmit * @sk_filter: socket filtering instructions * @sk_protinfo: private area, net family specific, when not using slab @@ -350,6 +351,12 @@ 
struct sock { #ifdef CONFIG_RPS __u32 sk_rxhash; #endif + u16 sk_incoming_cpu; + /* 16bit hole + * Warned : sk_incoming_cpu can be set from softirq, + * Do not use this hole without fully understanding possible issues. + */ + __u32 sk_txhash; #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sk_napi_id; @@ -833,6 +840,11 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) return sk->sk_backlog_rcv(sk, skb); } +static inline void sk_incoming_cpu_update(struct sock *sk) +{ + sk->sk_incoming_cpu = raw_smp_processor_id(); +} + static inline void sock_rps_record_flow_hash(__u32 hash) { #ifdef CONFIG_RPS @@ -897,6 +909,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk) if (!__rc) { \ *(__timeo) = schedule_timeout(*(__timeo)); \ } \ + sched_annotate_sleep(); \ lock_sock(__sk); \ __rc = __condition; \ __rc; \ @@ -1061,7 +1074,7 @@ enum cg_proto_flags { }; struct cg_proto { - struct res_counter memory_allocated; /* Current allocated memory. */ + struct page_counter memory_allocated; /* Current allocated memory. */ struct percpu_counter sockets_allocated; /* Current number of sockets. */ int memory_pressure; long sysctl_mem[3]; @@ -1213,34 +1226,26 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot, unsigned long amt, int *parent_status) { - struct res_counter *fail; - int ret; + page_counter_charge(&prot->memory_allocated, amt); - ret = res_counter_charge_nofail(&prot->memory_allocated, - amt << PAGE_SHIFT, &fail); - if (ret < 0) + if (page_counter_read(&prot->memory_allocated) > + prot->memory_allocated.limit) *parent_status = OVER_LIMIT; } static inline void memcg_memory_allocated_sub(struct cg_proto *prot, unsigned long amt) { - res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT); -} - -static inline u64 memcg_memory_allocated_read(struct cg_proto *prot) -{ - u64 ret; - ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE); - return ret >> PAGE_SHIFT; + page_counter_uncharge(&prot->memory_allocated, amt); } static inline long sk_memory_allocated(const struct sock *sk) { struct proto *prot = sk->sk_prot; + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return memcg_memory_allocated_read(sk->sk_cgrp); + return page_counter_read(&sk->sk_cgrp->memory_allocated); return atomic_long_read(prot->memory_allocated); } @@ -1254,7 +1259,7 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); /* update the root cgroup regardless */ atomic_long_add_return(amt, prot->memory_allocated); - return memcg_memory_allocated_read(sk->sk_cgrp); + return page_counter_read(&sk->sk_cgrp->memory_allocated); } return atomic_long_add_return(amt, prot->memory_allocated); @@ -1588,6 +1593,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, int *errcode, int max_page_order); void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); void sock_kfree_s(struct sock *sk, void *mem, int size); +void sock_kzfree_s(struct sock *sk, void *mem, int size); void sk_send_sigurg(struct sock *sk); /* @@ -1872,29 +1878,6 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from, return 0; } -static inline int skb_copy_to_page(struct sock *sk, char __user *from, - struct sk_buff *skb, struct page *page, - int off, int copy) -{ - if (skb->ip_summed == CHECKSUM_NONE) { - int err = 0; - __wsum csum = csum_and_copy_from_user(from, - page_address(page) + off, - copy, 0, &err); - if (err) - return err; - skb->csum = csum_block_add(skb->csum, 
csum, skb->len); - } else if (copy_from_user(page_address(page) + off, from, copy)) - return -EFAULT; - - skb->len += copy; - skb->data_len += copy; - skb->truesize += copy; - sk->sk_wmem_queued += copy; - sk_mem_charge(sk, copy); - return 0; -} - /** * sk_wmem_alloc_get - returns write allocations * @sk: socket @@ -2276,16 +2259,6 @@ bool sk_ns_capable(const struct sock *sk, bool sk_capable(const struct sock *sk, int cap); bool sk_net_capable(const struct sock *sk, int cap); -/* - * Enable debug/info messages - */ -extern int net_msg_warn; -#define NETDEBUG(fmt, args...) \ - do { if (net_msg_warn) printk(fmt,##args); } while (0) - -#define LIMIT_NETDEBUG(fmt, args...) \ - do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0) - extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; diff --git a/include/net/switchdev.h b/include/net/switchdev.h new file mode 100644 index 000000000000..8a6d1641fd9b --- /dev/null +++ b/include/net/switchdev.h @@ -0,0 +1,37 @@ +/* + * include/net/switchdev.h - Switch device API + * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef _LINUX_SWITCHDEV_H_ +#define _LINUX_SWITCHDEV_H_ + +#include <linux/netdevice.h> + +#ifdef CONFIG_NET_SWITCHDEV + +int netdev_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid); +int netdev_switch_port_stp_update(struct net_device *dev, u8 state); + +#else + +static inline int netdev_switch_parent_id_get(struct net_device *dev, + struct netdev_phys_item_id *psid) +{ + return -EOPNOTSUPP; +} + +static inline int netdev_switch_port_stp_update(struct net_device *dev, + u8 state) +{ + return -EOPNOTSUPP; +} + +#endif + +#endif /* _LINUX_SWITCHDEV_H_ */ diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h new file mode 100644 index 000000000000..93b70ade1ff3 --- /dev/null +++ b/include/net/tc_act/tc_vlan.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __NET_TC_VLAN_H +#define __NET_TC_VLAN_H + +#include <net/act_api.h> + +#define VLAN_F_POP 0x1 +#define VLAN_F_PUSH 0x2 + +struct tcf_vlan { + struct tcf_common common; + int tcfv_action; + u16 tcfv_push_vid; + __be16 tcfv_push_proto; +}; +#define to_vlan(a) \ + container_of(a->priv, struct tcf_vlan, common) + +#endif /* __NET_TC_VLAN_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 4062b4f0d121..f50f29faf76f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -55,9 +55,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define MAX_TCP_HEADER (128 + MAX_HEADER) #define MAX_TCP_OPTION_SPACE 40 -/* +/* * Never offer a window over 32767 without using window scaling. Some - * poor stacks do signed 16bit maths! + * poor stacks do signed 16bit maths! */ #define MAX_TCP_WINDOW 32767U @@ -70,9 +70,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); /* After receiving this amount of duplicate ACKs fast retransmit starts. */ #define TCP_FASTRETRANS_THRESH 3 -/* Maximal reordering. 
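[Editorial aside, not part of the patch: the sock_kzfree_s() declaration added to net/sock.h above pairs with sock_kmalloc() but zeroes the buffer before uncharging it, which matters for key material. A small hypothetical sketch.]

#include <net/sock.h>
#include <linux/string.h>

/* Hypothetical key-handling path: the copy holds sensitive data, so it
 * is cleared with sock_kzfree_s() instead of plain sock_kfree_s(). */
static int example_use_key(struct sock *sk, const u8 *key, int len)
{
	u8 *copy = sock_kmalloc(sk, len, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;
	memcpy(copy, key, len);
	/* ... use the key ... */
	sock_kzfree_s(sk, copy, len);
	return 0;
}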
*/ -#define TCP_MAX_REORDERING 127 - /* Maximal number of ACKs sent quickly to accelerate slow-start. */ #define TCP_MAX_QUICKACKS 16U @@ -167,7 +164,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); /* * TCP option */ - + #define TCPOPT_NOP 1 /* Padding */ #define TCPOPT_EOL 0 /* End of options */ #define TCPOPT_MSS 2 /* Segment size negotiating */ @@ -252,6 +249,7 @@ extern int sysctl_tcp_abort_on_overflow; extern int sysctl_tcp_max_orphans; extern int sysctl_tcp_fack; extern int sysctl_tcp_reordering; +extern int sysctl_tcp_max_reordering; extern int sysctl_tcp_dsack; extern long sysctl_tcp_mem[3]; extern int sysctl_tcp_wmem[3]; @@ -492,17 +490,16 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, u16 *mssp); __u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mss); -#endif - __u32 cookie_init_timestamp(struct request_sock *req); -bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net, - bool *ecn_ok); +bool cookie_timestamp_decode(struct tcp_options_received *opt); +bool cookie_ecn_ok(const struct tcp_options_received *opt, + const struct net *net, const struct dst_entry *dst); /* From net/ipv6/syncookies.c */ int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th, u32 cookie); struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); -#ifdef CONFIG_SYN_COOKIES + u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp); __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, @@ -1104,16 +1101,16 @@ static inline int tcp_win_from_space(int space) space - (space>>sysctl_tcp_adv_win_scale); } -/* Note: caller must be prepared to deal with negative returns */ +/* Note: caller must be prepared to deal with negative returns */ static inline int tcp_space(const struct sock *sk) { return tcp_win_from_space(sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)); -} +} static inline int tcp_full_space(const struct sock *sk) { - return tcp_win_from_space(sk->sk_rcvbuf); + return tcp_win_from_space(sk->sk_rcvbuf); } static inline void tcp_openreq_init(struct request_sock *req, diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index a47790bcaa38..2a50a70ef587 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -100,6 +100,15 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, return iptunnel_handle_offloads(skb, udp_csum, type); } +static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff) +{ + struct udphdr *uh; + + uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr)); + skb_shinfo(skb)->gso_type |= uh->check ? + SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; +} + static inline void udp_tunnel_encap_enable(struct socket *sock) { #if IS_ENABLED(CONFIG_IPV6) diff --git a/include/net/udplite.h b/include/net/udplite.h index 2caadabcd07b..ae7c8d1fbcad 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -19,7 +19,9 @@ extern struct udp_table udplite_table; static __inline__ int udplite_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { - return memcpy_fromiovecend(to, (struct iovec *) from, offset, len); + struct msghdr *msg = from; + /* XXX: stripping const */ + return memcpy_fromiovecend(to, (struct iovec *)msg->msg_iter.iov, offset, len); } /* Designate sk as UDP-Lite socket */ @@ -40,7 +42,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) * checksum. 
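[Editorial aside, not part of the patch: the udp_tunnel_gro_complete() helper added to net/udp_tunnel.h above selects SKB_GSO_UDP_TUNNEL or SKB_GSO_UDP_TUNNEL_CSUM from the outer UDP checksum. A hypothetical gro_complete callback for a VXLAN-style tunnel socket, modelled on that usage.]

#include <linux/etherdevice.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>

/* Hypothetical tunnel gro_complete: tag the merged skb with the right
 * UDP-tunnel GSO type, then let the Ethernet layer finish the job. */
static int example_gro_complete(struct sk_buff *skb, int nhoff)
{
	udp_tunnel_gro_complete(skb, nhoff);
	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}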
UDP-Lite (like IPv6) mandates checksums, hence packets * with a zero checksum field are illegal. */ if (uh->check == 0) { - LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: zeroed checksum field\n"); + net_dbg_ratelimited("UDPLite: zeroed checksum field\n"); return 1; } @@ -52,8 +54,8 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) /* * Coverage length violates RFC 3828: log and discard silently. */ - LIMIT_NETDEBUG(KERN_DEBUG "UDPLite: bad csum coverage %d/%d\n", - cscov, skb->len); + net_dbg_ratelimited("UDPLite: bad csum coverage %d/%d\n", + cscov, skb->len); return 1; } else if (cscov < skb->len) { diff --git a/include/net/vxlan.h b/include/net/vxlan.h index d5f59f3fc35d..57cccd0052e5 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -8,6 +8,12 @@ #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) +/* VXLAN protocol header */ +struct vxlanhdr { + __be32 vx_flags; + __be32 vx_vni; +}; + struct vxlan_sock; typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key); @@ -45,6 +51,18 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, __be32 vni, bool xnet); +static inline bool vxlan_gso_check(struct sk_buff *skb) +{ + if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) && + (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB) || + (skb_inner_mac_header(skb) - skb_transport_header(skb) != + sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) + return false; + + return true; +} + /* IP header + UDP + VXLAN + Ethernet header */ #define VXLAN_HEADROOM (20 + 8 + 8 + 14) /* IPv6 header + UDP + VXLAN + Ethernet header */ diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h deleted file mode 100644 index 10ab0fc6d4f7..000000000000 --- a/include/net/wpan-phy.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (C) 2007, 2008, 2009 Siemens AG - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Written by: - * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> - */ - -#ifndef WPAN_PHY_H -#define WPAN_PHY_H - -#include <linux/netdevice.h> -#include <linux/mutex.h> -#include <linux/bug.h> - -/* According to the IEEE 802.15.4 stadard the upper most significant bits of - * the 32-bit channel bitmaps shall be used as an integer value to specify 32 - * possible channel pages. The lower 27 bits of the channel bit map shall be - * used as a bit mask to specify channel numbers within a channel page. - */ -#define WPAN_NUM_CHANNELS 27 -#define WPAN_NUM_PAGES 32 - -struct wpan_phy { - struct mutex pib_lock; - - /* - * This is a PIB according to 802.15.4-2011. 
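[Editorial aside, not part of the patch: the vxlan_gso_check() inline added to net/vxlan.h above lets a NIC driver refuse UDP-tunnel offload for anything that is not plain VXLAN-in-Ethernet. A sketch only; the callback name is hypothetical and the signature follows the gso_check netdev op convention of this kernel generation.]

#include <linux/netdevice.h>
#include <net/vxlan.h>

/* Hypothetical NIC gso_check hook: only claim tunnel segmentation for
 * frames whose inner layout matches what the hardware can parse. */
static bool example_gso_check(struct sk_buff *skb, struct net_device *dev)
{
	return vxlan_gso_check(skb);
}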
- * We do not provide timing-related variables, as they - * aren't used outside of driver - */ - u8 current_channel; - u8 current_page; - u32 channels_supported[32]; - s8 transmit_power; - u8 cca_mode; - u8 min_be; - u8 max_be; - u8 csma_retries; - s8 frame_retries; - - bool lbt; - s32 cca_ed_level; - - struct device dev; - int idx; - - struct net_device *(*add_iface)(struct wpan_phy *phy, - const char *name, int type); - void (*del_iface)(struct wpan_phy *phy, struct net_device *dev); - - int (*set_txpower)(struct wpan_phy *phy, int db); - int (*set_lbt)(struct wpan_phy *phy, bool on); - int (*set_cca_mode)(struct wpan_phy *phy, u8 cca_mode); - int (*set_cca_ed_level)(struct wpan_phy *phy, int level); - int (*set_csma_params)(struct wpan_phy *phy, u8 min_be, u8 max_be, - u8 retries); - int (*set_frame_retries)(struct wpan_phy *phy, s8 retries); - - char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); -}; - -#define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) - -struct wpan_phy *wpan_phy_alloc(size_t priv_size); -static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev) -{ - phy->dev.parent = dev; -} -int wpan_phy_register(struct wpan_phy *phy); -void wpan_phy_unregister(struct wpan_phy *phy); -void wpan_phy_free(struct wpan_phy *phy); -/* Same semantics as for class_for_each_device */ -int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data); - -static inline void *wpan_phy_priv(struct wpan_phy *phy) -{ - BUG_ON(!phy); - return &phy->priv; -} - -struct wpan_phy *wpan_phy_find(const char *str); - -static inline void wpan_phy_put(struct wpan_phy *phy) -{ - put_device(&phy->dev); -} - -static inline const char *wpan_phy_name(struct wpan_phy *phy) -{ - return dev_name(&phy->dev); -} -#endif diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 52beadf9a29b..93d14daf0994 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -1105,8 +1105,6 @@ int fc_eh_abort(struct scsi_cmnd *); int fc_eh_device_reset(struct scsi_cmnd *); int fc_eh_host_reset(struct scsi_cmnd *); int fc_slave_alloc(struct scsi_device *); -int fc_change_queue_depth(struct scsi_device *, int qdepth, int reason); -int fc_change_queue_type(struct scsi_device *, int tag_type); /* * ELS/CT interface diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 728c9ad9feb0..4d1c46aac331 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -378,8 +378,6 @@ struct iscsi_host { /* * scsi host template */ -extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, - int reason); extern int iscsi_eh_abort(struct scsi_cmnd *sc); extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index ef7872c20da9..9d87a37aecad 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -161,17 +161,12 @@ struct expander_device { }; /* ---------- SATA device ---------- */ -enum ata_command_set { - ATA_COMMAND_SET = 0, - ATAPI_COMMAND_SET = 1, -}; - #define ATA_RESP_FIS_SIZE 24 struct sata_device { - enum ata_command_set command_set; - struct smp_resp rps_resp; /* report_phy_sata_resp */ - u8 port_no; /* port number, if this is a PM (Port) */ + unsigned int class; + struct smp_resp rps_resp; /* report_phy_sata_resp */ + u8 port_no; /* port number, if this is a PM (Port) */ struct ata_port *ap; struct ata_host ata_host; @@ -365,12 +360,6 @@ struct asd_sas_phy { struct scsi_core { struct Scsi_Host *shost; - struct mutex 
task_queue_flush; - spinlock_t task_queue_lock; - struct list_head task_queue; - int task_queue_size; - - struct task_struct *queue_thread; }; struct sas_ha_event { @@ -422,9 +411,6 @@ struct sas_ha_struct { struct asd_sas_port **sas_port; /* array of valid pointers, must be set */ int num_phys; /* must be set, gt 0, static */ - /* The class calls this to send a task for execution. */ - int lldd_max_execute_num; - int lldd_queue_size; int strict_wide_ports; /* both sas_addr and attached_sas_addr must match * their siblings when forming wide ports */ @@ -612,7 +598,6 @@ struct sas_ssp_task { struct sas_task { struct domain_device *dev; - struct list_head list; spinlock_t task_state_lock; unsigned task_state_flags; @@ -665,8 +650,7 @@ struct sas_domain_function_template { int (*lldd_dev_found)(struct domain_device *); void (*lldd_dev_gone)(struct domain_device *); - int (*lldd_execute_task)(struct sas_task *, int num, - gfp_t gfp_flags); + int (*lldd_execute_task)(struct sas_task *, gfp_t gfp_flags); /* Task Management Functions. Must be called from process context. */ int (*lldd_abort_task)(struct sas_task *); @@ -700,12 +684,10 @@ extern void sas_suspend_ha(struct sas_ha_struct *sas_ha); int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates); int sas_phy_reset(struct sas_phy *phy, int hard_reset); -int sas_queue_up(struct sas_task *task); extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *); extern int sas_target_alloc(struct scsi_target *); extern int sas_slave_configure(struct scsi_device *); -extern int sas_change_queue_depth(struct scsi_device *, int new_depth, - int reason); +extern int sas_change_queue_depth(struct scsi_device *, int new_depth); extern int sas_change_queue_type(struct scsi_device *, int qt); extern int sas_bios_param(struct scsi_device *, struct block_device *, diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index d17178e6fcdd..8a7f8ad58aac 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -128,8 +128,10 @@ enum scsi_timeouts { #define MOVE_MEDIUM 0xa5 #define EXCHANGE_MEDIUM 0xa6 #define READ_12 0xa8 +#define SERVICE_ACTION_OUT_12 0xa9 #define WRITE_12 0xaa -#define READ_MEDIA_SERIAL_NUMBER 0xab +#define READ_MEDIA_SERIAL_NUMBER 0xab /* Obsolete with SPC-2 */ +#define SERVICE_ACTION_IN_12 0xab #define WRITE_VERIFY_12 0xae #define VERIFY_12 0xaf #define SEARCH_HIGH_12 0xb0 @@ -151,7 +153,9 @@ enum scsi_timeouts { #define VERIFY_16 0x8f #define SYNCHRONIZE_CACHE_16 0x91 #define WRITE_SAME_16 0x93 -#define SERVICE_ACTION_IN 0x9e +#define SERVICE_ACTION_BIDIRECTIONAL 0x9d +#define SERVICE_ACTION_IN_16 0x9e +#define SERVICE_ACTION_OUT_16 0x9f /* values for service action in */ #define SAI_READ_CAPACITY_16 0x10 #define SAI_GET_LBA_STATUS 0x12 @@ -165,8 +169,8 @@ enum scsi_timeouts { #define MI_REPORT_ALIASES 0x0b #define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c #define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d -#define MI_REPORT_PRIORITY 0x0e -#define MI_REPORT_TIMESTAMP 0x0f +#define MI_REPORT_PRIORITY 0x0e +#define MI_REPORT_TIMESTAMP 0x0f #define MI_MANAGEMENT_PROTOCOL_IN 0x10 /* value for MI_REPORT_TARGET_PGS ext header */ #define MI_EXT_HDR_PARAM_FMT 0x20 diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 522a5f27f553..9fc1aecfc813 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -53,6 +53,9 @@ struct scsi_pointer { volatile int phase; }; +/* for scmd->flags */ +#define SCMD_TAGGED (1 << 0) + struct scsi_cmnd { struct scsi_device *device; struct list_head list; 
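[Editorial aside, not part of the patch: the SCMD_TAGGED flag introduced above replaces blk_rq_tagged() checks in drivers converted to the new tagging scheme. A minimal hypothetical helper.]

#include <scsi/scsi_cmnd.h>

/* Hypothetical LLDD fast path: per-command tagging is now signalled by
 * SCMD_TAGGED in scmd->flags rather than by the request's tag state. */
static bool example_cmd_is_tagged(const struct scsi_cmnd *cmd)
{
	return cmd->flags & SCMD_TAGGED;
}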
/* scsi_cmnd participates in queue lists */ @@ -132,6 +135,7 @@ struct scsi_cmnd { * to be at an address < 16Mb). */ int result; /* Status code from lower level driver */ + int flags; /* Command flags */ unsigned char tag; /* SCSI-II queued command tag */ }; @@ -159,7 +163,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count, size_t *offset, size_t *len); extern void scsi_kunmap_atomic_sg(void *virt); -extern int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask); +extern int scsi_init_io(struct scsi_cmnd *cmd); extern int scsi_dma_map(struct scsi_cmnd *cmd); extern void scsi_dma_unmap(struct scsi_cmnd *cmd); diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h index e89844cc2cd3..7982795df595 100644 --- a/include/scsi/scsi_dbg.h +++ b/include/scsi/scsi_dbg.h @@ -2,23 +2,27 @@ #define _SCSI_SCSI_DBG_H struct scsi_cmnd; +struct scsi_device; struct scsi_sense_hdr; extern void scsi_print_command(struct scsi_cmnd *); -extern void __scsi_print_command(unsigned char *); -extern void scsi_show_extd_sense(unsigned char, unsigned char); -extern void scsi_show_sense_hdr(struct scsi_sense_hdr *); -extern void scsi_print_sense_hdr(const char *, struct scsi_sense_hdr *); -extern void scsi_cmd_print_sense_hdr(struct scsi_cmnd *, const char *, - struct scsi_sense_hdr *); -extern void scsi_print_sense(char *, struct scsi_cmnd *); -extern void __scsi_print_sense(const char *name, +extern void __scsi_print_command(const unsigned char *, size_t); +extern void scsi_show_extd_sense(const struct scsi_device *, const char *, + unsigned char, unsigned char); +extern void scsi_show_sense_hdr(const struct scsi_device *, const char *, + const struct scsi_sense_hdr *); +extern void scsi_print_sense_hdr(const struct scsi_device *, const char *, + const struct scsi_sense_hdr *); +extern void scsi_print_sense(const struct scsi_cmnd *); +extern void __scsi_print_sense(const struct scsi_device *, const char *name, const unsigned char *sense_buffer, int sense_len); -extern void scsi_show_result(int); -extern void scsi_print_result(struct scsi_cmnd *); -extern void scsi_print_status(unsigned char); +extern void scsi_print_result(struct scsi_cmnd *, const char *, int); +extern const char *scsi_hostbyte_string(int); +extern const char *scsi_driverbyte_string(int); +extern const char *scsi_mlreturn_string(int); extern const char *scsi_sense_key_string(unsigned char); -extern const char *scsi_extd_sense_format(unsigned char, unsigned char); +extern const char *scsi_extd_sense_format(unsigned char, unsigned char, + const char **); #endif /* _SCSI_SCSI_DBG_H */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 27ecee73bd72..6364e23454dd 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -141,7 +141,6 @@ struct scsi_device { unsigned ppr:1; /* Device supports PPR messages */ unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */ unsigned simple_tags:1; /* simple queue tag messages are enabled */ - unsigned ordered_tags:1;/* ordered queue tag messages are enabled */ unsigned was_reset:1; /* There was a bus reset on the bus for * this device */ unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN @@ -201,11 +200,6 @@ struct scsi_device { unsigned long sdev_data[0]; } __attribute__((aligned(sizeof(unsigned long)))); -struct scsi_dh_devlist { - char *vendor; - char *model; -}; - typedef void (*activate_complete)(void *, int); struct scsi_device_handler { /* Used by the infrastructure */ @@ -214,9 +208,8 @@ struct 
scsi_device_handler { /* Filled by the hardware handler */ struct module *module; const char *name; - const struct scsi_dh_devlist *devlist; int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); - int (*attach)(struct scsi_device *); + struct scsi_dh_data *(*attach)(struct scsi_device *); void (*detach)(struct scsi_device *); int (*activate)(struct scsi_device *, activate_complete, void *); int (*prep_fn)(struct scsi_device *, struct request *); @@ -228,7 +221,6 @@ struct scsi_dh_data { struct scsi_device_handler *scsi_dh; struct scsi_device *sdev; struct kref kref; - char buf[0]; }; #define to_scsi_device(d) \ @@ -244,6 +236,15 @@ struct scsi_dh_data { #define sdev_dbg(sdev, fmt, a...) \ dev_dbg(&(sdev)->sdev_gendev, fmt, ##a) +/* + * like scmd_printk, but the device name is passed in + * as a string pointer + */ +#define sdev_prefix_printk(l, sdev, p, fmt, a...) \ + (p) ? \ + sdev_printk(l, sdev, "[%s] " fmt, p, ##a) : \ + sdev_printk(l, sdev, fmt, ##a) + #define scmd_printk(prefix, scmd, fmt, a...) \ (scmd)->request->rq_disk ? \ sdev_printk(prefix, (scmd)->device, "[%s] " fmt, \ @@ -379,7 +380,7 @@ extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *, #define __shost_for_each_device(sdev, shost) \ list_for_each_entry((sdev), &((shost)->__devices), siblings) -extern void scsi_adjust_queue_depth(struct scsi_device *, int, int); +extern int scsi_change_queue_depth(struct scsi_device *, int); extern int scsi_track_queue_full(struct scsi_device *, int); extern int scsi_set_medium_removal(struct scsi_device *, char); diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h index c2b759809d8a..891a658aa867 100644 --- a/include/scsi/scsi_driver.h +++ b/include/scsi/scsi_driver.h @@ -9,7 +9,6 @@ struct scsi_cmnd; struct scsi_device; struct scsi_driver { - struct module *owner; struct device_driver gendrv; void (*rescan)(struct device *); diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h index 06a8790893ef..1e1421b06565 100644 --- a/include/scsi/scsi_eh.h +++ b/include/scsi/scsi_eh.h @@ -27,10 +27,10 @@ struct scsi_sense_hdr { /* See SPC-3 section 4.5 */ u8 additional_length; /* always 0 for fixed sense format */ }; -static inline int scsi_sense_valid(struct scsi_sense_hdr *sshdr) +static inline bool scsi_sense_valid(const struct scsi_sense_hdr *sshdr) { if (!sshdr) - return 0; + return false; return (sshdr->response_code & 0x70) == 0x70; } @@ -42,12 +42,12 @@ extern void scsi_eh_flush_done_q(struct list_head *done_q); extern void scsi_report_bus_reset(struct Scsi_Host *, int); extern void scsi_report_device_reset(struct Scsi_Host *, int, int); extern int scsi_block_when_processing_errors(struct scsi_device *); -extern int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, - struct scsi_sense_hdr *sshdr); -extern int scsi_command_normalize_sense(struct scsi_cmnd *cmd, - struct scsi_sense_hdr *sshdr); +extern bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, + struct scsi_sense_hdr *sshdr); +extern bool scsi_command_normalize_sense(const struct scsi_cmnd *cmd, + struct scsi_sense_hdr *sshdr); -static inline int scsi_sense_is_deferred(struct scsi_sense_hdr *sshdr) +static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr) { return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1)); } @@ -60,15 +60,7 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq); -/* - * Reset request from external source - 
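[Editorial aside, not part of the patch: the sense helpers above now return bool and scsi_print_sense_hdr() takes the device and an optional name prefix. A hypothetical error-path fragment under the new signatures.]

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

/* Hypothetical completion handler fragment: decode and print sense data
 * using the bool-returning normalize helpers. */
static void example_log_sense(const struct scsi_cmnd *cmd)
{
	struct scsi_sense_hdr sshdr;

	if (scsi_command_normalize_sense(cmd, &sshdr) &&
	    scsi_sense_valid(&sshdr))
		scsi_print_sense_hdr(cmd->device, NULL, &sshdr);
}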
*/ -#define SCSI_TRY_RESET_DEVICE 1 -#define SCSI_TRY_RESET_BUS 2 -#define SCSI_TRY_RESET_HOST 3 -#define SCSI_TRY_RESET_TARGET 4 - -extern int scsi_reset_provider(struct scsi_device *, int); +extern int scsi_ioctl_reset(struct scsi_device *, int __user *); struct scsi_eh_save { /* saved state */ diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 5e362489ee88..e939d2b3757a 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -46,12 +46,6 @@ struct blk_queue_tags; #define DISABLE_CLUSTERING 0 #define ENABLE_CLUSTERING 1 -enum { - SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */ - SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */ - SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshold event */ -}; - struct scsi_host_template { struct module *module; const char *name; @@ -195,7 +189,7 @@ struct scsi_host_template { * Things currently recommended to be handled at this time include: * * 1. Setting the device queue depth. Proper setting of this is - * described in the comments for scsi_adjust_queue_depth. + * described in the comments for scsi_change_queue_depth. * 2. Determining if the device supports the various synchronous * negotiation protocols. The device struct will already have * responded to INQUIRY and the results of the standard items @@ -281,7 +275,7 @@ struct scsi_host_template { * * Status: OPTIONAL */ - int (* change_queue_depth)(struct scsi_device *, int, int); + int (* change_queue_depth)(struct scsi_device *, int); /* * Fill in this function to allow the changing of tag types @@ -422,6 +416,16 @@ struct scsi_host_template { unsigned char present; /* + * Let the block layer assigns tags to all commands. + */ + unsigned use_blk_tags:1; + + /* + * Track QUEUE_FULL events and reduce queue depth on demand. + */ + unsigned track_queue_depth:1; + + /* * This specifies the mode that a LLD supports. */ unsigned supported_mode:2; @@ -451,11 +455,6 @@ struct scsi_host_template { */ unsigned skip_settle_delay:1; - /* - * True if we are using ordered write support. - */ - unsigned ordered_tag:1; - /* True if the controller does not support WRITE SAME */ unsigned no_write_same:1; @@ -555,7 +554,7 @@ struct Scsi_Host { * __devices is protected by the host_lock, but you should * usually use scsi_device_lookup / shost_for_each_device * to access it and don't care about locking yourself. - * In the rare case of beeing in irq context you can use + * In the rare case of being in irq context you can use * their __ prefixed variants with the lock held. NEVER * access this list directly from a driver. */ @@ -638,6 +637,14 @@ struct Scsi_Host { short unsigned int sg_prot_tablesize; unsigned int max_sectors; unsigned long dma_boundary; + /* + * In scsi-mq mode, the number of hardware queues supported by the LLD. + * + * Note: it is assumed that each hardware queue has a queue depth of + * can_queue. In other words, the total queue depth per host + * is nr_hw_queues * can_queue. + */ + unsigned nr_hw_queues; /* * Used to assign serial numbers to the cmds. * Protected by the host lock. 
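[Editorial aside, not part of the patch: with the new use_blk_tags and track_queue_depth template flags, and change_queue_depth() losing its reason argument, a driver can point the hook straight at the midlayer helper. A hypothetical template fragment; note the nr_hw_queues comment above, where the effective scsi-mq host depth is nr_hw_queues * can_queue (e.g. 4 * 32 = 128 outstanding commands).]

#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>

/* Hypothetical host template fragment: block layer assigns tags and
 * tracks QUEUE_FULL; queue depth changes go through the generic helper. */
static struct scsi_host_template example_sht = {
	.name			= "example",
	.can_queue		= 32,
	.cmd_per_lun		= 8,
	.use_blk_tags		= 1,
	.track_queue_depth	= 1,
	.change_queue_depth	= scsi_change_queue_depth,
};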
@@ -647,7 +654,6 @@ struct Scsi_Host { unsigned active_mode:2; unsigned unchecked_isa_dma:1; unsigned use_clustering:1; - unsigned use_blk_tcq:1; /* * Host has requested that no further requests come through for the @@ -662,11 +668,6 @@ struct Scsi_Host { */ unsigned reverse_ordering:1; - /* - * Ordered write support - */ - unsigned ordered_tag:1; - /* Task mgmt function in progress */ unsigned tmf_in_progress:1; diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h index b9006848b813..8d19d1d233c3 100644 --- a/include/scsi/scsi_ioctl.h +++ b/include/scsi/scsi_ioctl.h @@ -40,9 +40,9 @@ typedef struct scsi_fctargaddress { unsigned char host_wwn[8]; // include NULL term. } Scsi_FCTargAddress; +int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev, + int cmd, bool ndelay); extern int scsi_ioctl(struct scsi_device *, int, void __user *); -extern int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd, - void __user *arg, int ndelay); #endif /* __KERNEL__ */ #endif /* _SCSI_IOCTL_H */ diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index 56ed843969ca..fe4a70299419 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h @@ -16,20 +16,16 @@ #ifdef CONFIG_BLOCK +int scsi_change_queue_type(struct scsi_device *sdev, int tag_type); + /** * scsi_get_tag_type - get the type of tag the device supports * @sdev: the scsi device - * - * Notes: - * If the drive only supports simple tags, returns MSG_SIMPLE_TAG - * if it supports all tag types, returns MSG_ORDERED_TAG. */ static inline int scsi_get_tag_type(struct scsi_device *sdev) { if (!sdev->tagged_supported) return 0; - if (sdev->ordered_tags) - return MSG_ORDERED_TAG; if (sdev->simple_tags) return MSG_SIMPLE_TAG; return 0; @@ -39,90 +35,33 @@ static inline void scsi_set_tag_type(struct scsi_device *sdev, int tag) { switch (tag) { case MSG_ORDERED_TAG: - sdev->ordered_tags = 1; - /* fall through */ case MSG_SIMPLE_TAG: sdev->simple_tags = 1; break; case 0: /* fall through */ default: - sdev->ordered_tags = 0; sdev->simple_tags = 0; break; } } -/** - * scsi_activate_tcq - turn on tag command queueing - * @SDpnt: device to turn on TCQ for - * @depth: queue depth - * - * Notes: - * Eventually, I hope depth would be the maximum depth - * the device could cope with and the real queue depth - * would be adjustable from 0 to depth. - **/ -static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth) -{ - if (!sdev->tagged_supported) - return; - - if (shost_use_blk_mq(sdev->host)) - queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, sdev->request_queue); - else if (!blk_queue_tagged(sdev->request_queue)) - blk_queue_init_tags(sdev->request_queue, depth, - sdev->host->bqt); - - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); -} - -/** - * scsi_deactivate_tcq - turn off tag command queueing - * @SDpnt: device to turn off TCQ for - **/ -static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth) -{ - if (blk_queue_tagged(sdev->request_queue)) - blk_queue_free_tags(sdev->request_queue); - scsi_adjust_queue_depth(sdev, 0, depth); -} - -/** - * scsi_populate_tag_msg - place a tag message in a buffer - * @SCpnt: pointer to the Scsi_Cmnd for the tag - * @msg: pointer to the area to place the tag - * - * Notes: - * designed to create the correct type of tag message for the - * particular request. Returns the size of the tag message. - * May return 0 if TCQ is disabled for this device. 
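[Editorial aside, not part of the patch: scsi_find_tag()/scsi_host_find_tag() now expect a tag produced by blk_mq_unique_tag(), which folds the hardware queue index and the per-queue tag into one value (see the blk_mq_unique_tag_to_hwq()/_to_tag() helpers used below). A hypothetical completion-path lookup.]

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

/* Hypothetical driver helper: map a request back to its scsi_cmnd via
 * the combined hwq+tag value. */
static struct scsi_cmnd *example_cmd_from_rq(struct Scsi_Host *shost,
					     struct request *rq)
{
	return scsi_host_find_tag(shost, blk_mq_unique_tag(rq));
}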
- **/ -static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg) -{ - struct request *req = cmd->request; - - if (blk_rq_tagged(req)) { - *msg++ = MSG_SIMPLE_TAG; - *msg++ = req->tag; - return 2; - } - - return 0; -} static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost, - unsigned int hw_ctx, int tag) + int unique_tag) { - struct request *req; + u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag); + struct request *req = NULL; - req = blk_mq_tag_to_rq(shost->tag_set.tags[hw_ctx], tag); + if (hwq < shost->tag_set.nr_hw_queues) + req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq], + blk_mq_unique_tag_to_tag(unique_tag)); return req ? (struct scsi_cmnd *)req->special : NULL; } /** * scsi_find_tag - find a tagged command by device * @SDpnt: pointer to the ScSI device - * @tag: the tag number + * @tag: tag generated by blk_mq_unique_tag() * * Notes: * Only works with tags allocated by the generic blk layer. @@ -133,9 +72,9 @@ static inline struct scsi_cmnd *scsi_find_tag(struct scsi_device *sdev, int tag) if (tag != SCSI_NO_TAG) { if (shost_use_blk_mq(sdev->host)) - return scsi_mq_find_tag(sdev->host, 0, tag); + return scsi_mq_find_tag(sdev->host, tag); - req = blk_queue_find_tag(sdev->request_queue, tag); + req = blk_queue_find_tag(sdev->request_queue, tag); return req ? (struct scsi_cmnd *)req->special : NULL; } @@ -174,7 +113,7 @@ static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth) /** * scsi_host_find_tag - find the tagged command by host * @shost: pointer to scsi_host - * @tag: tag of the scsi_cmnd + * @tag: tag generated by blk_mq_unique_tag() * * Notes: * Only works with tags allocated by the generic blk layer. @@ -186,7 +125,7 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost, if (tag != SCSI_NO_TAG) { if (shost_use_blk_mq(shost)) - return scsi_mq_find_tag(shost, 0, tag); + return scsi_mq_find_tag(shost, tag); req = blk_map_queue_find_tag(shost->bqt, tag); return req ? (struct scsi_cmnd *)req->special : NULL; } diff --git a/include/scsi/scsi_transport_spi.h b/include/scsi/scsi_transport_spi.h index 7497a383b1a4..a4fa52b4d5c5 100644 --- a/include/scsi/scsi_transport_spi.h +++ b/include/scsi/scsi_transport_spi.h @@ -157,5 +157,6 @@ int spi_populate_width_msg(unsigned char *msg, int width); int spi_populate_sync_msg(unsigned char *msg, int period, int offset); int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, int width, int options); +int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd); #endif /* SCSI_TRANSPORT_SPI_H */ diff --git a/include/scsi/sg.h b/include/scsi/sg.h index 750e5db7c6bf..3afec7032448 100644 --- a/include/scsi/sg.h +++ b/include/scsi/sg.h @@ -164,12 +164,15 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */ /* Returns -EBUSY if occupied. 3rd argument pointer to int (see next) */ #define SG_SCSI_RESET 0x2284 -/* Associated values that can be given to SG_SCSI_RESET follow */ +/* Associated values that can be given to SG_SCSI_RESET follow. + * SG_SCSI_RESET_NO_ESCALATE may be OR-ed to the _DEVICE, _TARGET, _BUS + * or _HOST reset value so only that action is attempted. 
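[Editorial aside, not part of the patch: the new SG_SCSI_RESET_NO_ESCALATE value documented above is OR-ed into the reset type by userspace so only the requested level is attempted. A small hypothetical userspace helper.]

#include <sys/ioctl.h>
#include <scsi/sg.h>

/* Hypothetical userspace helper: ask the sg driver for a device reset
 * and forbid escalation to target, bus or host reset on failure. */
int example_reset_device_no_escalate(int sg_fd)
{
	int val = SG_SCSI_RESET_DEVICE | SG_SCSI_RESET_NO_ESCALATE;

	return ioctl(sg_fd, SG_SCSI_RESET, &val);
}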
*/ #define SG_SCSI_RESET_NOTHING 0 #define SG_SCSI_RESET_DEVICE 1 #define SG_SCSI_RESET_BUS 2 #define SG_SCSI_RESET_HOST 3 #define SG_SCSI_RESET_TARGET 4 +#define SG_SCSI_RESET_NO_ESCALATE 0x100 /* synchronous SCSI command ioctl, (only in version 3 interface) */ #define SG_IO 0x2285 /* similar effect as write() followed by read() */ diff --git a/include/soc/at91/at91rm9200_sdramc.h b/include/soc/at91/at91rm9200_sdramc.h new file mode 100644 index 000000000000..aa047f458f1b --- /dev/null +++ b/include/soc/at91/at91rm9200_sdramc.h @@ -0,0 +1,63 @@ +/* + * arch/arm/mach-at91/include/mach/at91rm9200_sdramc.h + * + * Copyright (C) 2005 Ivan Kokshaysky + * Copyright (C) SAN People + * + * Memory Controllers (SDRAMC only) - System peripherals registers. + * Based on AT91RM9200 datasheet revision E. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef AT91RM9200_SDRAMC_H +#define AT91RM9200_SDRAMC_H + +/* SDRAM Controller registers */ +#define AT91RM9200_SDRAMC_MR 0x90 /* Mode Register */ +#define AT91RM9200_SDRAMC_MODE (0xf << 0) /* Command Mode */ +#define AT91RM9200_SDRAMC_MODE_NORMAL (0 << 0) +#define AT91RM9200_SDRAMC_MODE_NOP (1 << 0) +#define AT91RM9200_SDRAMC_MODE_PRECHARGE (2 << 0) +#define AT91RM9200_SDRAMC_MODE_LMR (3 << 0) +#define AT91RM9200_SDRAMC_MODE_REFRESH (4 << 0) +#define AT91RM9200_SDRAMC_DBW (1 << 4) /* Data Bus Width */ +#define AT91RM9200_SDRAMC_DBW_32 (0 << 4) +#define AT91RM9200_SDRAMC_DBW_16 (1 << 4) + +#define AT91RM9200_SDRAMC_TR 0x94 /* Refresh Timer Register */ +#define AT91RM9200_SDRAMC_COUNT (0xfff << 0) /* Refresh Timer Count */ + +#define AT91RM9200_SDRAMC_CR 0x98 /* Configuration Register */ +#define AT91RM9200_SDRAMC_NC (3 << 0) /* Number of Column Bits */ +#define AT91RM9200_SDRAMC_NC_8 (0 << 0) +#define AT91RM9200_SDRAMC_NC_9 (1 << 0) +#define AT91RM9200_SDRAMC_NC_10 (2 << 0) +#define AT91RM9200_SDRAMC_NC_11 (3 << 0) +#define AT91RM9200_SDRAMC_NR (3 << 2) /* Number of Row Bits */ +#define AT91RM9200_SDRAMC_NR_11 (0 << 2) +#define AT91RM9200_SDRAMC_NR_12 (1 << 2) +#define AT91RM9200_SDRAMC_NR_13 (2 << 2) +#define AT91RM9200_SDRAMC_NB (1 << 4) /* Number of Banks */ +#define AT91RM9200_SDRAMC_NB_2 (0 << 4) +#define AT91RM9200_SDRAMC_NB_4 (1 << 4) +#define AT91RM9200_SDRAMC_CAS (3 << 5) /* CAS Latency */ +#define AT91RM9200_SDRAMC_CAS_2 (2 << 5) +#define AT91RM9200_SDRAMC_TWR (0xf << 7) /* Write Recovery Delay */ +#define AT91RM9200_SDRAMC_TRC (0xf << 11) /* Row Cycle Delay */ +#define AT91RM9200_SDRAMC_TRP (0xf << 15) /* Row Precharge Delay */ +#define AT91RM9200_SDRAMC_TRCD (0xf << 19) /* Row to Column Delay */ +#define AT91RM9200_SDRAMC_TRAS (0xf << 23) /* Active to Precharge Delay */ +#define AT91RM9200_SDRAMC_TXSR (0xf << 27) /* Exit Self Refresh to Active Delay */ + +#define AT91RM9200_SDRAMC_SRR 0x9c /* Self Refresh Register */ +#define AT91RM9200_SDRAMC_LPR 0xa0 /* Low Power Register */ +#define AT91RM9200_SDRAMC_IER 0xa4 /* Interrupt Enable Register */ +#define AT91RM9200_SDRAMC_IDR 0xa8 /* Interrupt Disable Register */ +#define AT91RM9200_SDRAMC_IMR 0xac /* Interrupt Mask Register */ +#define AT91RM9200_SDRAMC_ISR 0xb0 /* Interrupt Status Register */ + +#endif diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h new file mode 100644 index 000000000000..0210797abf2e --- /dev/null +++ 
b/include/soc/at91/at91sam9_ddrsdr.h @@ -0,0 +1,124 @@ +/* + * Header file for the Atmel DDR/SDR SDRAM Controller + * + * Copyright (C) 2010 Atmel Corporation + * Nicolas Ferre <nicolas.ferre@atmel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef AT91SAM9_DDRSDR_H +#define AT91SAM9_DDRSDR_H + +#define AT91_DDRSDRC_MR 0x00 /* Mode Register */ +#define AT91_DDRSDRC_MODE (0x7 << 0) /* Command Mode */ +#define AT91_DDRSDRC_MODE_NORMAL 0 +#define AT91_DDRSDRC_MODE_NOP 1 +#define AT91_DDRSDRC_MODE_PRECHARGE 2 +#define AT91_DDRSDRC_MODE_LMR 3 +#define AT91_DDRSDRC_MODE_REFRESH 4 +#define AT91_DDRSDRC_MODE_EXT_LMR 5 +#define AT91_DDRSDRC_MODE_DEEP 6 + +#define AT91_DDRSDRC_RTR 0x04 /* Refresh Timer Register */ +#define AT91_DDRSDRC_COUNT (0xfff << 0) /* Refresh Timer Counter */ + +#define AT91_DDRSDRC_CR 0x08 /* Configuration Register */ +#define AT91_DDRSDRC_NC (3 << 0) /* Number of Column Bits */ +#define AT91_DDRSDRC_NC_SDR8 (0 << 0) +#define AT91_DDRSDRC_NC_SDR9 (1 << 0) +#define AT91_DDRSDRC_NC_SDR10 (2 << 0) +#define AT91_DDRSDRC_NC_SDR11 (3 << 0) +#define AT91_DDRSDRC_NC_DDR9 (0 << 0) +#define AT91_DDRSDRC_NC_DDR10 (1 << 0) +#define AT91_DDRSDRC_NC_DDR11 (2 << 0) +#define AT91_DDRSDRC_NC_DDR12 (3 << 0) +#define AT91_DDRSDRC_NR (3 << 2) /* Number of Row Bits */ +#define AT91_DDRSDRC_NR_11 (0 << 2) +#define AT91_DDRSDRC_NR_12 (1 << 2) +#define AT91_DDRSDRC_NR_13 (2 << 2) +#define AT91_DDRSDRC_NR_14 (3 << 2) +#define AT91_DDRSDRC_CAS (7 << 4) /* CAS Latency */ +#define AT91_DDRSDRC_CAS_2 (2 << 4) +#define AT91_DDRSDRC_CAS_3 (3 << 4) +#define AT91_DDRSDRC_CAS_25 (6 << 4) +#define AT91_DDRSDRC_RST_DLL (1 << 7) /* Reset DLL */ +#define AT91_DDRSDRC_DICDS (1 << 8) /* Output impedance control */ +#define AT91_DDRSDRC_DIS_DLL (1 << 9) /* Disable DLL [SAM9 Only] */ +#define AT91_DDRSDRC_OCD (1 << 12) /* Off-Chip Driver [SAM9 Only] */ +#define AT91_DDRSDRC_DQMS (1 << 16) /* Mask Data is Shared [SAM9 Only] */ +#define AT91_DDRSDRC_ACTBST (1 << 18) /* Active Bank X to Burst Stop Read Access Bank Y [SAM9 Only] */ + +#define AT91_DDRSDRC_T0PR 0x0C /* Timing 0 Register */ +#define AT91_DDRSDRC_TRAS (0xf << 0) /* Active to Precharge delay */ +#define AT91_DDRSDRC_TRCD (0xf << 4) /* Row to Column delay */ +#define AT91_DDRSDRC_TWR (0xf << 8) /* Write recovery delay */ +#define AT91_DDRSDRC_TRC (0xf << 12) /* Row cycle delay */ +#define AT91_DDRSDRC_TRP (0xf << 16) /* Row precharge delay */ +#define AT91_DDRSDRC_TRRD (0xf << 20) /* Active BankA to BankB */ +#define AT91_DDRSDRC_TWTR (0x7 << 24) /* Internal Write to Read delay */ +#define AT91_DDRSDRC_RED_WRRD (0x1 << 27) /* Reduce Write to Read Delay [SAM9 Only] */ +#define AT91_DDRSDRC_TMRD (0xf << 28) /* Load mode to active/refresh delay */ + +#define AT91_DDRSDRC_T1PR 0x10 /* Timing 1 Register */ +#define AT91_DDRSDRC_TRFC (0x1f << 0) /* Row Cycle Delay */ +#define AT91_DDRSDRC_TXSNR (0xff << 8) /* Exit self-refresh to non-read */ +#define AT91_DDRSDRC_TXSRD (0xff << 16) /* Exit self-refresh to read */ +#define AT91_DDRSDRC_TXP (0xf << 24) /* Exit power-down delay */ + +#define AT91_DDRSDRC_T2PR 0x14 /* Timing 2 Register [SAM9 Only] */ +#define AT91_DDRSDRC_TXARD (0xf << 0) /* Exit active power down delay to read command in mode "Fast Exit" */ +#define AT91_DDRSDRC_TXARDS (0xf << 4) /* Exit active power down delay to read command 
in mode "Slow Exit" */ +#define AT91_DDRSDRC_TRPA (0xf << 8) /* Row Precharge All delay */ +#define AT91_DDRSDRC_TRTP (0x7 << 12) /* Read to Precharge delay */ + +#define AT91_DDRSDRC_LPR 0x1C /* Low Power Register */ +#define AT91_DDRSDRC_LPCB (3 << 0) /* Low-power Configurations */ +#define AT91_DDRSDRC_LPCB_DISABLE 0 +#define AT91_DDRSDRC_LPCB_SELF_REFRESH 1 +#define AT91_DDRSDRC_LPCB_POWER_DOWN 2 +#define AT91_DDRSDRC_LPCB_DEEP_POWER_DOWN 3 +#define AT91_DDRSDRC_CLKFR (1 << 2) /* Clock Frozen */ +#define AT91_DDRSDRC_PASR (7 << 4) /* Partial Array Self Refresh */ +#define AT91_DDRSDRC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */ +#define AT91_DDRSDRC_DS (3 << 10) /* Drive Strength */ +#define AT91_DDRSDRC_TIMEOUT (3 << 12) /* Time to define when Low Power Mode is enabled */ +#define AT91_DDRSDRC_TIMEOUT_0_CLK_CYCLES (0 << 12) +#define AT91_DDRSDRC_TIMEOUT_64_CLK_CYCLES (1 << 12) +#define AT91_DDRSDRC_TIMEOUT_128_CLK_CYCLES (2 << 12) +#define AT91_DDRSDRC_APDE (1 << 16) /* Active power down exit time */ +#define AT91_DDRSDRC_UPD_MR (3 << 20) /* Update load mode register and extended mode register */ + +#define AT91_DDRSDRC_MDR 0x20 /* Memory Device Register */ +#define AT91_DDRSDRC_MD (3 << 0) /* Memory Device Type */ +#define AT91_DDRSDRC_MD_SDR 0 +#define AT91_DDRSDRC_MD_LOW_POWER_SDR 1 +#define AT91_DDRSDRC_MD_LOW_POWER_DDR 3 +#define AT91_DDRSDRC_MD_DDR2 6 /* [SAM9 Only] */ +#define AT91_DDRSDRC_DBW (1 << 4) /* Data Bus Width */ +#define AT91_DDRSDRC_DBW_32BITS (0 << 4) +#define AT91_DDRSDRC_DBW_16BITS (1 << 4) + +#define AT91_DDRSDRC_DLL 0x24 /* DLL Information Register */ +#define AT91_DDRSDRC_MDINC (1 << 0) /* Master Delay increment */ +#define AT91_DDRSDRC_MDDEC (1 << 1) /* Master Delay decrement */ +#define AT91_DDRSDRC_MDOVF (1 << 2) /* Master Delay Overflow */ +#define AT91_DDRSDRC_MDVAL (0xff << 8) /* Master Delay value */ + +#define AT91_DDRSDRC_HS 0x2C /* High Speed Register [SAM9 Only] */ +#define AT91_DDRSDRC_DIS_ATCP_RD (1 << 2) /* Anticip read access is disabled */ + +#define AT91_DDRSDRC_DELAY(n) (0x30 + (0x4 * (n))) /* Delay I/O Register n */ + +#define AT91_DDRSDRC_WPMR 0xE4 /* Write Protect Mode Register [SAM9 Only] */ +#define AT91_DDRSDRC_WP (1 << 0) /* Write protect enable */ +#define AT91_DDRSDRC_WPKEY (0xffffff << 8) /* Write protect key */ +#define AT91_DDRSDRC_KEY (0x444452 << 8) /* Write protect key = "DDR" */ + +#define AT91_DDRSDRC_WPSR 0xE8 /* Write Protect Status Register [SAM9 Only] */ +#define AT91_DDRSDRC_WPVS (1 << 0) /* Write protect violation status */ +#define AT91_DDRSDRC_WPVSRC (0xffff << 8) /* Write protect violation source */ + +#endif diff --git a/include/soc/at91/at91sam9_sdramc.h b/include/soc/at91/at91sam9_sdramc.h new file mode 100644 index 000000000000..3d085a9a7450 --- /dev/null +++ b/include/soc/at91/at91sam9_sdramc.h @@ -0,0 +1,85 @@ +/* + * arch/arm/mach-at91/include/mach/at91sam9_sdramc.h + * + * Copyright (C) 2007 Andrew Victor + * Copyright (C) 2007 Atmel Corporation. + * + * SDRAM Controllers (SDRAMC) - System peripherals registers. + * Based on AT91SAM9261 datasheet revision D. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
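A decoding sketch for the AT91_DDRSDRC_* field masks above (not part of the patch); "ddrc" is assumed to be an ioremap()ed controller base.

#include <linux/io.h>
#include <soc/at91/at91sam9_ddrsdr.h>

/* Report the programmed CAS latency, 0 for the CAS-2.5/reserved encodings. */
static unsigned int at91_ddrc_cas(void __iomem *ddrc)
{
	u32 cr = readl(ddrc + AT91_DDRSDRC_CR);

	switch (cr & AT91_DDRSDRC_CAS) {
	case AT91_DDRSDRC_CAS_2:
		return 2;
	case AT91_DDRSDRC_CAS_3:
		return 3;
	default:
		return 0;
	}
}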
+ */ + +#ifndef AT91SAM9_SDRAMC_H +#define AT91SAM9_SDRAMC_H + +/* SDRAM Controller (SDRAMC) registers */ +#define AT91_SDRAMC_MR 0x00 /* SDRAM Controller Mode Register */ +#define AT91_SDRAMC_MODE (0xf << 0) /* Command Mode */ +#define AT91_SDRAMC_MODE_NORMAL 0 +#define AT91_SDRAMC_MODE_NOP 1 +#define AT91_SDRAMC_MODE_PRECHARGE 2 +#define AT91_SDRAMC_MODE_LMR 3 +#define AT91_SDRAMC_MODE_REFRESH 4 +#define AT91_SDRAMC_MODE_EXT_LMR 5 +#define AT91_SDRAMC_MODE_DEEP 6 + +#define AT91_SDRAMC_TR 0x04 /* SDRAM Controller Refresh Timer Register */ +#define AT91_SDRAMC_COUNT (0xfff << 0) /* Refresh Timer Counter */ + +#define AT91_SDRAMC_CR 0x08 /* SDRAM Controller Configuration Register */ +#define AT91_SDRAMC_NC (3 << 0) /* Number of Column Bits */ +#define AT91_SDRAMC_NC_8 (0 << 0) +#define AT91_SDRAMC_NC_9 (1 << 0) +#define AT91_SDRAMC_NC_10 (2 << 0) +#define AT91_SDRAMC_NC_11 (3 << 0) +#define AT91_SDRAMC_NR (3 << 2) /* Number of Row Bits */ +#define AT91_SDRAMC_NR_11 (0 << 2) +#define AT91_SDRAMC_NR_12 (1 << 2) +#define AT91_SDRAMC_NR_13 (2 << 2) +#define AT91_SDRAMC_NB (1 << 4) /* Number of Banks */ +#define AT91_SDRAMC_NB_2 (0 << 4) +#define AT91_SDRAMC_NB_4 (1 << 4) +#define AT91_SDRAMC_CAS (3 << 5) /* CAS Latency */ +#define AT91_SDRAMC_CAS_1 (1 << 5) +#define AT91_SDRAMC_CAS_2 (2 << 5) +#define AT91_SDRAMC_CAS_3 (3 << 5) +#define AT91_SDRAMC_DBW (1 << 7) /* Data Bus Width */ +#define AT91_SDRAMC_DBW_32 (0 << 7) +#define AT91_SDRAMC_DBW_16 (1 << 7) +#define AT91_SDRAMC_TWR (0xf << 8) /* Write Recovery Delay */ +#define AT91_SDRAMC_TRC (0xf << 12) /* Row Cycle Delay */ +#define AT91_SDRAMC_TRP (0xf << 16) /* Row Precharge Delay */ +#define AT91_SDRAMC_TRCD (0xf << 20) /* Row to Column Delay */ +#define AT91_SDRAMC_TRAS (0xf << 24) /* Active to Precharge Delay */ +#define AT91_SDRAMC_TXSR (0xf << 28) /* Exit Self Refresh to Active Delay */ + +#define AT91_SDRAMC_LPR 0x10 /* SDRAM Controller Low Power Register */ +#define AT91_SDRAMC_LPCB (3 << 0) /* Low-power Configurations */ +#define AT91_SDRAMC_LPCB_DISABLE 0 +#define AT91_SDRAMC_LPCB_SELF_REFRESH 1 +#define AT91_SDRAMC_LPCB_POWER_DOWN 2 +#define AT91_SDRAMC_LPCB_DEEP_POWER_DOWN 3 +#define AT91_SDRAMC_PASR (7 << 4) /* Partial Array Self Refresh */ +#define AT91_SDRAMC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */ +#define AT91_SDRAMC_DS (3 << 10) /* Drive Strength */ +#define AT91_SDRAMC_TIMEOUT (3 << 12) /* Time to define when Low Power Mode is enabled */ +#define AT91_SDRAMC_TIMEOUT_0_CLK_CYCLES (0 << 12) +#define AT91_SDRAMC_TIMEOUT_64_CLK_CYCLES (1 << 12) +#define AT91_SDRAMC_TIMEOUT_128_CLK_CYCLES (2 << 12) + +#define AT91_SDRAMC_IER 0x14 /* SDRAM Controller Interrupt Enable Register */ +#define AT91_SDRAMC_IDR 0x18 /* SDRAM Controller Interrupt Disable Register */ +#define AT91_SDRAMC_IMR 0x1C /* SDRAM Controller Interrupt Mask Register */ +#define AT91_SDRAMC_ISR 0x20 /* SDRAM Controller Interrupt Status Register */ +#define AT91_SDRAMC_RES (1 << 0) /* Refresh Error Status */ + +#define AT91_SDRAMC_MDR 0x24 /* SDRAM Memory Device Register */ +#define AT91_SDRAMC_MD (3 << 0) /* Memory Device Type */ +#define AT91_SDRAMC_MD_SDRAM 0 +#define AT91_SDRAMC_MD_LOW_POWER_SDRAM 1 + +#endif diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h new file mode 100644 index 000000000000..63deb8d9f82a --- /dev/null +++ b/include/soc/tegra/mc.h @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2014 NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General 
Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __SOC_TEGRA_MC_H__ +#define __SOC_TEGRA_MC_H__ + +#include <linux/types.h> + +struct clk; +struct device; +struct page; + +struct tegra_smmu_enable { + unsigned int reg; + unsigned int bit; +}; + +/* latency allowance */ +struct tegra_mc_la { + unsigned int reg; + unsigned int shift; + unsigned int mask; + unsigned int def; +}; + +struct tegra_mc_client { + unsigned int id; + const char *name; + unsigned int swgroup; + + unsigned int fifo_size; + + struct tegra_smmu_enable smmu; + struct tegra_mc_la la; +}; + +struct tegra_smmu_swgroup { + unsigned int swgroup; + unsigned int reg; +}; + +struct tegra_smmu_ops { + void (*flush_dcache)(struct page *page, unsigned long offset, + size_t size); +}; + +struct tegra_smmu_soc { + const struct tegra_mc_client *clients; + unsigned int num_clients; + + const struct tegra_smmu_swgroup *swgroups; + unsigned int num_swgroups; + + bool supports_round_robin_arbitration; + bool supports_request_limit; + + unsigned int num_asids; + + const struct tegra_smmu_ops *ops; +}; + +struct tegra_mc; +struct tegra_smmu; + +#ifdef CONFIG_TEGRA_IOMMU_SMMU +struct tegra_smmu *tegra_smmu_probe(struct device *dev, + const struct tegra_smmu_soc *soc, + struct tegra_mc *mc); +#else +static inline struct tegra_smmu * +tegra_smmu_probe(struct device *dev, const struct tegra_smmu_soc *soc, + struct tegra_mc *mc) +{ + return NULL; +} +#endif + +struct tegra_mc_soc { + const struct tegra_mc_client *clients; + unsigned int num_clients; + + const unsigned int *emem_regs; + unsigned int num_emem_regs; + + unsigned int num_address_bits; + unsigned int atom_size; + + const struct tegra_smmu_soc *smmu; +}; + +struct tegra_mc { + struct device *dev; + struct tegra_smmu *smmu; + void __iomem *regs; + struct clk *clk; + int irq; + + const struct tegra_mc_soc *soc; + unsigned long tick; +}; + +#endif /* __SOC_TEGRA_MC_H__ */ diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index ae6c3b8ed2f5..396e8f73670a 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -42,12 +42,11 @@ struct snd_compr_ops; * @buffer_size: size of the above buffer * @fragment_size: size of buffer fragment in bytes * @fragments: number of such fragments - * @hw_pointer: offset of last location in buffer where DSP copied data - * @app_pointer: offset of last location in buffer where app wrote data * @total_bytes_available: cumulative number of bytes made available in * the ring buffer * @total_bytes_transferred: cumulative bytes transferred by offload DSP * @sleep: poll sleep + * @private_data: driver private data pointer */ struct snd_compr_runtime { snd_pcm_state_t state; @@ -94,6 +93,8 @@ struct snd_compr_stream { * This can be called in during stream creation only to set codec params * and the stream properties * @get_params: retrieve the codec parameters, mandatory + * @set_metadata: Set the metadata values for a stream + * @get_metadata: retreives the requested metadata values from stream * @trigger: Trigger operations like start, pause, resume, drain, stop. * This callback is mandatory * @pointer: Retrieve current h/w pointer information. Mandatory diff --git a/include/sound/jack.h b/include/sound/jack.h index 58916573db58..218235030ebc 100644 --- a/include/sound/jack.h +++ b/include/sound/jack.h @@ -28,8 +28,23 @@ struct input_dev; /** - * Jack types which can be reported. These values are used as a - * bitmask. 
+ * enum snd_jack_types - Jack types which can be reported + * @SND_JACK_HEADPHONE: Headphone + * @SND_JACK_MICROPHONE: Microphone + * @SND_JACK_HEADSET: Headset + * @SND_JACK_LINEOUT: Line out + * @SND_JACK_MECHANICAL: Mechanical switch + * @SND_JACK_VIDEOOUT: Video out + * @SND_JACK_AVOUT: AV (Audio Video) out + * @SND_JACK_LINEIN: Line in + * @SND_JACK_BTN_0: Button 0 + * @SND_JACK_BTN_1: Button 1 + * @SND_JACK_BTN_2: Button 2 + * @SND_JACK_BTN_3: Button 3 + * @SND_JACK_BTN_4: Button 4 + * @SND_JACK_BTN_5: Button 5 + * + * These values are used as a bitmask. * * Note that this must be kept in sync with the lookup table in * sound/core/jack.c. @@ -90,6 +105,13 @@ static inline void snd_jack_set_parent(struct snd_jack *jack, { } +static inline int snd_jack_set_key(struct snd_jack *jack, + enum snd_jack_types type, + int keytype) +{ + return 0; +} + static inline void snd_jack_report(struct snd_jack *jack, int status) { } diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h new file mode 100644 index 000000000000..afdb416898e0 --- /dev/null +++ b/include/sound/omap-hdmi-audio.h @@ -0,0 +1,43 @@ +/* + * hdmi-audio.c -- OMAP4+ DSS HDMI audio support library + * + * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * + * Author: Jyri Sarha <jsarha@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + */ + +#include <video/omapdss.h> + +#ifndef __OMAP_HDMI_AUDIO_H__ +#define __OMAP_HDMI_AUDIO_H__ + +struct omap_hdmi_audio_ops { + int (*audio_startup)(struct device *dev, + void (*abort_cb)(struct device *dev)); + int (*audio_shutdown)(struct device *dev); + int (*audio_start)(struct device *dev); + void (*audio_stop)(struct device *dev); + int (*audio_config)(struct device *dev, + struct omap_dss_audio *dss_audio); +}; + +/* HDMI audio initalization data */ +struct omap_hdmi_audio_pdata { + struct device *dev; + enum omapdss_version dss_version; + phys_addr_t audio_dma_addr; + + const struct omap_hdmi_audio_ops *ops; +}; + +#endif /* __OMAP_HDMI_AUDIO_H__ */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index e862497f7556..1e7f74acc2ec 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -184,6 +184,8 @@ struct snd_pcm_ops { #define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8) #define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE) #define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE) +#define SNDRV_PCM_FMTBIT_DSD_U16_BE _SNDRV_PCM_FMTBIT(DSD_U16_BE) +#define SNDRV_PCM_FMTBIT_DSD_U32_BE _SNDRV_PCM_FMTBIT(DSD_U32_BE) #ifdef SNDRV_LITTLE_ENDIAN #define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE @@ -416,7 +418,10 @@ struct snd_pcm_substream { struct snd_info_entry *proc_status_entry; struct snd_info_entry *proc_prealloc_entry; struct snd_info_entry *proc_prealloc_max_entry; +#ifdef CONFIG_SND_PCM_XRUN_DEBUG + struct snd_info_entry *proc_xrun_injection_entry; #endif +#endif /* CONFIG_SND_VERBOSE_PROCFS */ /* misc flags */ unsigned int hw_opened: 1; }; @@ -503,6 +508,7 @@ int snd_pcm_status(struct snd_pcm_substream *substream, int snd_pcm_start(struct snd_pcm_substream *substream); int snd_pcm_stop(struct 
snd_pcm_substream *substream, snd_pcm_state_t status); int snd_pcm_drain_done(struct snd_pcm_substream *substream); +int snd_pcm_stop_xrun(struct snd_pcm_substream *substream); #ifdef CONFIG_PM int snd_pcm_suspend(struct snd_pcm_substream *substream); int snd_pcm_suspend_all(struct snd_pcm *pcm); @@ -533,6 +539,12 @@ snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size) * PCM library */ +/** + * snd_pcm_stream_linked - Check whether the substream is linked with others + * @substream: substream to check + * + * Returns true if the given substream is being linked with others. + */ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream) { return substream->group != &substream->self_group; @@ -543,6 +555,16 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream); void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream); void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream); unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream); + +/** + * snd_pcm_stream_lock_irqsave - Lock the PCM stream + * @substream: PCM substream + * @flags: irq flags + * + * This locks the PCM stream like snd_pcm_stream_lock() but with the local + * IRQ (only when nonatomic is false). In nonatomic case, this is identical + * as snd_pcm_stream_lock(). + */ #define snd_pcm_stream_lock_irqsave(substream, flags) \ do { \ typecheck(unsigned long, flags); \ @@ -551,9 +573,25 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream); void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream, unsigned long flags); +/** + * snd_pcm_group_for_each_entry - iterate over the linked substreams + * @s: the iterator + * @substream: the substream + * + * Iterate over the all linked substreams to the given @substream. + * When @substream isn't linked with any others, this gives returns @substream + * itself once. + */ #define snd_pcm_group_for_each_entry(s, substream) \ list_for_each_entry(s, &substream->group->substreams, link_list) +/** + * snd_pcm_running - Check whether the substream is in a running state + * @substream: substream to check + * + * Returns true if the given substream is in the state RUNNING, or in the + * state DRAINING for playback. 
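A usage sketch of snd_pcm_group_for_each_entry() as documented above, for hardware that can start all linked substreams in one shot; the "foo" trigger callback is hypothetical, and snd_pcm_trigger_done() is documented further below.

#include <sound/pcm.h>

static int foo_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_substream *s;

	if (cmd != SNDRV_PCM_TRIGGER_START)
		return -EINVAL;

	snd_pcm_group_for_each_entry(s, substream) {
		/* program the hardware start for s here (driver specific) */
		snd_pcm_trigger_done(s, substream);	/* handled by this master */
	}
	return 0;
}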
+ */ static inline int snd_pcm_running(struct snd_pcm_substream *substream) { return (substream->runtime->status->state == SNDRV_PCM_STATE_RUNNING || @@ -561,45 +599,81 @@ static inline int snd_pcm_running(struct snd_pcm_substream *substream) substream->stream == SNDRV_PCM_STREAM_PLAYBACK)); } +/** + * bytes_to_samples - Unit conversion of the size from bytes to samples + * @runtime: PCM runtime instance + * @size: size in bytes + */ static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size) { return size * 8 / runtime->sample_bits; } +/** + * bytes_to_frames - Unit conversion of the size from bytes to frames + * @runtime: PCM runtime instance + * @size: size in bytes + */ static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size) { return size * 8 / runtime->frame_bits; } +/** + * samples_to_bytes - Unit conversion of the size from samples to bytes + * @runtime: PCM runtime instance + * @size: size in samples + */ static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size) { return size * runtime->sample_bits / 8; } +/** + * frames_to_bytes - Unit conversion of the size from frames to bytes + * @runtime: PCM runtime instance + * @size: size in frames + */ static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size) { return size * runtime->frame_bits / 8; } +/** + * frame_aligned - Check whether the byte size is aligned to frames + * @runtime: PCM runtime instance + * @bytes: size in bytes + */ static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes) { return bytes % runtime->byte_align == 0; } +/** + * snd_pcm_lib_buffer_bytes - Get the buffer size of the current PCM in bytes + * @substream: PCM substream + */ static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return frames_to_bytes(runtime, runtime->buffer_size); } +/** + * snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes + * @substream: PCM substream + */ static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return frames_to_bytes(runtime, runtime->period_size); } -/* - * result is: 0 ... (boundary - 1) +/** + * snd_pcm_playback_avail - Get the available (writable) space for playback + * @runtime: PCM runtime instance + * + * Result is between 0 ... (boundary - 1) */ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime) { @@ -611,8 +685,11 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r return avail; } -/* - * result is: 0 ... (boundary - 1) +/** + * snd_pcm_playback_avail - Get the available (readable) space for capture + * @runtime: PCM runtime instance + * + * Result is between 0 ... 
(boundary - 1) */ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime) { @@ -622,11 +699,19 @@ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *ru return avail; } +/** + * snd_pcm_playback_hw_avail - Get the queued space for playback + * @runtime: PCM runtime instance + */ static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime) { return runtime->buffer_size - snd_pcm_playback_avail(runtime); } +/** + * snd_pcm_capture_hw_avail - Get the free space for capture + * @runtime: PCM runtime instance + */ static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime) { return runtime->buffer_size - snd_pcm_capture_avail(runtime); @@ -706,6 +791,20 @@ static inline int snd_pcm_capture_empty(struct snd_pcm_substream *substream) return snd_pcm_capture_avail(runtime) == 0; } +/** + * snd_pcm_trigger_done - Mark the master substream + * @substream: the pcm substream instance + * @master: the linked master substream + * + * When multiple substreams of the same card are linked and the hardware + * supports the single-shot operation, the driver calls this in the loop + * in snd_pcm_group_for_each_entry() for marking the substream as "done". + * Then most of trigger operations are performed only to the given master + * substream. + * + * The trigger_master mark is cleared at timestamp updates at the end + * of trigger operations. + */ static inline void snd_pcm_trigger_done(struct snd_pcm_substream *substream, struct snd_pcm_substream *master) { @@ -748,18 +847,59 @@ static inline const struct snd_interval *hw_param_interval_c(const struct snd_pc return ¶ms->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL]; } -#define params_channels(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_CHANNELS)->min) -#define params_rate(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_RATE)->min) -#define params_period_size(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min) -#define params_periods(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_PERIODS)->min) -#define params_buffer_size(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min) -#define params_buffer_bytes(p) \ - (hw_param_interval_c((p), SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min) +/** + * params_channels - Get the number of channels from the hw params + * @p: hw params + */ +static inline unsigned int params_channels(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_CHANNELS)->min; +} + +/** + * params_channels - Get the sample rate from the hw params + * @p: hw params + */ +static inline unsigned int params_rate(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_RATE)->min; +} + +/** + * params_channels - Get the period size (in frames) from the hw params + * @p: hw params + */ +static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min; +} + +/** + * params_channels - Get the number of periods from the hw params + * @p: hw params + */ +static inline unsigned int params_periods(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIODS)->min; +} + +/** + * params_channels - Get the buffer size (in frames) from the hw params + * @p: hw params + */ +static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, 
SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min; +} + +/** + * params_channels - Get the buffer size (in bytes) from the hw params + * @p: hw params + */ +static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p) +{ + return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min; +} int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v); void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c); @@ -881,6 +1021,14 @@ unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit); unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a, unsigned int rates_b); +/** + * snd_pcm_set_runtime_buffer - Set the PCM runtime buffer + * @substream: PCM substream to set + * @bufp: the buffer information, NULL to clear + * + * Copy the buffer information to runtime->dma_buffer when @bufp is non-NULL. + * Otherwise it clears the current buffer information. + */ static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substream, struct snd_dma_buffer *bufp) { @@ -906,6 +1054,11 @@ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream); void snd_pcm_timer_init(struct snd_pcm_substream *substream); void snd_pcm_timer_done(struct snd_pcm_substream *substream); +/** + * snd_pcm_gettime - Fill the timespec depending on the timestamp mode + * @runtime: PCM runtime instance + * @tv: timespec to fill + */ static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime, struct timespec *tv) { @@ -942,7 +1095,6 @@ int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream, int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream); struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream, unsigned long offset); -#if 0 /* for kernel-doc */ /** * snd_pcm_lib_alloc_vmalloc_buffer - allocate virtual DMA buffer * @substream: the substream to allocate the buffer to @@ -955,8 +1107,13 @@ struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream, * Return: 1 if the buffer was changed, 0 if not changed, or a negative error * code. */ -static int snd_pcm_lib_alloc_vmalloc_buffer - (struct snd_pcm_substream *substream, size_t size); +static inline int snd_pcm_lib_alloc_vmalloc_buffer + (struct snd_pcm_substream *substream, size_t size) +{ + return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size, + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); +} + /** * snd_pcm_lib_alloc_vmalloc_32_buffer - allocate 32-bit-addressable buffer * @substream: the substream to allocate the buffer to @@ -968,15 +1125,12 @@ static int snd_pcm_lib_alloc_vmalloc_buffer * Return: 1 if the buffer was changed, 0 if not changed, or a negative error * code. 
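A hw_params sketch combining the (now type-checked) params_*() accessors with the vmalloc buffer helper defined above; the "foo" callback is hypothetical, not from the patch.

#include <linux/printk.h>
#include <sound/pcm.h>

static int foo_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params)
{
	pr_debug("foo: %u Hz, %u channels, %u byte buffer\n",
		 params_rate(params), params_channels(params),
		 params_buffer_bytes(params));

	/* Returns 1 if the buffer was (re)allocated, 0 if kept, < 0 on error. */
	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
						params_buffer_bytes(params));
}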
*/ -static int snd_pcm_lib_alloc_vmalloc_32_buffer - (struct snd_pcm_substream *substream, size_t size); -#endif -#define snd_pcm_lib_alloc_vmalloc_buffer(subs, size) \ - _snd_pcm_lib_alloc_vmalloc_buffer \ - (subs, size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO) -#define snd_pcm_lib_alloc_vmalloc_32_buffer(subs, size) \ - _snd_pcm_lib_alloc_vmalloc_buffer \ - (subs, size, GFP_KERNEL | GFP_DMA32 | __GFP_ZERO) +static inline int snd_pcm_lib_alloc_vmalloc_32_buffer + (struct snd_pcm_substream *substream, size_t size) +{ + return _snd_pcm_lib_alloc_vmalloc_buffer(substream, size, + GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); +} #define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p) @@ -996,18 +1150,35 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, #define snd_pcm_sgbuf_ops_page NULL #endif /* SND_DMA_SGBUF */ +/** + * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset + * @substream: PCM substream + * @ofs: byte offset + */ static inline dma_addr_t snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs) { return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs); } +/** + * snd_pcm_sgbuf_get_ptr - Get the virtual address at the corresponding offset + * @substream: PCM substream + * @ofs: byte offset + */ static inline void * snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs) { return snd_sgbuf_get_ptr(snd_pcm_get_dma_buf(substream), ofs); } +/** + * snd_pcm_sgbuf_chunk_size - Compute the max size that fits within the contig. + * page from the given size + * @substream: PCM substream + * @ofs: byte offset + * @size: byte size to examine + */ static inline unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream, unsigned int ofs, unsigned int size) @@ -1015,13 +1186,24 @@ snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream, return snd_sgbuf_get_chunk_size(snd_pcm_get_dma_buf(substream), ofs, size); } -/* handle mmap counter - PCM mmap callback should handle this counter properly */ +/** + * snd_pcm_mmap_data_open - increase the mmap counter + * @area: VMA + * + * PCM mmap callback should handle this counter properly + */ static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area) { struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data; atomic_inc(&substream->mmap_count); } +/** + * snd_pcm_mmap_data_close - decrease the mmap counter + * @area: VMA + * + * PCM mmap callback should handle this counter properly + */ static inline void snd_pcm_mmap_data_close(struct vm_area_struct *area) { struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data; @@ -1041,6 +1223,11 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s #define snd_pcm_lib_mmap_vmalloc NULL +/** + * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer + * @dma: DMA number + * @max: pointer to store the max size + */ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max) { *max = dma < 4 ? 
64 * 1024 : 128 * 1024; @@ -1093,7 +1280,11 @@ struct snd_pcm_chmap { void *private_data; /* optional: private data pointer */ }; -/* get the PCM substream assigned to the given chmap info */ +/** + * snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info + * @info: chmap information + * @idx: the substream number index + */ static inline struct snd_pcm_substream * snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx) { @@ -1120,7 +1311,10 @@ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream, unsigned long private_value, struct snd_pcm_chmap **info_ret); -/* Strong-typed conversion of pcm_format to bitwise */ +/** + * pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise + * @pcm_format: PCM format + */ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format) { return 1ULL << (__force int) pcm_format; diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h index d76412b84b48..83284cae464c 100644 --- a/include/sound/rcar_snd.h +++ b/include/sound/rcar_snd.h @@ -36,14 +36,14 @@ #define RSND_SSI_CLK_PIN_SHARE (1 << 31) #define RSND_SSI_NO_BUSIF (1 << 30) /* SSI+DMA without BUSIF */ -#define RSND_SSI(_dma_id, _pio_irq, _flags) \ -{ .dma_id = _dma_id, .pio_irq = _pio_irq, .flags = _flags } +#define RSND_SSI(_dma_id, _irq, _flags) \ +{ .dma_id = _dma_id, .irq = _irq, .flags = _flags } #define RSND_SSI_UNUSED \ -{ .dma_id = -1, .pio_irq = -1, .flags = 0 } +{ .dma_id = -1, .irq = -1, .flags = 0 } struct rsnd_ssi_platform_info { int dma_id; - int pio_irq; + int irq; u32 flags; }; diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h index a5352712194b..120d9610054e 100644 --- a/include/sound/rt5645.h +++ b/include/sound/rt5645.h @@ -23,6 +23,10 @@ struct rt5645_platform_data { unsigned int hp_det_gpio; bool gpio_hp_det_active_high; + + /* true if codec's jd function is used */ + bool en_jd_func; + unsigned int jd_mode; }; #endif diff --git a/include/sound/rt5677.h b/include/sound/rt5677.h index 082670e3a353..d9eb7d861cd0 100644 --- a/include/sound/rt5677.h +++ b/include/sound/rt5677.h @@ -27,6 +27,16 @@ struct rt5677_platform_data { bool lout3_diff; /* DMIC2 clock source selection */ enum rt5677_dmic2_clk dmic2_clk_pin; + + /* configures GPIO, 0 - floating, 1 - pulldown, 2 - pullup */ + u8 gpio_config[6]; + + /* jd1 can select 0 ~ 3 as OFF, GPIO1, GPIO2 and GPIO3 respectively */ + unsigned int jd1_gpio; + /* jd2 and jd3 can select 0 ~ 3 as + OFF, GPIO4, GPIO5 and GPIO6 respectively */ + unsigned int jd2_gpio; + unsigned int jd3_gpio; }; #endif diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h index 2398521f0998..eea5400fe373 100644 --- a/include/sound/seq_kernel.h +++ b/include/sound/seq_kernel.h @@ -108,9 +108,13 @@ int snd_seq_event_port_detach(int client, int port); #ifdef CONFIG_MODULES void snd_seq_autoload_lock(void); void snd_seq_autoload_unlock(void); +void snd_seq_autoload_init(void); +#define snd_seq_autoload_exit() snd_seq_autoload_lock() #else #define snd_seq_autoload_lock() #define snd_seq_autoload_unlock() +#define snd_seq_autoload_init() +#define snd_seq_autoload_exit() #endif #endif /* __SOUND_SEQ_KERNEL_H */ diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index e8b3080d196a..2df96b1384c7 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -206,7 +206,6 @@ struct snd_soc_dai_driver { /* DAI description */ const char *name; unsigned int id; - int ac97_control; unsigned int base; /* DAI driver callbacks */ @@ -216,6 +215,8 @@ struct 
snd_soc_dai_driver { int (*resume)(struct snd_soc_dai *dai); /* compress dai */ bool compress_dai; + /* DAI is also used for the control bus */ + bool bus_control; /* ops */ const struct snd_soc_dai_ops *ops; @@ -241,7 +242,6 @@ struct snd_soc_dai { const char *name; int id; struct device *dev; - void *ac97_pdata; /* platform_data for the ac97 codec */ /* driver ops */ struct snd_soc_dai_driver *driver; @@ -268,7 +268,6 @@ struct snd_soc_dai { unsigned int sample_bits; /* parent platform/codec */ - struct snd_soc_platform *platform; struct snd_soc_codec *codec; struct snd_soc_component *component; @@ -276,8 +275,6 @@ struct snd_soc_dai { unsigned int tx_mask; unsigned int rx_mask; - struct snd_soc_card *card; - struct list_head list; }; diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 3a4d7da67b8d..89823cfe6f04 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -435,7 +435,7 @@ void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card); unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol); /* Mostly internal - should not normally be used */ -void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm); +void dapm_mark_endpoints_dirty(struct snd_soc_card *card); /* dapm path query */ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, @@ -508,9 +508,9 @@ struct snd_soc_dapm_path { /* status */ u32 connect:1; /* source and sink widgets are connected */ - u32 walked:1; /* path has been walked */ u32 walking:1; /* path is in the process of being walked */ u32 weak:1; /* path ignored for power management */ + u32 is_supply:1; /* At least one of the connected widgets is a supply */ int (*connected)(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink); @@ -544,11 +544,13 @@ struct snd_soc_dapm_widget { unsigned char active:1; /* active stream on DAC, ADC's */ unsigned char connected:1; /* connected codec pin */ unsigned char new:1; /* cnew complete */ - unsigned char ext:1; /* has external widgets */ unsigned char force:1; /* force state */ unsigned char ignore_suspend:1; /* kept enabled over suspend */ unsigned char new_power:1; /* power from this run */ unsigned char power_checked:1; /* power checked this run */ + unsigned char is_supply:1; /* Widget is a supply type widget */ + unsigned char is_sink:1; /* Widget is a sink type widget */ + unsigned char is_source:1; /* Widget is a source type widget */ int subseq; /* sort within widget type */ int (*power_check)(struct snd_soc_dapm_widget *w); @@ -567,6 +569,7 @@ struct snd_soc_dapm_widget { struct list_head sinks; /* used during DAPM updates */ + struct list_head work_list; struct list_head power_list; struct list_head dirty; int inputs; diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h index 2883a7a6f9f3..98f2ade0266e 100644 --- a/include/sound/soc-dpcm.h +++ b/include/sound/soc-dpcm.h @@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime { /* state and update */ enum snd_soc_dpcm_update runtime_update; enum snd_soc_dpcm_state state; + + int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */ }; /* can this BE stop and free */ diff --git a/include/sound/soc.h b/include/sound/soc.h index 7ba7130037a0..b4fca9aed2a2 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -36,6 +36,11 @@ {.reg = xreg, .rreg = xreg, .shift = shift_left, \ .rshift = shift_right, .max = xmax, .platform_max = xmax, \ .invert = xinvert, .autodisable = xautodisable}) +#define SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, 
xsign_bit, xinvert, xautodisable) \ + ((unsigned long)&(struct soc_mixer_control) \ + {.reg = xreg, .rreg = xreg, .shift = shift_left, \ + .rshift = shift_right, .min = xmin, .max = xmax, .platform_max = xmax, \ + .sign_bit = xsign_bit, .invert = xinvert, .autodisable = xautodisable}) #define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \ SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable) #define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \ @@ -171,11 +176,9 @@ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ SNDRV_CTL_ELEM_ACCESS_READWRITE, \ .tlv.p = (tlv_array), \ - .info = snd_soc_info_volsw_s8, .get = snd_soc_get_volsw_s8, \ - .put = snd_soc_put_volsw_s8, \ - .private_value = (unsigned long)&(struct soc_mixer_control) \ - {.reg = xreg, .min = xmin, .max = xmax, \ - .platform_max = xmax} } + .info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\ + .put = snd_soc_put_volsw, \ + .private_value = SOC_DOUBLE_S_VALUE(xreg, 0, 8, xmin, xmax, 7, 0, 0) } #define SOC_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xitems, xtexts) \ { .reg = xreg, .shift_l = xshift_l, .shift_r = xshift_r, \ .items = xitems, .texts = xtexts, \ @@ -366,8 +369,6 @@ struct snd_soc_jack_gpio; typedef int (*hw_write_t)(void *,const char* ,int); -extern struct snd_ac97_bus_ops *soc_ac97_ops; - enum snd_soc_pcm_subclass { SND_SOC_PCM_CLASS_PCM = 0, SND_SOC_PCM_CLASS_BE = 1, @@ -409,13 +410,9 @@ int devm_snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *cmpnt_drv, struct snd_soc_dai_driver *dai_drv, int num_dai); void snd_soc_unregister_component(struct device *dev); -int snd_soc_cache_sync(struct snd_soc_codec *codec); int snd_soc_cache_init(struct snd_soc_codec *codec); int snd_soc_cache_exit(struct snd_soc_codec *codec); -int snd_soc_cache_write(struct snd_soc_codec *codec, - unsigned int reg, unsigned int value); -int snd_soc_cache_read(struct snd_soc_codec *codec, - unsigned int reg, unsigned int *value); + int snd_soc_platform_read(struct snd_soc_platform *platform, unsigned int reg); int snd_soc_platform_write(struct snd_soc_platform *platform, @@ -500,14 +497,28 @@ int snd_soc_update_bits_locked(struct snd_soc_codec *codec, int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg, unsigned int mask, unsigned int value); -int snd_soc_new_ac97_codec(struct snd_soc_codec *codec, - struct snd_ac97_bus_ops *ops, int num); -void snd_soc_free_ac97_codec(struct snd_soc_codec *codec); +#ifdef CONFIG_SND_SOC_AC97_BUS +struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec); +void snd_soc_free_ac97_codec(struct snd_ac97 *ac97); int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops); int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops, struct platform_device *pdev); +extern struct snd_ac97_bus_ops *soc_ac97_ops; +#else +static inline int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops, + struct platform_device *pdev) +{ + return 0; +} + +static inline int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops) +{ + return 0; +} +#endif + /* *Controls */ @@ -545,12 +556,6 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); -int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_info *uinfo); -int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol); -int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value 
*ucontrol); int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo); int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, @@ -780,24 +785,18 @@ struct snd_soc_codec { struct device *dev; const struct snd_soc_codec_driver *driver; - struct mutex mutex; struct list_head list; struct list_head card_list; /* runtime */ - struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */ unsigned int cache_bypass:1; /* Suppress access to the cache */ unsigned int suspended:1; /* Codec is in suspend PM state */ - unsigned int ac97_registered:1; /* Codec has been AC97 registered */ - unsigned int ac97_created:1; /* Codec has been created by SoC */ unsigned int cache_init:1; /* codec cache has been initialized */ - u32 cache_sync; /* Cache needs to be synced to hardware */ /* codec IO */ void *control_data; /* codec control (i2c/3wire) data */ hw_write_t hw_write; void *reg_cache; - struct mutex cache_rw_mutex; /* component */ struct snd_soc_component component; @@ -860,8 +859,6 @@ struct snd_soc_platform_driver { int (*probe)(struct snd_soc_platform *); int (*remove)(struct snd_soc_platform *); - int (*suspend)(struct snd_soc_dai *dai); - int (*resume)(struct snd_soc_dai *dai); struct snd_soc_component_driver component_driver; /* pcm creation and destruction */ @@ -886,7 +883,7 @@ struct snd_soc_platform_driver { struct snd_soc_dai_link_component { const char *name; - const struct device_node *of_node; + struct device_node *of_node; const char *dai_name; }; @@ -894,8 +891,6 @@ struct snd_soc_platform { struct device *dev; const struct snd_soc_platform_driver *driver; - unsigned int suspended:1; /* platform is suspended */ - struct list_head list; struct snd_soc_component component; @@ -990,7 +985,7 @@ struct snd_soc_codec_conf { * DT/OF node, but not both. */ const char *dev_name; - const struct device_node *of_node; + struct device_node *of_node; /* * optional map of kcontrol, widget and path name prefixes that are @@ -1007,7 +1002,7 @@ struct snd_soc_aux_dev { * DT/OF node, but not both. */ const char *codec_name; - const struct device_node *codec_of_node; + struct device_node *codec_of_node; /* codec/machine specific init - e.g. 
add machine controls */ int (*init)(struct snd_soc_component *component); @@ -1264,6 +1259,17 @@ unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg); int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int val); +/** + * snd_soc_cache_sync() - Sync the register cache with the hardware + * @codec: CODEC to sync + * + * Note: This function will call regcache_sync() + */ +static inline int snd_soc_cache_sync(struct snd_soc_codec *codec) +{ + return regcache_sync(codec->component.regmap); +} + /* component IO */ int snd_soc_component_read(struct snd_soc_component *component, unsigned int reg, unsigned int *val); @@ -1277,6 +1283,45 @@ void snd_soc_component_async_complete(struct snd_soc_component *component); int snd_soc_component_test_bits(struct snd_soc_component *component, unsigned int reg, unsigned int mask, unsigned int value); +#ifdef CONFIG_REGMAP + +void snd_soc_component_init_regmap(struct snd_soc_component *component, + struct regmap *regmap); +void snd_soc_component_exit_regmap(struct snd_soc_component *component); + +/** + * snd_soc_codec_init_regmap() - Initialize regmap instance for the CODEC + * @codec: The CODEC for which to initialize the regmap instance + * @regmap: The regmap instance that should be used by the CODEC + * + * This function allows deferred assignment of the regmap instance that is + * associated with the CODEC. Only use this if the regmap instance is not yet + * ready when the CODEC is registered. The function must also be called before + * the first IO attempt of the CODEC. + */ +static inline void snd_soc_codec_init_regmap(struct snd_soc_codec *codec, + struct regmap *regmap) +{ + snd_soc_component_init_regmap(&codec->component, regmap); +} + +/** + * snd_soc_codec_exit_regmap() - De-initialize regmap instance for the CODEC + * @codec: The CODEC for which to de-initialize the regmap instance + * + * Calls regmap_exit() on the regmap instance associated to the CODEC and + * removes the regmap instance from the CODEC. + * + * This function should only be used if snd_soc_codec_init_regmap() was used to + * initialize the regmap instance. + */ +static inline void snd_soc_codec_exit_regmap(struct snd_soc_codec *codec) +{ + snd_soc_component_exit_regmap(&codec->component); +} + +#endif + /* device driver data */ static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card, @@ -1451,6 +1496,9 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np, struct device_node **framemaster); int snd_soc_of_get_dai_name(struct device_node *of_node, const char **dai_name); +int snd_soc_of_get_dai_link_codecs(struct device *dev, + struct device_node *of_node, + struct snd_soc_dai_link *dai_link); #include <sound/soc-dai.h> diff --git a/include/sound/uda134x.h b/include/sound/uda134x.h index e475659bd3be..509efb050176 100644 --- a/include/sound/uda134x.h +++ b/include/sound/uda134x.h @@ -18,18 +18,6 @@ struct uda134x_platform_data { struct l3_pins l3; void (*power) (int); int model; - /* - ALSA SOC usually puts the device in standby mode when it's not used - for sometime. If you unset is_powered_on_standby the driver will - turn off the ADC/DAC when this callback is invoked and turn it back - on when needed. Unfortunately this will result in a very light bump - (it can be audible only with good earphones). If this bothers you - set is_powered_on_standby, you will have slightly higher power - consumption. 
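A sketch of the deferred regmap assignment described in the snd_soc_codec_init_regmap() comment above; the "foo" codec, its private data and its regmap configuration are hypothetical.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <sound/soc.h>

struct foo_priv {
	struct i2c_client *i2c;	/* filled in at I2C probe time */
	struct regmap *regmap;
};

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int foo_codec_probe(struct snd_soc_codec *codec)
{
	struct foo_priv *foo = snd_soc_codec_get_drvdata(codec);

	/* The regmap only becomes available now, after codec registration,
	 * so it could not be supplied when the codec was registered. */
	foo->regmap = devm_regmap_init_i2c(foo->i2c, &foo_regmap_config);
	if (IS_ERR(foo->regmap))
		return PTR_ERR(foo->regmap);

	snd_soc_codec_init_regmap(codec, foo->regmap);
	return 0;
}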
Please note that sending the L3 command for ADC is - enough to make the bump, so it doesn't make difference if you - completely take off power from the codec. - */ - int is_powered_on_standby; #define UDA134X_UDA1340 1 #define UDA134X_UDA1341 2 #define UDA134X_UDA1344 3 diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h index b04ee7e5a466..88cf39d96d0f 100644 --- a/include/trace/events/asoc.h +++ b/include/trace/events/asoc.h @@ -288,31 +288,6 @@ TRACE_EVENT(snd_soc_jack_notify, TP_printk("jack=%s %x", __get_str(name), (int)__entry->val) ); -TRACE_EVENT(snd_soc_cache_sync, - - TP_PROTO(struct snd_soc_codec *codec, const char *type, - const char *status), - - TP_ARGS(codec, type, status), - - TP_STRUCT__entry( - __string( name, codec->component.name) - __string( status, status ) - __string( type, type ) - __field( int, id ) - ), - - TP_fast_assign( - __assign_str(name, codec->component.name); - __assign_str(status, status); - __assign_str(type, type); - __entry->id = codec->component.id; - ), - - TP_printk("codec=%s.%d type=%s status=%s", __get_str(name), - (int)__entry->id, __get_str(type), __get_str(status)) -); - #endif /* _TRACE_ASOC_H */ /* This part must be outside protection */ diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index ff4bd1b35246..6cfb841fea7c 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -43,15 +43,13 @@ struct extent_status; { EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \ { EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \ { EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \ - { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" }, \ - { EXT4_GET_BLOCKS_NO_PUT_HOLE, "NO_PUT_HOLE" }) + { EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" }) #define show_mflags(flags) __print_flags(flags, "", \ { EXT4_MAP_NEW, "N" }, \ { EXT4_MAP_MAPPED, "M" }, \ { EXT4_MAP_UNWRITTEN, "U" }, \ - { EXT4_MAP_BOUNDARY, "B" }, \ - { EXT4_MAP_FROM_CLUSTER, "C" }) + { EXT4_MAP_BOUNDARY, "B" }) #define show_free_flags(flags) __print_flags(flags, "|", \ { EXT4_FREE_BLOCKS_METADATA, "METADATA" }, \ @@ -2452,15 +2450,14 @@ TRACE_EVENT(ext4_collapse_range, TRACE_EVENT(ext4_es_shrink, TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time, - int skip_precached, int nr_skipped, int retried), + int nr_skipped, int retried), - TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried), + TP_ARGS(sb, nr_shrunk, scan_time, nr_skipped, retried), TP_STRUCT__entry( __field( dev_t, dev ) __field( int, nr_shrunk ) __field( unsigned long long, scan_time ) - __field( int, skip_precached ) __field( int, nr_skipped ) __field( int, retried ) ), @@ -2469,16 +2466,14 @@ TRACE_EVENT(ext4_es_shrink, __entry->dev = sb->s_dev; __entry->nr_shrunk = nr_shrunk; __entry->scan_time = div_u64(scan_time, 1000); - __entry->skip_precached = skip_precached; __entry->nr_skipped = nr_skipped; __entry->retried = retried; ), - TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d " + TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu " "nr_skipped %d retried %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk, - __entry->scan_time, __entry->skip_precached, - __entry->nr_skipped, __entry->retried) + __entry->scan_time, __entry->nr_skipped, __entry->retried) ); #endif /* _TRACE_EXT4_H */ diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index e335e7d8c6c2..c78e88ce5ea3 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -36,7 +36,7 @@ TRACE_EVENT(rcu_utilization, #ifdef CONFIG_RCU_TRACE -#if 
defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) /* * Tracepoint for grace-period events. Takes a string identifying the @@ -345,7 +345,7 @@ TRACE_EVENT(rcu_fqs, __entry->cpu, __entry->qsevent) ); -#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */ +#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) */ /* * Tracepoint for dyntick-idle entry/exit events. These take a string diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 0a68d5ae584e..30fedaf3e56a 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -97,16 +97,19 @@ static inline long __trace_sched_switch_state(struct task_struct *p) long state = p->state; #ifdef CONFIG_PREEMPT +#ifdef CONFIG_SCHED_DEBUG + BUG_ON(p != current); +#endif /* CONFIG_SCHED_DEBUG */ /* * For all intents and purposes a preempted task is a running task. */ - if (task_preempt_count(p) & PREEMPT_ACTIVE) + if (preempt_count() & PREEMPT_ACTIVE) state = TASK_RUNNING | TASK_STATE_MAX; -#endif +#endif /* CONFIG_PREEMPT */ return state; } -#endif +#endif /* CREATE_TRACE_POINTS */ /* * Tracepoint for task switches, performed by the scheduler: diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index db6c93510f74..079bd10a01b4 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -94,7 +94,7 @@ scsi_opcode_name(WRITE_16), \ scsi_opcode_name(VERIFY_16), \ scsi_opcode_name(WRITE_SAME_16), \ - scsi_opcode_name(SERVICE_ACTION_IN), \ + scsi_opcode_name(SERVICE_ACTION_IN_16), \ scsi_opcode_name(SAI_READ_CAPACITY_16), \ scsi_opcode_name(SAI_GET_LBA_STATUS), \ scsi_opcode_name(MI_REPORT_TARGET_PGS), \ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 1fef3e6e9436..171ca4ff6d99 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -6,6 +6,8 @@ #include <linux/sunrpc/sched.h> #include <linux/sunrpc/clnt.h> +#include <linux/sunrpc/svc.h> +#include <linux/sunrpc/xprtsock.h> #include <net/tcp_states.h> #include <linux/net.h> #include <linux/tracepoint.h> @@ -306,6 +308,164 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection); DEFINE_RPC_SOCKET_EVENT(rpc_socket_close); DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown); +DECLARE_EVENT_CLASS(rpc_xprt_event, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + + TP_ARGS(xprt, xid, status), + + TP_STRUCT__entry( + __field(__be32, xid) + __field(int, status) + __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) + __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) + ), + + TP_fast_assign( + __entry->xid = xid; + __entry->status = status; + __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + ), + + TP_printk("peer=[%s]:%s xid=0x%x status=%d", __get_str(addr), + __get_str(port), be32_to_cpu(__entry->xid), + __entry->status) +); + +DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_ARGS(xprt, xid, status)); + +DEFINE_EVENT(rpc_xprt_event, xprt_transmit, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_ARGS(xprt, xid, status)); + +DEFINE_EVENT(rpc_xprt_event, xprt_complete_rqst, + TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), + TP_ARGS(xprt, xid, status)); + +TRACE_EVENT(xs_tcp_data_ready, + TP_PROTO(struct rpc_xprt *xprt, int err, unsigned int total), + + TP_ARGS(xprt, err, total), + + 
TP_STRUCT__entry( + __field(int, err) + __field(unsigned int, total) + __string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] : + "(null)") + __string(port, xprt ? xprt->address_strings[RPC_DISPLAY_PORT] : + "(null)") + ), + + TP_fast_assign( + __entry->err = err; + __entry->total = total; + __assign_str(addr, xprt ? + xprt->address_strings[RPC_DISPLAY_ADDR] : "(null)"); + __assign_str(port, xprt ? + xprt->address_strings[RPC_DISPLAY_PORT] : "(null)"); + ), + + TP_printk("peer=[%s]:%s err=%d total=%u", __get_str(addr), + __get_str(port), __entry->err, __entry->total) +); + +#define rpc_show_sock_xprt_flags(flags) \ + __print_flags(flags, "|", \ + { TCP_RCV_LAST_FRAG, "TCP_RCV_LAST_FRAG" }, \ + { TCP_RCV_COPY_FRAGHDR, "TCP_RCV_COPY_FRAGHDR" }, \ + { TCP_RCV_COPY_XID, "TCP_RCV_COPY_XID" }, \ + { TCP_RCV_COPY_DATA, "TCP_RCV_COPY_DATA" }, \ + { TCP_RCV_READ_CALLDIR, "TCP_RCV_READ_CALLDIR" }, \ + { TCP_RCV_COPY_CALLDIR, "TCP_RCV_COPY_CALLDIR" }, \ + { TCP_RPC_REPLY, "TCP_RPC_REPLY" }) + +TRACE_EVENT(xs_tcp_data_recv, + TP_PROTO(struct sock_xprt *xs), + + TP_ARGS(xs), + + TP_STRUCT__entry( + __string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]) + __string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]) + __field(__be32, xid) + __field(unsigned long, flags) + __field(unsigned long, copied) + __field(unsigned int, reclen) + __field(unsigned long, offset) + ), + + TP_fast_assign( + __assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]); + __entry->xid = xs->tcp_xid; + __entry->flags = xs->tcp_flags; + __entry->copied = xs->tcp_copied; + __entry->reclen = xs->tcp_reclen; + __entry->offset = xs->tcp_offset; + ), + + TP_printk("peer=[%s]:%s xid=0x%x flags=%s copied=%lu reclen=%u offset=%lu", + __get_str(addr), __get_str(port), be32_to_cpu(__entry->xid), + rpc_show_sock_xprt_flags(__entry->flags), + __entry->copied, __entry->reclen, __entry->offset) +); + +TRACE_EVENT(svc_recv, + TP_PROTO(struct svc_rqst *rqst, int status), + + TP_ARGS(rqst, status), + + TP_STRUCT__entry( + __field(struct sockaddr *, addr) + __field(__be32, xid) + __field(int, status) + ), + + TP_fast_assign( + __entry->addr = (struct sockaddr *)&rqst->rq_addr; + __entry->xid = status > 0 ? 
rqst->rq_xid : 0; + __entry->status = status; + ), + + TP_printk("addr=%pIScp xid=0x%x status=%d", __entry->addr, + be32_to_cpu(__entry->xid), __entry->status) +); + +DECLARE_EVENT_CLASS(svc_rqst_status, + + TP_PROTO(struct svc_rqst *rqst, int status), + + TP_ARGS(rqst, status), + + TP_STRUCT__entry( + __field(struct sockaddr *, addr) + __field(__be32, xid) + __field(int, dropme) + __field(int, status) + ), + + TP_fast_assign( + __entry->addr = (struct sockaddr *)&rqst->rq_addr; + __entry->xid = rqst->rq_xid; + __entry->dropme = (int)rqst->rq_dropme; + __entry->status = status; + ), + + TP_printk("addr=%pIScp rq_xid=0x%x dropme=%d status=%d", + __entry->addr, be32_to_cpu(__entry->xid), __entry->dropme, + __entry->status) +); + +DEFINE_EVENT(svc_rqst_status, svc_process, + TP_PROTO(struct svc_rqst *rqst, int status), + TP_ARGS(rqst, status)); + +DEFINE_EVENT(svc_rqst_status, svc_send, + TP_PROTO(struct svc_rqst *rqst, int status), + TP_ARGS(rqst, status)); + #endif /* _TRACE_SUNRPC_H */ #include <trace/define_trace.h> diff --git a/include/trace/events/target.h b/include/trace/events/target.h index da9cc0f05c93..45403443dd82 100644 --- a/include/trace/events/target.h +++ b/include/trace/events/target.h @@ -96,7 +96,7 @@ scsi_opcode_name(WRITE_16), \ scsi_opcode_name(VERIFY_16), \ scsi_opcode_name(WRITE_SAME_16), \ - scsi_opcode_name(SERVICE_ACTION_IN), \ + scsi_opcode_name(SERVICE_ACTION_IN_16), \ scsi_opcode_name(SAI_READ_CAPACITY_16), \ scsi_opcode_name(SAI_GET_LBA_STATUS), \ scsi_opcode_name(MI_REPORT_TARGET_PGS), \ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 26b4f2e13275..139b5067345b 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -277,14 +277,12 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ field = (typeof(field))iter->ent; \ \ ret = ftrace_raw_output_prep(iter, trace_event); \ - if (ret) \ + if (ret != TRACE_TYPE_HANDLED) \ return ret; \ \ - ret = trace_seq_printf(s, print); \ - if (!ret) \ - return TRACE_TYPE_PARTIAL_LINE; \ + trace_seq_printf(s, print); \ \ - return TRACE_TYPE_HANDLED; \ + return trace_handle_return(s); \ } \ static struct trace_event_functions ftrace_event_type_funcs_##call = { \ .trace = ftrace_raw_output_##call, \ diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index ba5be7fdbdfe..1e3552037a5a 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -91,6 +91,10 @@ typedef struct siginfo { int _trapno; /* TRAP # which caused the signal */ #endif short _addr_lsb; /* LSB of the reported address */ + struct { + void __user *_lower; + void __user *_upper; + } _addr_bnd; } _sigfault; /* SIGPOLL */ @@ -131,6 +135,8 @@ typedef struct siginfo { #define si_trapno _sifields._sigfault._trapno #endif #define si_addr_lsb _sifields._sigfault._addr_lsb +#define si_lower _sifields._sigfault._addr_bnd._lower +#define si_upper _sifields._sigfault._addr_bnd._upper #define si_band _sifields._sigpoll._band #define si_fd _sifields._sigpoll._fd #ifdef __ARCH_SIGSYS @@ -199,7 +205,8 @@ typedef struct siginfo { */ #define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */ #define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */ -#define NSIGSEGV 2 +#define SEGV_BNDERR (__SI_FAULT|3) /* failed address bound checks */ +#define NSIGSEGV 3 /* * SIGBUS si_codes diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index ea0796bdcf88..5c15c2a5c123 100644 --- a/include/uapi/asm-generic/socket.h 
+++ b/include/uapi/asm-generic/socket.h @@ -82,4 +82,9 @@ #define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 + +#define SO_ATTACH_BPF 50 +#define SO_DETACH_BPF SO_DETACH_FILTER + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 22749c134117..e016bd9b1a04 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -707,9 +707,11 @@ __SYSCALL(__NR_getrandom, sys_getrandom) __SYSCALL(__NR_memfd_create, sys_memfd_create) #define __NR_bpf 280 __SYSCALL(__NR_bpf, sys_bpf) +#define __NR_execveat 281 +__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) #undef __NR_syscalls -#define __NR_syscalls 281 +#define __NR_syscalls 282 /* * All syscalls below here should go away really, diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index b70237e8bc37..d8e1716707ba 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -37,27 +37,27 @@ header-y += aio_abi.h header-y += apm_bios.h header-y += arcfb.h header-y += atalk.h -header-y += atm.h -header-y += atm_eni.h -header-y += atm_he.h -header-y += atm_idt77105.h -header-y += atm_nicstar.h -header-y += atm_tcp.h -header-y += atm_zatm.h header-y += atmapi.h header-y += atmarp.h header-y += atmbr2684.h header-y += atmclip.h header-y += atmdev.h +header-y += atm_eni.h +header-y += atm.h +header-y += atm_he.h +header-y += atm_idt77105.h header-y += atmioc.h header-y += atmlec.h header-y += atmmpc.h +header-y += atm_nicstar.h header-y += atmppp.h header-y += atmsap.h header-y += atmsvc.h +header-y += atm_tcp.h +header-y += atm_zatm.h header-y += audit.h -header-y += auto_fs.h header-y += auto_fs4.h +header-y += auto_fs.h header-y += auxvec.h header-y += ax25.h header-y += b1lli.h @@ -67,8 +67,8 @@ header-y += bfs_fs.h header-y += binfmts.h header-y += blkpg.h header-y += blktrace_api.h -header-y += bpf.h header-y += bpf_common.h +header-y += bpf.h header-y += bpqether.h header-y += bsg.h header-y += btrfs.h @@ -93,21 +93,21 @@ header-y += cyclades.h header-y += cycx_cfm.h header-y += dcbnl.h header-y += dccp.h -header-y += dlm.h +header-y += dlmconstants.h header-y += dlm_device.h +header-y += dlm.h header-y += dlm_netlink.h header-y += dlm_plock.h -header-y += dlmconstants.h header-y += dm-ioctl.h header-y += dm-log-userspace.h header-y += dn.h header-y += dqblk_xfs.h header-y += edd.h header-y += efs_fs_sb.h +header-y += elfcore.h header-y += elf-em.h header-y += elf-fdpic.h header-y += elf.h -header-y += elfcore.h header-y += errno.h header-y += errqueue.h header-y += ethtool.h @@ -125,22 +125,24 @@ header-y += filter.h header-y += firewire-cdev.h header-y += firewire-constants.h header-y += flat.h +header-y += fou.h header-y += fs.h header-y += fsl_hypervisor.h header-y += fuse.h header-y += futex.h header-y += gameport.h -header-y += gen_stats.h header-y += genetlink.h +header-y += gen_stats.h header-y += gfs2_ondisk.h header-y += gigaset_dev.h -header-y += hdlc.h header-y += hdlcdrv.h +header-y += hdlc.h header-y += hdreg.h -header-y += hid.h header-y += hiddev.h +header-y += hid.h header-y += hidraw.h header-y += hpet.h +header-y += hsr_netlink.h header-y += hyperv.h header-y += hysdn_if.h header-y += i2c-dev.h @@ -149,7 +151,6 @@ header-y += i2o-dev.h header-y += i8k.h header-y += icmp.h header-y += icmpv6.h -header-y += if.h header-y += if_addr.h header-y += if_addrlabel.h header-y += if_alg.h @@ -163,6 +164,7 @@ header-y += if_ether.h header-y += if_fc.h header-y += if_fddi.h header-y += if_frad.h 
+header-y += if.h header-y += if_hippi.h header-y += if_infiniband.h header-y += if_link.h @@ -180,40 +182,40 @@ header-y += if_tunnel.h header-y += if_vlan.h header-y += if_x25.h header-y += igmp.h -header-y += in.h header-y += in6.h -header-y += in_route.h header-y += inet_diag.h +header-y += in.h header-y += inotify.h header-y += input.h +header-y += in_route.h header-y += ioctl.h -header-y += ip.h header-y += ip6_tunnel.h -header-y += ip_vs.h header-y += ipc.h +header-y += ip.h header-y += ipmi.h header-y += ipmi_msgdefs.h header-y += ipsec.h header-y += ipv6.h header-y += ipv6_route.h +header-y += ip_vs.h header-y += ipx.h header-y += irda.h header-y += irqnr.h -header-y += isdn.h header-y += isdn_divertif.h -header-y += isdn_ppp.h +header-y += isdn.h header-y += isdnif.h +header-y += isdn_ppp.h header-y += iso_fs.h -header-y += ivtv.h header-y += ivtvfb.h +header-y += ivtv.h header-y += ixjuser.h header-y += jffs2.h header-y += joystick.h -header-y += kd.h header-y += kdev_t.h -header-y += kernel-page-flags.h -header-y += kernel.h +header-y += kd.h header-y += kernelcapi.h +header-y += kernel.h +header-y += kernel-page-flags.h header-y += kexec.h header-y += keyboard.h header-y += keyctl.h @@ -229,6 +231,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h \ header-y += kvm_para.h endif +header-y += hw_breakpoint.h header-y += l2tp.h header-y += libc-compat.h header-y += limits.h @@ -241,6 +244,7 @@ header-y += map_to_7segment.h header-y += matroxfb.h header-y += mdio.h header-y += media.h +header-y += media-bus-format.h header-y += mei.h header-y += memfd.h header-y += mempolicy.h @@ -251,44 +255,45 @@ header-y += mii.h header-y += minix_fs.h header-y += mman.h header-y += mmtimer.h +header-y += mpls.h header-y += mqueue.h -header-y += mroute.h header-y += mroute6.h +header-y += mroute.h header-y += msdos_fs.h header-y += msg.h header-y += mtio.h -header-y += n_r3964.h header-y += nbd.h -header-y += ncp.h header-y += ncp_fs.h +header-y += ncp.h header-y += ncp_mount.h header-y += ncp_no.h header-y += neighbour.h -header-y += net.h -header-y += net_dropmon.h -header-y += net_tstamp.h header-y += netconf.h header-y += netdevice.h -header-y += netlink_diag.h -header-y += netfilter.h +header-y += net_dropmon.h header-y += netfilter_arp.h header-y += netfilter_bridge.h header-y += netfilter_decnet.h +header-y += netfilter.h header-y += netfilter_ipv4.h header-y += netfilter_ipv6.h +header-y += net.h +header-y += netlink_diag.h header-y += netlink.h header-y += netrom.h +header-y += net_tstamp.h header-y += nfc.h -header-y += nfs.h header-y += nfs2.h header-y += nfs3.h header-y += nfs4.h header-y += nfs4_mount.h +header-y += nfsacl.h header-y += nfs_fs.h +header-y += nfs.h header-y += nfs_idmap.h header-y += nfs_mount.h -header-y += nfsacl.h header-y += nl80211.h +header-y += n_r3964.h header-y += nubus.h header-y += nvme.h header-y += nvram.h @@ -308,16 +313,16 @@ header-y += pfkeyv2.h header-y += pg.h header-y += phantom.h header-y += phonet.h +header-y += pktcdvd.h header-y += pkt_cls.h header-y += pkt_sched.h -header-y += pktcdvd.h header-y += pmu.h header-y += poll.h header-y += posix_types.h header-y += ppdev.h header-y += ppp-comp.h -header-y += ppp-ioctl.h header-y += ppp_defs.h +header-y += ppp-ioctl.h header-y += pps.h header-y += prctl.h header-y += psci.h @@ -349,13 +354,13 @@ header-y += seccomp.h header-y += securebits.h header-y += selinux_netlink.h header-y += sem.h -header-y += serial.h header-y += serial_core.h +header-y += serial.h header-y += 
serial_reg.h header-y += serio.h header-y += shm.h -header-y += signal.h header-y += signalfd.h +header-y += signal.h header-y += smiapp.h header-y += snmp.h header-y += sock_diag.h @@ -364,8 +369,8 @@ header-y += sockios.h header-y += som.h header-y += sonet.h header-y += sonypi.h -header-y += sound.h header-y += soundcard.h +header-y += sound.h header-y += stat.h header-y += stddef.h header-y += string.h @@ -384,11 +389,12 @@ header-y += time.h header-y += times.h header-y += timex.h header-y += tiocl.h -header-y += tipc.h header-y += tipc_config.h +header-y += tipc_netlink.h +header-y += tipc.h header-y += toshiba.h -header-y += tty.h header-y += tty_flags.h +header-y += tty.h header-y += types.h header-y += udf_fs_i.h header-y += udp.h @@ -424,6 +430,9 @@ header-y += virtio_net.h header-y += virtio_pci.h header-y += virtio_ring.h header-y += virtio_rng.h +header-y += virtio_scsi.h +header-y += virtio_types.h +header-y += vm_sockets.h header-y += vt.h header-y += wait.h header-y += wanrouter.h @@ -433,6 +442,5 @@ header-y += wireless.h header-y += x25.h header-y += xattr.h header-y += xfrm.h -header-y += hw_breakpoint.h header-y += zorro.h header-y += zorro_ids.h diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index d4dbef14d4df..12e26683c706 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -322,9 +322,15 @@ enum { #define AUDIT_STATUS_BACKLOG_LIMIT 0x0010 #define AUDIT_STATUS_BACKLOG_WAIT_TIME 0x0020 -#define AUDIT_VERSION_BACKLOG_LIMIT 1 -#define AUDIT_VERSION_BACKLOG_WAIT_TIME 2 -#define AUDIT_VERSION_LATEST AUDIT_VERSION_BACKLOG_WAIT_TIME +#define AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT 0x00000001 +#define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME 0x00000002 +#define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \ + AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME) + +/* deprecated: AUDIT_VERSION_* */ +#define AUDIT_VERSION_LATEST AUDIT_FEATURE_BITMAP_ALL +#define AUDIT_VERSION_BACKLOG_LIMIT AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT +#define AUDIT_VERSION_BACKLOG_WAIT_TIME AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME /* Failure-to-log actions */ #define AUDIT_FAIL_SILENT 0 @@ -404,7 +410,10 @@ struct audit_status { __u32 backlog_limit; /* waiting messages limit */ __u32 lost; /* messages lost */ __u32 backlog; /* messages waiting in queue */ - __u32 version; /* audit api version number */ + union { + __u32 version; /* deprecated: audit api version num */ + __u32 feature_bitmap; /* bitmap of kernel audit features */ + }; __u32 backlog_wait_time;/* message queue wait timeout */ }; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d18316f9e9c4..45da7ec7d274 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -82,7 +82,7 @@ enum bpf_cmd { /* create or update key/value pair in a given map * err = bpf(BPF_MAP_UPDATE_ELEM, union bpf_attr *attr, u32 size) - * Using attr->map_fd, attr->key, attr->value + * Using attr->map_fd, attr->key, attr->value, attr->flags * returns zero or negative error */ BPF_MAP_UPDATE_ELEM, @@ -111,12 +111,20 @@ enum bpf_cmd { enum bpf_map_type { BPF_MAP_TYPE_UNSPEC, + BPF_MAP_TYPE_HASH, + BPF_MAP_TYPE_ARRAY, }; enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC, + BPF_PROG_TYPE_SOCKET_FILTER, }; +/* flags for BPF_MAP_UPDATE_ELEM command */ +#define BPF_ANY 0 /* create new element or update existing */ +#define BPF_NOEXIST 1 /* create new element if it didn't exist */ +#define BPF_EXIST 2 /* update existing element */ + union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ 
__u32 map_type; /* one of enum bpf_map_type */ @@ -132,6 +140,7 @@ union bpf_attr { __aligned_u64 value; __aligned_u64 next_key; }; + __u64 flags; }; struct { /* anonymous struct used by BPF_PROG_LOAD command */ @@ -150,6 +159,9 @@ union bpf_attr { */ enum bpf_func_id { BPF_FUNC_unspec, + BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */ + BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */ + BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */ __BPF_FUNC_MAX_ID, }; diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 2f47824e7a36..611e1c5893b4 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -157,6 +157,7 @@ struct btrfs_ioctl_dev_replace_status_params { #define BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR 0 #define BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED 1 #define BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED 2 +#define BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS 3 struct btrfs_ioctl_dev_replace_args { __u64 cmd; /* in */ __u64 result; /* out */ diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h index c247446ab25a..1c508be9687f 100644 --- a/include/uapi/linux/can/error.h +++ b/include/uapi/linux/can/error.h @@ -71,6 +71,7 @@ #define CAN_ERR_CRTL_TX_PASSIVE 0x20 /* reached error passive status TX */ /* (at least one error counter exceeds */ /* the protocol-defined level of 127) */ +#define CAN_ERR_CRTL_ACTIVE 0x40 /* recovered to error active state */ /* error in CAN protocol (type) / data[2] */ #define CAN_ERR_PROT_UNSPEC 0x00 /* unspecified */ diff --git a/include/uapi/linux/dlmconstants.h b/include/uapi/linux/dlmconstants.h index 47bf08dc7566..2857bdc5b27b 100644 --- a/include/uapi/linux/dlmconstants.h +++ b/include/uapi/linux/dlmconstants.h @@ -114,7 +114,7 @@ * * DLM_LKF_ORPHAN * - * not yet implemented + * Acquire an orphan lock. * * DLM_LKF_ALTPR * diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index 3315ab21f728..a570d7b5796c 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -267,9 +267,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 28 +#define DM_VERSION_MINOR 29 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2014-09-17)" +#define DM_VERSION_EXTRA "-ioctl (2014-10-28)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ @@ -352,4 +352,9 @@ enum { */ #define DM_DEFERRED_REMOVE (1 << 17) /* In/Out */ +/* + * If set, the device is suspended internally. 
+ */ +#define DM_INTERNAL_SUSPEND_FLAG (1 << 18) /* Out */ + #endif /* _LINUX_DM_IOCTL_H */ diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index aa90bc98b6e2..ae99f7743cf4 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h @@ -34,6 +34,7 @@ #define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */ #define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */ #define EM_BLACKFIN 106 /* ADI Blackfin Processor */ +#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */ #define EM_TI_C6000 140 /* TI C6X DSPs */ #define EM_AARCH64 183 /* ARM 64 bit */ #define EM_FRV 0x5441 /* Fujitsu FR-V */ diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index ea9bf2561b9e..71e1d0ed92f7 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -397,6 +397,7 @@ typedef struct elf64_shdr { #define NT_ARM_TLS 0x401 /* ARM TLS register */ #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ +#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */ #define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */ #define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */ #define NT_METAG_TLS 0x502 /* Metag TLS pointer */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 99b43056a6fe..5f66d9c2889d 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -534,6 +534,7 @@ struct ethtool_pauseparam { * @ETH_SS_NTUPLE_FILTERS: Previously used with %ETHTOOL_GRXNTUPLE; * now deprecated * @ETH_SS_FEATURES: Device feature names + * @ETH_SS_RSS_HASH_FUNCS: RSS hush function names */ enum ethtool_stringset { ETH_SS_TEST = 0, @@ -541,6 +542,7 @@ enum ethtool_stringset { ETH_SS_PRIV_FLAGS, ETH_SS_NTUPLE_FILTERS, ETH_SS_FEATURES, + ETH_SS_RSS_HASH_FUNCS, }; /** @@ -884,6 +886,8 @@ struct ethtool_rxfh_indir { * @key_size: On entry, the array size of the user buffer for the hash key, * which may be zero. On return from %ETHTOOL_GRSSH, the size of the * hardware hash key. + * @hfunc: Defines the current RSS hash function used by HW (or to be set to). + * Valid values are one of the %ETH_RSS_HASH_*. * @rsvd: Reserved for future extensions. * @rss_config: RX ring/queue index for each hash value i.e., indirection table * of @indir_size __u32 elements, followed by hash key of @key_size @@ -893,14 +897,16 @@ struct ethtool_rxfh_indir { * size should be returned. For %ETHTOOL_SRSSH, an @indir_size of * %ETH_RXFH_INDIR_NO_CHANGE means that indir table setting is not requested * and a @indir_size of zero means the indir table should be reset to default - * values. + * values. An hfunc of zero means that hash function setting is not requested. 
*/ struct ethtool_rxfh { __u32 cmd; __u32 rss_context; __u32 indir_size; __u32 key_size; - __u32 rsvd[2]; + __u8 hfunc; + __u8 rsvd8[3]; + __u32 rsvd32; __u32 rss_config[0]; }; #define ETH_RXFH_INDIR_NO_CHANGE 0xffffffff @@ -1213,6 +1219,10 @@ enum ethtool_sfeatures_retval_bits { #define SUPPORTED_40000baseCR4_Full (1 << 24) #define SUPPORTED_40000baseSR4_Full (1 << 25) #define SUPPORTED_40000baseLR4_Full (1 << 26) +#define SUPPORTED_56000baseKR4_Full (1 << 27) +#define SUPPORTED_56000baseCR4_Full (1 << 28) +#define SUPPORTED_56000baseSR4_Full (1 << 29) +#define SUPPORTED_56000baseLR4_Full (1 << 30) #define ADVERTISED_10baseT_Half (1 << 0) #define ADVERTISED_10baseT_Full (1 << 1) @@ -1241,6 +1251,10 @@ enum ethtool_sfeatures_retval_bits { #define ADVERTISED_40000baseCR4_Full (1 << 24) #define ADVERTISED_40000baseSR4_Full (1 << 25) #define ADVERTISED_40000baseLR4_Full (1 << 26) +#define ADVERTISED_56000baseKR4_Full (1 << 27) +#define ADVERTISED_56000baseCR4_Full (1 << 28) +#define ADVERTISED_56000baseSR4_Full (1 << 29) +#define ADVERTISED_56000baseLR4_Full (1 << 30) /* The following are all involved in forcing a particular link * mode for the device for setting things. When getting the @@ -1248,12 +1262,16 @@ enum ethtool_sfeatures_retval_bits { * it was forced up into this mode or autonegotiated. */ -/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ +/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */ #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 #define SPEED_2500 2500 #define SPEED_10000 10000 +#define SPEED_20000 20000 +#define SPEED_40000 40000 +#define SPEED_56000 56000 + #define SPEED_UNKNOWN -1 /* Duplex, half or full. */ @@ -1343,6 +1361,10 @@ enum ethtool_sfeatures_retval_bits { #define ETH_MODULE_SFF_8079_LEN 256 #define ETH_MODULE_SFF_8472 0x2 #define ETH_MODULE_SFF_8472_LEN 512 +#define ETH_MODULE_SFF_8636 0x3 +#define ETH_MODULE_SFF_8636_LEN 256 +#define ETH_MODULE_SFF_8436 0x4 +#define ETH_MODULE_SFF_8436_LEN 256 /* Reset flags */ /* The reset() operation must clear the flags for the components which diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h index 0f9acce5b1ff..f2acd2fde1f3 100644 --- a/include/uapi/linux/if_alg.h +++ b/include/uapi/linux/if_alg.h @@ -32,6 +32,8 @@ struct af_alg_iv { #define ALG_SET_KEY 1 #define ALG_SET_IV 2 #define ALG_SET_OP 3 +#define ALG_SET_AEAD_ASSOCLEN 4 +#define ALG_SET_AEAD_AUTHSIZE 5 /* Operations */ #define ALG_OP_DECRYPT 0 diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 39f621a9fe82..b03ee8f62d3c 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -15,6 +15,7 @@ #include <linux/types.h> #include <linux/if_ether.h> +#include <linux/in6.h> #define SYSFS_BRIDGE_ATTR "bridge" #define SYSFS_BRIDGE_FDB "brforward" @@ -104,6 +105,7 @@ struct __fdb_entry { #define BRIDGE_MODE_VEB 0 /* Default loopback mode */ #define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#define BRIDGE_MODE_UNDEF 0xFFFF /* mode undefined */ /* Bridge management nested attributes * [IFLA_AF_SPEC] = { diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 0bdb77e16875..f7d0d2d7173a 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -145,6 +145,7 @@ enum { IFLA_CARRIER, IFLA_PHYS_PORT_ID, IFLA_CARRIER_CHANGES, + IFLA_PHYS_SWITCH_ID, __IFLA_MAX }; @@ -243,6 +244,8 @@ enum { IFLA_BRPORT_FAST_LEAVE, /* multicast fast leave */ IFLA_BRPORT_LEARNING, /* mac learning */ 
IFLA_BRPORT_UNICAST_FLOOD, /* flood unicast traffic */ + IFLA_BRPORT_PROXYARP, /* proxy ARP */ + IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */ __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -329,6 +332,21 @@ enum macvlan_macaddr_mode { #define MACVLAN_FLAG_NOPROMISC 1 +/* IPVLAN section */ +enum { + IFLA_IPVLAN_UNSPEC, + IFLA_IPVLAN_MODE, + __IFLA_IPVLAN_MAX +}; + +#define IFLA_IPVLAN_MAX (__IFLA_IPVLAN_MAX - 1) + +enum ipvlan_mode { + IPVLAN_MODE_L2 = 0, + IPVLAN_MODE_L3, + IPVLAN_MODE_MAX +}; + /* VXLAN section */ enum { IFLA_VXLAN_UNSPEC, diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h index e9502dd1ee2c..18b2403982f9 100644 --- a/include/uapi/linux/if_tun.h +++ b/include/uapi/linux/if_tun.h @@ -22,21 +22,11 @@ /* Read queue size */ #define TUN_READQ_SIZE 500 - -/* TUN device flags */ -#define TUN_TUN_DEV 0x0001 -#define TUN_TAP_DEV 0x0002 +/* TUN device type flags: deprecated. Use IFF_TUN/IFF_TAP instead. */ +#define TUN_TUN_DEV IFF_TUN +#define TUN_TAP_DEV IFF_TAP #define TUN_TYPE_MASK 0x000f -#define TUN_FASYNC 0x0010 -#define TUN_NOCHECKSUM 0x0020 -#define TUN_NO_PI 0x0040 -/* This flag has no real effect */ -#define TUN_ONE_QUEUE 0x0080 -#define TUN_PERSIST 0x0100 -#define TUN_VNET_HDR 0x0200 -#define TUN_TAP_MQ 0x0400 - /* Ioctl defines */ #define TUNSETNOCSUM _IOW('T', 200, int) #define TUNSETDEBUG _IOW('T', 201, int) @@ -67,6 +57,7 @@ #define IFF_ONE_QUEUE 0x2000 #define IFF_VNET_HDR 0x4000 #define IFF_TUN_EXCL 0x8000 +#define IFF_VNET_LE 0x10000 #define IFF_MULTI_QUEUE 0x0100 #define IFF_ATTACH_QUEUE 0x0200 #define IFF_DETACH_QUEUE 0x0400 diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 280d9e092283..bd3cc11a431f 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -69,6 +69,7 @@ enum tunnel_encap_types { #define TUNNEL_ENCAP_FLAG_CSUM (1<<0) #define TUNNEL_ENCAP_FLAG_CSUM6 (1<<1) +#define TUNNEL_ENCAP_FLAG_REMCSUM (1<<2) /* SIT-mode i_flags */ #define SIT_ISATAP 0x0001 diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index efa2666f4b8a..e863d088b9a5 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -164,6 +164,7 @@ enum { DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL, DEVCONF_SUPPRESS_FRAG_NDISC, DEVCONF_ACCEPT_RA_FROM_LOCAL, + DEVCONF_USE_OPTIMISTIC, DEVCONF_MAX }; diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h new file mode 100644 index 000000000000..23b40908be30 --- /dev/null +++ b/include/uapi/linux/media-bus-format.h @@ -0,0 +1,125 @@ +/* + * Media Bus API header + * + * Copyright (C) 2009, Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MEDIA_BUS_FORMAT_H +#define __LINUX_MEDIA_BUS_FORMAT_H + +/* + * These bus formats uniquely identify data formats on the data bus. Format 0 + * is reserved, MEDIA_BUS_FMT_FIXED shall be used by host-client pairs, where + * the data format is fixed. 
Additionally, "2X8" means that one pixel is + * transferred in two 8-bit samples, "BE" or "LE" specify in which order those + * samples are transferred over the bus: "LE" means that the least significant + * bits are transferred first, "BE" means that the most significant bits are + * transferred first, and "PADHI" and "PADLO" define which bits - low or high, + * in the incomplete high byte, are filled with padding bits. + * + * The bus formats are grouped by type, bus_width, bits per component, samples + * per pixel and order of subsamples. Numerical values are sorted using generic + * numerical sort order (8 thus comes before 10). + * + * As their value can't change when a new bus format is inserted in the + * enumeration, the bus formats are explicitly given a numerical value. The next + * free values for each category are listed below, update them when inserting + * new pixel codes. + */ + +#define MEDIA_BUS_FMT_FIXED 0x0001 + +/* RGB - next is 0x100e */ +#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001 +#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002 +#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE 0x1003 +#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE 0x1004 +#define MEDIA_BUS_FMT_BGR565_2X8_BE 0x1005 +#define MEDIA_BUS_FMT_BGR565_2X8_LE 0x1006 +#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007 +#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008 +#define MEDIA_BUS_FMT_RGB666_1X18 0x1009 +#define MEDIA_BUS_FMT_RGB888_1X24 0x100a +#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b +#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c +#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d + +/* YUV (including grey) - next is 0x2024 */ +#define MEDIA_BUS_FMT_Y8_1X8 0x2001 +#define MEDIA_BUS_FMT_UV8_1X8 0x2015 +#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002 +#define MEDIA_BUS_FMT_VYUY8_1_5X8 0x2003 +#define MEDIA_BUS_FMT_YUYV8_1_5X8 0x2004 +#define MEDIA_BUS_FMT_YVYU8_1_5X8 0x2005 +#define MEDIA_BUS_FMT_UYVY8_2X8 0x2006 +#define MEDIA_BUS_FMT_VYUY8_2X8 0x2007 +#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008 +#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009 +#define MEDIA_BUS_FMT_Y10_1X10 0x200a +#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018 +#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019 +#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b +#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c +#define MEDIA_BUS_FMT_Y12_1X12 0x2013 +#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f +#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010 +#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011 +#define MEDIA_BUS_FMT_YVYU8_1X16 0x2012 +#define MEDIA_BUS_FMT_YDYUYDYV8_1X16 0x2014 +#define MEDIA_BUS_FMT_UYVY10_1X20 0x201a +#define MEDIA_BUS_FMT_VYUY10_1X20 0x201b +#define MEDIA_BUS_FMT_YUYV10_1X20 0x200d +#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e +#define MEDIA_BUS_FMT_YUV10_1X30 0x2016 +#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017 +#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c +#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d +#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e +#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f +#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020 +#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021 +#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022 +#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023 + +/* Bayer - next is 0x3019 */ +#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001 +#define MEDIA_BUS_FMT_SGBRG8_1X8 0x3013 +#define MEDIA_BUS_FMT_SGRBG8_1X8 0x3002 +#define MEDIA_BUS_FMT_SRGGB8_1X8 0x3014 +#define MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8 0x3015 +#define MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8 0x3016 +#define MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8 0x3017 +#define MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8 0x3018 +#define MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 0x300b +#define MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 0x300c +#define 
MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 0x3009 +#define MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 0x300d +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE 0x3003 +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE 0x3004 +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE 0x3005 +#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE 0x3006 +#define MEDIA_BUS_FMT_SBGGR10_1X10 0x3007 +#define MEDIA_BUS_FMT_SGBRG10_1X10 0x300e +#define MEDIA_BUS_FMT_SGRBG10_1X10 0x300a +#define MEDIA_BUS_FMT_SRGGB10_1X10 0x300f +#define MEDIA_BUS_FMT_SBGGR12_1X12 0x3008 +#define MEDIA_BUS_FMT_SGBRG12_1X12 0x3010 +#define MEDIA_BUS_FMT_SGRBG12_1X12 0x3011 +#define MEDIA_BUS_FMT_SRGGB12_1X12 0x3012 + +/* JPEG compressed formats - next is 0x4002 */ +#define MEDIA_BUS_FMT_JPEG_1X8 0x4001 + +/* Vendor specific formats - next is 0x5002 */ + +/* S5C73M3 sensor specific interleaved UYVY and JPEG */ +#define MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8 0x5001 + +/* HSV - next is 0x6002 */ +#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001 + +#endif /* __LINUX_MEDIA_BUS_FORMAT_H */ diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h index a70375526578..f51c8001dbe5 100644 --- a/include/uapi/linux/msg.h +++ b/include/uapi/linux/msg.h @@ -51,16 +51,28 @@ struct msginfo { }; /* - * Scaling factor to compute msgmni: - * the memory dedicated to msg queues (msgmni * msgmnb) should occupy - * at most 1/MSG_MEM_SCALE of the lowmem (see the formula in ipc/msg.c): - * up to 8MB : msgmni = 16 (MSGMNI) - * 4 GB : msgmni = 8K - * more than 16 GB : msgmni = 32K (IPCMNI) + * MSGMNI, MSGMAX and MSGMNB are default values which can be + * modified by sysctl. + * + * MSGMNI is the upper limit for the number of messages queues per + * namespace. + * It has been chosen to be as large possible without facilitating + * scenarios where userspace causes overflows when adjusting the limits via + * operations of the form retrieve current limit; add X; update limit". + * + * MSGMNB is the default size of a new message queue. Non-root tasks can + * decrease the size with msgctl(IPC_SET), root tasks + * (actually: CAP_SYS_RESOURCE) can both increase and decrease the queue + * size. The optimal value is application dependent. + * 16384 is used because it was always used (since 0.99.10) + * + * MAXMAX is the maximum size of an individual message, it's a global + * (per-namespace) limit that applies for all message queues. + * It's set to 1/2 of MSGMNB, to ensure that at least two messages fit into + * the queue. This is also an arbitrary choice (since 2.6.0). */ -#define MSG_MEM_SCALE 32 -#define MSGMNI 16 /* <= IPCMNI */ /* max # of msg queue identifiers */ +#define MSGMNI 32000 /* <= IPCMNI */ /* max # of msg queue identifiers */ #define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */ #define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */ diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 4a1d7e96dfe3..f3d77f9f1e0b 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -35,11 +35,11 @@ enum { */ #define NTF_USE 0x01 -#define NTF_PROXY 0x08 /* == ATF_PUBL */ -#define NTF_ROUTER 0x80 - #define NTF_SELF 0x02 #define NTF_MASTER 0x04 +#define NTF_PROXY 0x08 /* == ATF_PUBL */ +#define NTF_EXT_LEARNED 0x10 +#define NTF_ROUTER 0x80 /* * Neighbor Cache Entry States. 
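[Editor's note, not part of the patch] The msg.h hunk above drops the old MSG_MEM_SCALE heuristic in favour of fixed MSGMNI/MSGMNB defaults, while the comment notes that a task can still tune an individual queue with msgctl(IPC_SET) (non-root may only lower msg_qbytes). A minimal sketch of that interaction, using only the standard System V IPC calls and a throwaway private queue; names and error handling are illustrative, not taken from the patch:

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/msg.h>

	int main(void)
	{
		struct msqid_ds ds;
		int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

		if (id < 0 || msgctl(id, IPC_STAT, &ds) < 0) {
			perror("msgget/IPC_STAT");
			return 1;
		}
		/* On current kernels this prints the MSGMNB default (16384). */
		printf("default msg_qbytes: %lu\n", (unsigned long)ds.msg_qbytes);

		ds.msg_qbytes /= 2;	/* unprivileged callers may only shrink it */
		if (msgctl(id, IPC_SET, &ds) < 0)
			perror("IPC_SET");

		msgctl(id, IPC_RMID, NULL);	/* remove the test queue */
		return 0;
	}
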
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index ff354021bb69..edbc888ceb51 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -23,8 +23,9 @@ enum { SOF_TIMESTAMPING_OPT_ID = (1<<7), SOF_TIMESTAMPING_TX_SCHED = (1<<8), SOF_TIMESTAMPING_TX_ACK = (1<<9), + SOF_TIMESTAMPING_OPT_CMSG = (1<<10), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_TX_ACK, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_CMSG, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h index ca03119111a2..5ab4e60894cf 100644 --- a/include/uapi/linux/netfilter/ipset/ip_set.h +++ b/include/uapi/linux/netfilter/ipset/ip_set.h @@ -256,11 +256,17 @@ enum { IPSET_COUNTER_GT, }; -struct ip_set_counter_match { +/* Backward compatibility for set match v3 */ +struct ip_set_counter_match0 { __u8 op; __u64 value; }; +struct ip_set_counter_match { + __aligned_u64 value; + __u8 op; +}; + /* Interface to iptables/ip6tables */ #define SO_IP_SET 83 diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index f31fe7b660a5..832bc46db78b 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -579,6 +579,7 @@ enum nft_exthdr_attributes { * @NFT_META_CPU: cpu id through smp_processor_id() * @NFT_META_IIFGROUP: packet input interface group * @NFT_META_OIFGROUP: packet output interface group + * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid) */ enum nft_meta_keys { NFT_META_LEN, @@ -604,6 +605,7 @@ enum nft_meta_keys { NFT_META_CPU, NFT_META_IIFGROUP, NFT_META_OIFGROUP, + NFT_META_CGROUP, }; /** @@ -838,6 +840,22 @@ enum nft_masq_attributes { #define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1) /** + * enum nft_redir_attributes - nf_tables redirect expression netlink attributes + * + * @NFTA_REDIR_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers) + * @NFTA_REDIR_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers) + * @NFTA_REDIR_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) + */ +enum nft_redir_attributes { + NFTA_REDIR_UNSPEC, + NFTA_REDIR_REG_PROTO_MIN, + NFTA_REDIR_REG_PROTO_MAX, + NFTA_REDIR_FLAGS, + __NFTA_REDIR_MAX +}; +#define NFTA_REDIR_MAX (__NFTA_REDIR_MAX - 1) + +/** * enum nft_gen_attributes - nf_tables ruleset generation attributes * * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32) diff --git a/include/uapi/linux/netfilter/xt_set.h b/include/uapi/linux/netfilter/xt_set.h index d6a1df1f2947..d4e02348384c 100644 --- a/include/uapi/linux/netfilter/xt_set.h +++ b/include/uapi/linux/netfilter/xt_set.h @@ -66,8 +66,8 @@ struct xt_set_info_target_v2 { struct xt_set_info_match_v3 { struct xt_set_info match_set; - struct ip_set_counter_match packets; - struct ip_set_counter_match bytes; + struct ip_set_counter_match0 packets; + struct ip_set_counter_match0 bytes; __u32 flags; }; @@ -81,4 +81,13 @@ struct xt_set_info_target_v3 { __u32 timeout; }; +/* Revision 4 match */ + +struct xt_set_info_match_v4 { + struct xt_set_info match_set; + struct ip_set_counter_match packets; + struct ip_set_counter_match bytes; + __u32 flags; +}; + #endif /*_XT_SET_H*/ diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 9b19b4461928..8119255feae4 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -116,6 +116,7 @@ enum nfc_commands { NFC_EVENT_SE_TRANSACTION, 
NFC_CMD_GET_SE, NFC_CMD_SE_IO, + NFC_CMD_ACTIVATE_TARGET, /* private: internal use only */ __NFC_CMD_AFTER_LAST }; @@ -196,15 +197,19 @@ enum nfc_sdp_attr { }; #define NFC_SDP_ATTR_MAX (__NFC_SDP_ATTR_AFTER_LAST - 1) -#define NFC_DEVICE_NAME_MAXSIZE 8 -#define NFC_NFCID1_MAXSIZE 10 -#define NFC_NFCID2_MAXSIZE 8 -#define NFC_NFCID3_MAXSIZE 10 -#define NFC_SENSB_RES_MAXSIZE 12 -#define NFC_SENSF_RES_MAXSIZE 18 -#define NFC_GB_MAXSIZE 48 -#define NFC_FIRMWARE_NAME_MAXSIZE 32 -#define NFC_ISO15693_UID_MAXSIZE 8 +#define NFC_DEVICE_NAME_MAXSIZE 8 +#define NFC_NFCID1_MAXSIZE 10 +#define NFC_NFCID2_MAXSIZE 8 +#define NFC_NFCID3_MAXSIZE 10 +#define NFC_SENSB_RES_MAXSIZE 12 +#define NFC_SENSF_RES_MAXSIZE 18 +#define NFC_ATR_REQ_MAXSIZE 64 +#define NFC_ATR_RES_MAXSIZE 64 +#define NFC_ATR_REQ_GB_MAXSIZE 48 +#define NFC_ATR_RES_GB_MAXSIZE 47 +#define NFC_GB_MAXSIZE 48 +#define NFC_FIRMWARE_NAME_MAXSIZE 32 +#define NFC_ISO15693_UID_MAXSIZE 8 /* NFC protocols */ #define NFC_PROTO_JEWEL 1 diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h index a6f453c740b8..1fdc95bb2375 100644 --- a/include/uapi/linux/nfsd/debug.h +++ b/include/uapi/linux/nfsd/debug.h @@ -15,7 +15,7 @@ * Enable debugging for nfsd. * Requires RPC_DEBUG. */ -#ifdef RPC_DEBUG +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define NFSD_DEBUG 1 #endif diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 4b28dc07bcb1..b37bd5a1cb82 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -227,7 +227,11 @@ * the interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC * or, if no MAC address given, all stations, on the interface identified - * by %NL80211_ATTR_IFINDEX. + * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and + * %NL80211_ATTR_REASON_CODE can optionally be used to specify which type + * of disconnection indication should be sent to the station + * (Deauthentication or Disassociation frame and reason code for that + * frame). * * @NL80211_CMD_GET_MPATH: Get mesh path attributes for mesh path to * destination %NL80211_ATTR_MAC on the interface identified by @@ -639,7 +643,18 @@ * @NL80211_CMD_CH_SWITCH_NOTIFY: An AP or GO may decide to switch channels * independently of the userspace SME, send this event indicating * %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ and the - * attributes determining channel width. + * attributes determining channel width. This indication may also be + * sent when a remotely-initiated switch (e.g., when a STA receives a CSA + * from the remote AP) is completed; + * + * @NL80211_CMD_CH_SWITCH_STARTED_NOTIFY: Notify that a channel switch + * has been started on an interface, regardless of the initiator + * (ie. whether it was requested from a remote device or + * initiated on our own). It indicates that + * %NL80211_ATTR_IFINDEX will be on %NL80211_ATTR_WIPHY_FREQ + * after %NL80211_ATTR_CH_SWITCH_COUNT TBTT's. The userspace may + * decide to react to this indication by requesting other + * interfaces to change channel as well. * * @NL80211_CMD_START_P2P_DEVICE: Start the given P2P Device, identified by * its %NL80211_ATTR_WDEV identifier. It must have been created with @@ -738,6 +753,27 @@ * before removing a station entry entirely, or before disassociating * or similar, cleanup will happen in the driver/device in this case. 
* + * @NL80211_CMD_GET_MPP: Get mesh path attributes for mesh proxy path to + * destination %NL80211_ATTR_MAC on the interface identified by + * %NL80211_ATTR_IFINDEX. + * + * @NL80211_CMD_JOIN_OCB: Join the OCB network. The center frequency and + * bandwidth of a channel must be given. + * @NL80211_CMD_LEAVE_OCB: Leave the OCB network -- no special arguments, the + * network is determined by the network interface. + * + * @NL80211_CMD_TDLS_CHANNEL_SWITCH: Start channel-switching with a TDLS peer, + * identified by the %NL80211_ATTR_MAC parameter. A target channel is + * provided via %NL80211_ATTR_WIPHY_FREQ and other attributes determining + * channel width/type. The target operating class is given via + * %NL80211_ATTR_OPER_CLASS. + * The driver is responsible for continually initiating channel-switching + * operations and returning to the base channel for communication with the + * AP. + * @NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH: Stop channel-switching with a TDLS + * peer given by %NL80211_ATTR_MAC. Both peers must be on the base channel + * when this command completes. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -912,6 +948,16 @@ enum nl80211_commands { NL80211_CMD_ADD_TX_TS, NL80211_CMD_DEL_TX_TS, + NL80211_CMD_GET_MPP, + + NL80211_CMD_JOIN_OCB, + NL80211_CMD_LEAVE_OCB, + + NL80211_CMD_CH_SWITCH_STARTED_NOTIFY, + + NL80211_CMD_TDLS_CHANNEL_SWITCH, + NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1606,9 +1652,9 @@ enum nl80211_commands { * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32. * As specified in the &enum nl80211_tdls_peer_capability. * - * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface + * @NL80211_ATTR_SOCKET_OWNER: Flag attribute, if set during interface * creation then the new interface will be owned by the netlink socket - * that created it and will be destroyed when the socket is closed + * that created it and will be destroyed when the socket is closed. * * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is * the TDLS link initiator. @@ -1638,6 +1684,11 @@ enum nl80211_commands { * @NL80211_ATTR_SMPS_MODE: SMPS mode to use (ap mode). see * &enum nl80211_smps_mode. 
* + * @NL80211_ATTR_OPER_CLASS: operating class + * + * @NL80211_ATTR_MAC_MASK: MAC address mask + * + * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1973,7 +2024,7 @@ enum nl80211_attrs { NL80211_ATTR_TDLS_PEER_CAPABILITY, - NL80211_ATTR_IFACE_SOCKET_OWNER, + NL80211_ATTR_SOCKET_OWNER, NL80211_ATTR_CSA_C_OFFSETS_TX, NL80211_ATTR_MAX_CSA_COUNTERS, @@ -1990,15 +2041,21 @@ enum nl80211_attrs { NL80211_ATTR_SMPS_MODE, + NL80211_ATTR_OPER_CLASS, + + NL80211_ATTR_MAC_MASK, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, + NUM_NL80211_ATTR = __NL80211_ATTR_AFTER_LAST, NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 }; /* source-level API compatibility */ #define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION #define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG +#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER /* * Allow user space programs to use #ifdef on new attributes by defining them @@ -2064,6 +2121,8 @@ enum nl80211_attrs { * and therefore can't be created in the normal ways, use the * %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE * commands to create and destroy one + * @NL80211_IF_TYPE_OCB: Outside Context of a BSS + * This mode corresponds to the MIB variable dot11OCBActivated=true * @NL80211_IFTYPE_MAX: highest interface type number currently defined * @NUM_NL80211_IFTYPES: number of defined interface types * @@ -2083,6 +2142,7 @@ enum nl80211_iftype { NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO, NL80211_IFTYPE_P2P_DEVICE, + NL80211_IFTYPE_OCB, /* keep last */ NUM_NL80211_IFTYPES, @@ -2631,6 +2691,11 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated * base on contiguous rules and wider channels will be allowed to cross * multiple contiguous/overlapping frequency ranges. + * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT + * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation + * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation + * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed + * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -2643,11 +2708,18 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_IR = 1<<7, __NL80211_RRF_NO_IBSS = 1<<8, NL80211_RRF_AUTO_BW = 1<<11, + NL80211_RRF_GO_CONCURRENT = 1<<12, + NL80211_RRF_NO_HT40MINUS = 1<<13, + NL80211_RRF_NO_HT40PLUS = 1<<14, + NL80211_RRF_NO_80MHZ = 1<<15, + NL80211_RRF_NO_160MHZ = 1<<16, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR #define NL80211_RRF_NO_IBSS NL80211_RRF_NO_IR #define NL80211_RRF_NO_IR NL80211_RRF_NO_IR +#define NL80211_RRF_NO_HT40 (NL80211_RRF_NO_HT40MINUS |\ + NL80211_RRF_NO_HT40PLUS) /* For backport compatibility with older userspace */ #define NL80211_RRF_NO_IR_ALL (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS) @@ -3379,6 +3451,8 @@ enum nl80211_ps_state { * interval in which %NL80211_ATTR_CQM_TXE_PKTS and * %NL80211_ATTR_CQM_TXE_RATE must be satisfied before generating an * %NL80211_CMD_NOTIFY_CQM. Set to 0 to turn off TX error reporting. 
+ * @NL80211_ATTR_CQM_BEACON_LOSS_EVENT: flag attribute that's set in a beacon + * loss event * @__NL80211_ATTR_CQM_AFTER_LAST: internal * @NL80211_ATTR_CQM_MAX: highest key attribute */ @@ -3391,6 +3465,7 @@ enum nl80211_attr_cqm { NL80211_ATTR_CQM_TXE_RATE, NL80211_ATTR_CQM_TXE_PKTS, NL80211_ATTR_CQM_TXE_INTVL, + NL80211_ATTR_CQM_BEACON_LOSS_EVENT, /* keep last */ __NL80211_ATTR_CQM_AFTER_LAST, @@ -3403,9 +3478,7 @@ enum nl80211_attr_cqm { * configured threshold * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the * configured threshold - * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: The device experienced beacon loss. - * (Note that deauth/disassoc will still follow if the AP is not - * available. This event might get used as roaming event, etc.) + * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: (reserved, never sent) */ enum nl80211_cqm_rssi_threshold_event { NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, @@ -3545,6 +3618,25 @@ struct nl80211_pattern_support { * @NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS: For wakeup reporting only, * the TCP connection ran out of tokens to use for data to send to the * service + * @NL80211_WOWLAN_TRIG_NET_DETECT: wake up when a configured network + * is detected. This is a nested attribute that contains the + * same attributes used with @NL80211_CMD_START_SCHED_SCAN. It + * specifies how the scan is performed (e.g. the interval and the + * channels to scan) as well as the scan results that will + * trigger a wake (i.e. the matchsets). + * @NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS: nested attribute + * containing an array with information about what triggered the + * wake up. If no elements are present in the array, it means + * that the information is not available. If more than one + * element is present, it means that more than one match + * occurred. + * Each element in the array is a nested attribute that contains + * one optional %NL80211_ATTR_SSID attribute and one optional + * %NL80211_ATTR_SCAN_FREQUENCIES attribute. At least one of + * these attributes must be present. If + * %NL80211_ATTR_SCAN_FREQUENCIES contains more than one + * frequency, it means that the match occurred in more than one + * channel. * @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers * @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number * @@ -3570,6 +3662,8 @@ enum nl80211_wowlan_triggers { NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST, NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS, + NL80211_WOWLAN_TRIG_NET_DETECT, + NL80211_WOWLAN_TRIG_NET_DETECT_RESULTS, /* keep last */ NUM_NL80211_WOWLAN_TRIG, @@ -4042,6 +4136,27 @@ enum nl80211_ap_sme_features { * multiplexing powersave, ie. can turn off all but one chain * and then wake the rest up as required after, for example, * rts/cts handshake. + * @NL80211_FEATURE_SUPPORTS_WMM_ADMISSION: the device supports setting up WMM + * TSPEC sessions (TID aka TSID 0-7) with the %NL80211_CMD_ADD_TX_TS + * command. Standard IEEE 802.11 TSPEC setup is not yet supported, it + * needs to be able to handle Block-Ack agreements and other things. + * @NL80211_FEATURE_MAC_ON_CREATE: Device supports configuring + * the vif's MAC address upon creation. + * See 'macaddr' field in the vif_params (cfg80211.h). + * @NL80211_FEATURE_TDLS_CHANNEL_SWITCH: Driver supports channel switching when + * operating as a TDLS peer. 
+ * @NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR: This device/driver supports using a + * random MAC address during scan (if the device is unassociated); the + * %NL80211_SCAN_FLAG_RANDOM_ADDR flag may be set for scans and the MAC + * address mask/value will be used. + * @NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR: This device/driver supports + * using a random MAC address for every scan iteration during scheduled + * scan (while not associated), the %NL80211_SCAN_FLAG_RANDOM_ADDR may + * be set for scheduled scan and the MAC address mask/value will be used. + * @NL80211_FEATURE_ND_RANDOM_MAC_ADDR: This device/driver supports using a + * random MAC address for every scan iteration during "net detect", i.e. + * scan in unassociated WoWLAN, the %NL80211_SCAN_FLAG_RANDOM_ADDR may + * be set for scheduled scan and the MAC address mask/value will be used. */ enum nl80211_feature_flags { NL80211_FEATURE_SK_TX_STATUS = 1 << 0, @@ -4070,6 +4185,12 @@ enum nl80211_feature_flags { NL80211_FEATURE_ACKTO_ESTIMATION = 1 << 23, NL80211_FEATURE_STATIC_SMPS = 1 << 24, NL80211_FEATURE_DYNAMIC_SMPS = 1 << 25, + NL80211_FEATURE_SUPPORTS_WMM_ADMISSION = 1 << 26, + NL80211_FEATURE_MAC_ON_CREATE = 1 << 27, + NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28, + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29, + NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30, + NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31, }; /** @@ -4118,11 +4239,21 @@ enum nl80211_connect_failed_reason { * dangerous because will destroy stations performance as a lot of frames * will be lost while scanning off-channel, therefore it must be used only * when really needed + * @NL80211_SCAN_FLAG_RANDOM_ADDR: use a random MAC address for this scan (or + * for scheduled scan: a different one for every scan iteration). When the + * flag is set, depending on device capabilities the @NL80211_ATTR_MAC and + * @NL80211_ATTR_MAC_MASK attributes may also be given in which case only + * the masked bits will be preserved from the MAC address and the remainder + * randomised. If the attributes are not given full randomisation (46 bits, + * locally administered 1, multicast 0) is assumed. + * This flag must not be requested when the feature isn't supported, check + * the nl80211 feature flags for the device. 
*/ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, NL80211_SCAN_FLAG_FLUSH = 1<<1, NL80211_SCAN_FLAG_AP = 1<<2, + NL80211_SCAN_FLAG_RANDOM_ADDR = 1<<3, }; /** diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h index 29a7d8619d8d..26386cf3db44 100644 --- a/include/uapi/linux/nvme.h +++ b/include/uapi/linux/nvme.h @@ -181,6 +181,22 @@ enum { NVME_LBART_ATTRIB_HIDE = 1 << 1, }; +struct nvme_reservation_status { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[13]; + struct { + __le16 cntlid; + __u8 rcsts; + __u8 resv3[5]; + __le64 hostid; + __le64 rkey; + } regctl_ds[]; +}; + /* I/O commands */ enum nvme_opcode { @@ -189,7 +205,12 @@ enum nvme_opcode { nvme_cmd_read = 0x02, nvme_cmd_write_uncor = 0x04, nvme_cmd_compare = 0x05, + nvme_cmd_write_zeroes = 0x08, nvme_cmd_dsm = 0x09, + nvme_cmd_resv_register = 0x0d, + nvme_cmd_resv_report = 0x0e, + nvme_cmd_resv_acquire = 0x11, + nvme_cmd_resv_release = 0x15, }; struct nvme_common_command { @@ -305,7 +326,11 @@ enum { NVME_FEAT_IRQ_CONFIG = 0x09, NVME_FEAT_WRITE_ATOMIC = 0x0a, NVME_FEAT_ASYNC_EVENT = 0x0b, - NVME_FEAT_SW_PROGRESS = 0x0c, + NVME_FEAT_AUTO_PST = 0x0c, + NVME_FEAT_SW_PROGRESS = 0x80, + NVME_FEAT_HOST_ID = 0x81, + NVME_FEAT_RESV_MASK = 0x82, + NVME_FEAT_RESV_PERSIST = 0x83, NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, @@ -440,9 +465,15 @@ enum { NVME_SC_FUSED_MISSING = 0xa, NVME_SC_INVALID_NS = 0xb, NVME_SC_CMD_SEQ_ERROR = 0xc, + NVME_SC_SGL_INVALID_LAST = 0xd, + NVME_SC_SGL_INVALID_COUNT = 0xe, + NVME_SC_SGL_INVALID_DATA = 0xf, + NVME_SC_SGL_INVALID_METADATA = 0x10, + NVME_SC_SGL_INVALID_TYPE = 0x11, NVME_SC_LBA_RANGE = 0x80, NVME_SC_CAP_EXCEEDED = 0x81, NVME_SC_NS_NOT_READY = 0x82, + NVME_SC_RESERVATION_CONFLICT = 0x83, NVME_SC_CQ_INVALID = 0x100, NVME_SC_QID_INVALID = 0x101, NVME_SC_QUEUE_SIZE = 0x102, @@ -454,7 +485,15 @@ enum { NVME_SC_INVALID_VECTOR = 0x108, NVME_SC_INVALID_LOG_PAGE = 0x109, NVME_SC_INVALID_FORMAT = 0x10a, + NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, + NVME_SC_INVALID_QUEUE = 0x10c, + NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, + NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, + NVME_SC_FEATURE_NOT_PER_NS = 0x10f, + NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, NVME_SC_BAD_ATTRIBUTES = 0x180, + NVME_SC_INVALID_PI = 0x181, + NVME_SC_READ_ONLY = 0x182, NVME_SC_WRITE_FAULT = 0x280, NVME_SC_READ_ERROR = 0x281, NVME_SC_GUARD_CHECK = 0x282, @@ -489,7 +528,7 @@ struct nvme_user_io { __u16 appmask; }; -struct nvme_admin_cmd { +struct nvme_passthru_cmd { __u8 opcode; __u8 flags; __u16 rsvd1; @@ -510,8 +549,11 @@ struct nvme_admin_cmd { __u32 result; }; +#define nvme_admin_cmd nvme_passthru_cmd + #define NVME_IOCTL_ID _IO('N', 0x40) #define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd) #define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io) +#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd) #endif /* _UAPI_LINUX_NVME_H */ diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 435eabc5ffaa..3a6dcaa359b7 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -157,6 +157,11 @@ enum ovs_packet_cmd { * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content * specified there. 
+ * @OVS_PACKET_ATTR_EGRESS_TUN_KEY: Present for an %OVS_PACKET_CMD_ACTION + * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an + * %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the + * output port is actually a tunnel port. Contains the output tunnel key + * extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes. * * These attributes follow the &struct ovs_header within the Generic Netlink * payload for %OVS_PACKET_* commands. @@ -167,6 +172,8 @@ enum ovs_packet_attr { OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */ OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ + OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* + attributes. */ __OVS_PACKET_ATTR_MAX }; @@ -293,6 +300,9 @@ enum ovs_key_attr { OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash is not computed by the datapath. */ OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */ + OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls. + * The implementation may restrict + * the accepted length of the array. */ #ifdef __KERNEL__ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */ @@ -312,6 +322,8 @@ enum ovs_tunnel_key_attr { OVS_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */ OVS_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */ OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */ + OVS_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */ + OVS_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */ __OVS_TUNNEL_KEY_ATTR_MAX }; @@ -340,6 +352,10 @@ struct ovs_key_ethernet { __u8 eth_dst[ETH_ALEN]; }; +struct ovs_key_mpls { + __be32 mpls_lse; +}; + struct ovs_key_ipv4 { __be32 ipv4_src; __be32 ipv4_dst; @@ -393,9 +409,9 @@ struct ovs_key_arp { }; struct ovs_key_nd { - __u32 nd_target[4]; - __u8 nd_sll[ETH_ALEN]; - __u8 nd_tll[ETH_ALEN]; + __be32 nd_target[4]; + __u8 nd_sll[ETH_ALEN]; + __u8 nd_tll[ETH_ALEN]; }; /** @@ -441,6 +457,8 @@ enum ovs_flow_attr { OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */ OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */ OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */ + OVS_FLOW_ATTR_PROBE, /* Flow operation is a feature probe, error + * logging should be suppressed. */ __OVS_FLOW_ATTR_MAX }; @@ -473,17 +491,34 @@ enum ovs_sample_attr { * message should be sent. Required. * @OVS_USERSPACE_ATTR_USERDATA: If present, its variable-length argument is * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. + * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get + * tunnel info. */ enum ovs_userspace_attr { OVS_USERSPACE_ATTR_UNSPEC, OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */ OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */ + OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port + * to get tunnel info. */ __OVS_USERSPACE_ATTR_MAX }; #define OVS_USERSPACE_ATTR_MAX (__OVS_USERSPACE_ATTR_MAX - 1) /** + * struct ovs_action_push_mpls - %OVS_ACTION_ATTR_PUSH_MPLS action argument. + * @mpls_lse: MPLS label stack entry to push. + * @mpls_ethertype: Ethertype to set in the encapsulating ethernet frame. + * + * The only values @mpls_ethertype should ever be given are %ETH_P_MPLS_UC and + * %ETH_P_MPLS_MC, indicating MPLS unicast or multicast. Other are rejected. 
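A small sketch of how the ovs_action_push_mpls argument documented above might be filled by a userspace controller. The helper and the RFC 3032 bit layout of the label stack entry (label in bits 31:12, TC in 11:9, bottom-of-stack in bit 8, TTL in 7:0) are assumptions for illustration; only the struct itself and ETH_P_MPLS_UC come from the uapi headers.

#include <stdint.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/openvswitch.h>

/* Illustrative helper: build the 32-bit MPLS label stack entry. */
static struct ovs_action_push_mpls make_push_mpls(uint32_t label, uint8_t tc,
                                                  int bottom, uint8_t ttl)
{
        struct ovs_action_push_mpls a;
        uint32_t lse = (label << 12) | ((uint32_t)tc << 9) |
                       ((bottom ? 1u : 0u) << 8) | ttl;

        a.mpls_lse = htonl(lse);                 /* __be32, network order */
        a.mpls_ethertype = htons(ETH_P_MPLS_UC); /* unicast MPLS */
        return a;
}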
+ */ +struct ovs_action_push_mpls { + __be32 mpls_lse; + __be16 mpls_ethertype; /* Either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC */ +}; + +/** * struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument. * @vlan_tpid: Tag protocol identifier (TPID) to push. * @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set @@ -534,6 +569,15 @@ struct ovs_action_hash { * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet. * @OVS_ACTION_ATTR_SAMPLE: Probabilitically executes actions, as specified in * the nested %OVS_SAMPLE_ATTR_* attributes. + * @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the + * top of the packets MPLS label stack. Set the ethertype of the + * encapsulating frame to either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC to + * indicate the new packet contents. + * @OVS_ACTION_ATTR_POP_MPLS: Pop an MPLS label stack entry off of the + * packet's MPLS label stack. Set the encapsulating frame's ethertype to + * indicate the new packet contents. This could potentially still be + * %ETH_P_MPLS if the resulting MPLS label stack is not empty. If there + * is no MPLS label stack, as determined by ethertype, no action is taken. * * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all * fields within a header are modifiable, e.g. the IPv4 protocol and fragment @@ -550,6 +594,9 @@ enum ovs_action_attr { OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */ OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */ + OVS_ACTION_ATTR_PUSH_MPLS, /* struct ovs_action_push_mpls. */ + OVS_ACTION_ATTR_POP_MPLS, /* __be16 ethertype. */ + __OVS_ACTION_ATTR_MAX }; diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 9d845404d875..9b79abbd1ab8 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -137,8 +137,9 @@ enum perf_event_sample_format { PERF_SAMPLE_DATA_SRC = 1U << 15, PERF_SAMPLE_IDENTIFIER = 1U << 16, PERF_SAMPLE_TRANSACTION = 1U << 17, + PERF_SAMPLE_REGS_INTR = 1U << 18, - PERF_SAMPLE_MAX = 1U << 18, /* non-ABI */ + PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */ }; /* @@ -238,6 +239,7 @@ enum perf_event_read_format { #define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ #define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */ /* add: sample_stack_user */ +#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */ /* * Hardware event_id to monitor via a performance monitoring event: @@ -334,6 +336,15 @@ struct perf_event_attr { /* Align to u64. */ __u32 __reserved_2; + /* + * Defines set of regs to dump for each sample + * state captured on: + * - precise = 0: PMU interrupt + * - precise > 0: sampled instruction + * + * See asm/perf_regs.h for details. + */ + __u64 sample_regs_intr; }; #define perf_flags(attr) (*(&(attr)->read_format + 1)) @@ -686,6 +697,8 @@ enum perf_event_type { * { u64 weight; } && PERF_SAMPLE_WEIGHT * { u64 data_src; } && PERF_SAMPLE_DATA_SRC * { u64 transaction; } && PERF_SAMPLE_TRANSACTION + * { u64 abi; # enum perf_sample_regs_abi + * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR * }; */ PERF_RECORD_SAMPLE = 9, diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 513df75d0fc9..89f63503f903 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -179,4 +179,10 @@ struct prctl_mm_map { #define PR_SET_THP_DISABLE 41 #define PR_GET_THP_DISABLE 42 +/* + * Tell the kernel to start/stop helping userspace manage bounds tables. 
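For the PERF_SAMPLE_REGS_INTR / sample_regs_intr additions in the perf_event.h hunks above, a profiler might request interrupt-time register values roughly as follows. The register bit positions are architecture specific (see asm/perf_regs.h); the x86 mask shown here is only an assumed example.

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Sketch: sample cycles and capture a couple of registers as seen at the
 * PMU interrupt (precise_ip == 0). */
static int open_cycles_with_intr_regs(pid_t pid, int cpu)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size   = sizeof(attr);              /* >= PERF_ATTR_SIZE_VER4 */
        attr.type   = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period    = 100000;
        attr.sample_type      = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_INTR;
        attr.sample_regs_intr = (1ULL << 0) | (1ULL << 6); /* e.g. AX and BP on x86 */

        return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}

Each PERF_RECORD_SAMPLE then carries the abi word followed by one u64 per bit set in the mask, as described in the record layout above.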
+ */ +#define PR_MPX_ENABLE_MANAGEMENT 43 +#define PR_MPX_DISABLE_MANAGEMENT 44 + #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index eb0f1a554d7b..9c9b8b4480cd 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -235,6 +235,7 @@ enum { #define RTPROT_NTK 15 /* Netsukuku */ #define RTPROT_DHCP 16 /* DHCP client */ #define RTPROT_MROUTED 17 /* Multicast daemon */ +#define RTPROT_BABEL 42 /* Babel daemon */ /* rtm_scope diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h index b932be9f5c5b..cc89ddefa926 100644 --- a/include/uapi/linux/sched.h +++ b/include/uapi/linux/sched.h @@ -23,8 +23,8 @@ #define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */ /* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state) and is now available for re-use. */ -#define CLONE_NEWUTS 0x04000000 /* New utsname group? */ -#define CLONE_NEWIPC 0x08000000 /* New ipcs */ +#define CLONE_NEWUTS 0x04000000 /* New utsname namespace */ +#define CLONE_NEWIPC 0x08000000 /* New ipc namespace */ #define CLONE_NEWUSER 0x10000000 /* New user namespace */ #define CLONE_NEWPID 0x20000000 /* New pid namespace */ #define CLONE_NEWNET 0x40000000 /* New network namespace */ diff --git a/include/uapi/linux/sem.h b/include/uapi/linux/sem.h index 541fce03b50c..dd73b908b2f3 100644 --- a/include/uapi/linux/sem.h +++ b/include/uapi/linux/sem.h @@ -63,10 +63,22 @@ struct seminfo { int semaem; }; -#define SEMMNI 128 /* <= IPCMNI max # of semaphore identifiers */ -#define SEMMSL 250 /* <= 8 000 max num of semaphores per id */ +/* + * SEMMNI, SEMMSL and SEMMNS are default values which can be + * modified by sysctl. + * The values has been chosen to be larger than necessary for any + * known configuration. + * + * SEMOPM should not be increased beyond 1000, otherwise there is the + * risk that semop()/semtimedop() fails due to kernel memory fragmentation when + * allocating the sop array. 
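The PR_MPX_ENABLE_MANAGEMENT / PR_MPX_DISABLE_MANAGEMENT commands added to prctl.h above are plain prctl() calls; the remaining arguments are passed as zero here, and the fallback defines are only a convenience for building against older headers. A minimal sketch:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_MPX_ENABLE_MANAGEMENT        /* for pre-3.19 userspace headers */
#define PR_MPX_ENABLE_MANAGEMENT  43
#define PR_MPX_DISABLE_MANAGEMENT 44
#endif

/* Opt this process in to kernel-assisted bounds-table management; on CPUs
 * or kernels without MPX support the call is expected to fail. */
int main(void)
{
        if (prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0))
                perror("PR_MPX_ENABLE_MANAGEMENT");
        else
                prctl(PR_MPX_DISABLE_MANAGEMENT, 0, 0, 0, 0);
        return 0;
}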
+ */ + + +#define SEMMNI 32000 /* <= IPCMNI max # of semaphore identifiers */ +#define SEMMSL 32000 /* <= INT_MAX max num of semaphores per id */ #define SEMMNS (SEMMNI*SEMMSL) /* <= INT_MAX max # of semaphores in system */ -#define SEMOPM 32 /* <= 1 000 max num of ops per semop call */ +#define SEMOPM 500 /* <= 1 000 max num of ops per semop call */ #define SEMVMX 32767 /* <= 32767 semaphore maximum value */ #define SEMAEM SEMVMX /* adjust on exit max value */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index df40137f33dd..b22224100011 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -156,6 +156,7 @@ enum UDP_MIB_RCVBUFERRORS, /* RcvbufErrors */ UDP_MIB_SNDBUFERRORS, /* SndbufErrors */ UDP_MIB_CSUMERRORS, /* InCsumErrors */ + UDP_MIB_IGNOREDMULTI, /* IgnoredMulti */ __UDP_MIB_MAX }; @@ -265,6 +266,10 @@ enum LINUX_MIB_TCPWANTZEROWINDOWADV, /* TCPWantZeroWindowAdv */ LINUX_MIB_TCPSYNRETRANS, /* TCPSynRetrans */ LINUX_MIB_TCPORIGDATASENT, /* TCPOrigDataSent */ + LINUX_MIB_TCPHYSTARTTRAINDETECT, /* TCPHystartTrainDetect */ + LINUX_MIB_TCPHYSTARTTRAINCWND, /* TCPHystartTrainCwnd */ + LINUX_MIB_TCPHYSTARTDELAYDETECT, /* TCPHystartDelayDetect */ + LINUX_MIB_TCPHYSTARTDELAYCWND, /* TCPHystartDelayCwnd */ __LINUX_MIB_MAX }; diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 43aaba1cc037..0956373b56db 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -153,6 +153,7 @@ enum KERN_MAX_LOCK_DEPTH=74, /* int: rtmutex's maximum lock depth */ KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ + KERN_PANIC_ON_WARN=77, /* int: call panic() in WARN() functions */ }; diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild index 56f121605c99..b057da2b87a4 100644 --- a/include/uapi/linux/tc_act/Kbuild +++ b/include/uapi/linux/tc_act/Kbuild @@ -7,3 +7,4 @@ header-y += tc_mirred.h header-y += tc_nat.h header-y += tc_pedit.h header-y += tc_skbedit.h +header-y += tc_vlan.h diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h new file mode 100644 index 000000000000..f7b8d448b960 --- /dev/null +++ b/include/uapi/linux/tc_act/tc_vlan.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_TC_VLAN_H +#define __LINUX_TC_VLAN_H + +#include <linux/pkt_cls.h> + +#define TCA_ACT_VLAN 12 + +#define TCA_VLAN_ACT_POP 1 +#define TCA_VLAN_ACT_PUSH 2 + +struct tc_vlan { + tc_gen; + int v_action; +}; + +enum { + TCA_VLAN_UNSPEC, + TCA_VLAN_TM, + TCA_VLAN_PARMS, + TCA_VLAN_PUSH_VLAN_ID, + TCA_VLAN_PUSH_VLAN_PROTOCOL, + __TCA_VLAN_MAX, +}; +#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1) + +#endif diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h new file mode 100644 index 000000000000..8d723824ad69 --- /dev/null +++ b/include/uapi/linux/tipc_netlink.h @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2014, Ericsson AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _LINUX_TIPC_NETLINK_H_ +#define _LINUX_TIPC_NETLINK_H_ + +#define TIPC_GENL_V2_NAME "TIPCv2" +#define TIPC_GENL_V2_VERSION 0x1 + +/* Netlink commands */ +enum { + TIPC_NL_UNSPEC, + TIPC_NL_LEGACY, + TIPC_NL_BEARER_DISABLE, + TIPC_NL_BEARER_ENABLE, + TIPC_NL_BEARER_GET, + TIPC_NL_BEARER_SET, + TIPC_NL_SOCK_GET, + TIPC_NL_PUBL_GET, + TIPC_NL_LINK_GET, + TIPC_NL_LINK_SET, + TIPC_NL_LINK_RESET_STATS, + TIPC_NL_MEDIA_GET, + TIPC_NL_MEDIA_SET, + TIPC_NL_NODE_GET, + TIPC_NL_NET_GET, + TIPC_NL_NET_SET, + TIPC_NL_NAME_TABLE_GET, + + __TIPC_NL_CMD_MAX, + TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1 +}; + +/* Top level netlink attributes */ +enum { + TIPC_NLA_UNSPEC, + TIPC_NLA_BEARER, /* nest */ + TIPC_NLA_SOCK, /* nest */ + TIPC_NLA_PUBL, /* nest */ + TIPC_NLA_LINK, /* nest */ + TIPC_NLA_MEDIA, /* nest */ + TIPC_NLA_NODE, /* nest */ + TIPC_NLA_NET, /* nest */ + TIPC_NLA_NAME_TABLE, /* nest */ + + __TIPC_NLA_MAX, + TIPC_NLA_MAX = __TIPC_NLA_MAX - 1 +}; + +/* Bearer info */ +enum { + TIPC_NLA_BEARER_UNSPEC, + TIPC_NLA_BEARER_NAME, /* string */ + TIPC_NLA_BEARER_PROP, /* nest */ + TIPC_NLA_BEARER_DOMAIN, /* u32 */ + + __TIPC_NLA_BEARER_MAX, + TIPC_NLA_BEARER_MAX = __TIPC_NLA_BEARER_MAX - 1 +}; + +/* Socket info */ +enum { + TIPC_NLA_SOCK_UNSPEC, + TIPC_NLA_SOCK_ADDR, /* u32 */ + TIPC_NLA_SOCK_REF, /* u32 */ + TIPC_NLA_SOCK_CON, /* nest */ + TIPC_NLA_SOCK_HAS_PUBL, /* flag */ + + __TIPC_NLA_SOCK_MAX, + TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1 +}; + +/* Link info */ +enum { + TIPC_NLA_LINK_UNSPEC, + TIPC_NLA_LINK_NAME, /* string */ + TIPC_NLA_LINK_DEST, /* u32 */ + TIPC_NLA_LINK_MTU, /* u32 */ + TIPC_NLA_LINK_BROADCAST, /* flag */ + TIPC_NLA_LINK_UP, /* flag */ + TIPC_NLA_LINK_ACTIVE, /* flag */ + TIPC_NLA_LINK_PROP, /* nest */ + TIPC_NLA_LINK_STATS, /* nest */ + TIPC_NLA_LINK_RX, /* u32 */ + TIPC_NLA_LINK_TX, /* u32 */ + + __TIPC_NLA_LINK_MAX, + TIPC_NLA_LINK_MAX = __TIPC_NLA_LINK_MAX - 1 +}; + +/* Media info */ +enum { + TIPC_NLA_MEDIA_UNSPEC, + TIPC_NLA_MEDIA_NAME, /* string */ + 
TIPC_NLA_MEDIA_PROP, /* nest */ + + __TIPC_NLA_MEDIA_MAX, + TIPC_NLA_MEDIA_MAX = __TIPC_NLA_MEDIA_MAX - 1 +}; + +/* Node info */ +enum { + TIPC_NLA_NODE_UNSPEC, + TIPC_NLA_NODE_ADDR, /* u32 */ + TIPC_NLA_NODE_UP, /* flag */ + + __TIPC_NLA_NODE_MAX, + TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1 +}; + +/* Net info */ +enum { + TIPC_NLA_NET_UNSPEC, + TIPC_NLA_NET_ID, /* u32 */ + TIPC_NLA_NET_ADDR, /* u32 */ + + __TIPC_NLA_NET_MAX, + TIPC_NLA_NET_MAX = __TIPC_NLA_NET_MAX - 1 +}; + +/* Name table info */ +enum { + TIPC_NLA_NAME_TABLE_UNSPEC, + TIPC_NLA_NAME_TABLE_PUBL, /* nest */ + + __TIPC_NLA_NAME_TABLE_MAX, + TIPC_NLA_NAME_TABLE_MAX = __TIPC_NLA_NAME_TABLE_MAX - 1 +}; + +/* Publication info */ +enum { + TIPC_NLA_PUBL_UNSPEC, + + TIPC_NLA_PUBL_TYPE, /* u32 */ + TIPC_NLA_PUBL_LOWER, /* u32 */ + TIPC_NLA_PUBL_UPPER, /* u32 */ + TIPC_NLA_PUBL_SCOPE, /* u32 */ + TIPC_NLA_PUBL_NODE, /* u32 */ + TIPC_NLA_PUBL_REF, /* u32 */ + TIPC_NLA_PUBL_KEY, /* u32 */ + + __TIPC_NLA_PUBL_MAX, + TIPC_NLA_PUBL_MAX = __TIPC_NLA_PUBL_MAX - 1 +}; + +/* Nest, connection info */ +enum { + TIPC_NLA_CON_UNSPEC, + + TIPC_NLA_CON_FLAG, /* flag */ + TIPC_NLA_CON_NODE, /* u32 */ + TIPC_NLA_CON_SOCK, /* u32 */ + TIPC_NLA_CON_TYPE, /* u32 */ + TIPC_NLA_CON_INST, /* u32 */ + + __TIPC_NLA_CON_MAX, + TIPC_NLA_CON_MAX = __TIPC_NLA_CON_MAX - 1 +}; + +/* Nest, link propreties. Valid for link, media and bearer */ +enum { + TIPC_NLA_PROP_UNSPEC, + + TIPC_NLA_PROP_PRIO, /* u32 */ + TIPC_NLA_PROP_TOL, /* u32 */ + TIPC_NLA_PROP_WIN, /* u32 */ + + __TIPC_NLA_PROP_MAX, + TIPC_NLA_PROP_MAX = __TIPC_NLA_PROP_MAX - 1 +}; + +/* Nest, statistics info */ +enum { + TIPC_NLA_STATS_UNSPEC, + + TIPC_NLA_STATS_RX_INFO, /* u32 */ + TIPC_NLA_STATS_RX_FRAGMENTS, /* u32 */ + TIPC_NLA_STATS_RX_FRAGMENTED, /* u32 */ + TIPC_NLA_STATS_RX_BUNDLES, /* u32 */ + TIPC_NLA_STATS_RX_BUNDLED, /* u32 */ + TIPC_NLA_STATS_TX_INFO, /* u32 */ + TIPC_NLA_STATS_TX_FRAGMENTS, /* u32 */ + TIPC_NLA_STATS_TX_FRAGMENTED, /* u32 */ + TIPC_NLA_STATS_TX_BUNDLES, /* u32 */ + TIPC_NLA_STATS_TX_BUNDLED, /* u32 */ + TIPC_NLA_STATS_MSG_PROF_TOT, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_CNT, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_TOT, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P0, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P1, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P2, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P3, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P4, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P5, /* u32 */ + TIPC_NLA_STATS_MSG_LEN_P6, /* u32 */ + TIPC_NLA_STATS_RX_STATES, /* u32 */ + TIPC_NLA_STATS_RX_PROBES, /* u32 */ + TIPC_NLA_STATS_RX_NACKS, /* u32 */ + TIPC_NLA_STATS_RX_DEFERRED, /* u32 */ + TIPC_NLA_STATS_TX_STATES, /* u32 */ + TIPC_NLA_STATS_TX_PROBES, /* u32 */ + TIPC_NLA_STATS_TX_NACKS, /* u32 */ + TIPC_NLA_STATS_TX_ACKS, /* u32 */ + TIPC_NLA_STATS_RETRANSMITTED, /* u32 */ + TIPC_NLA_STATS_DUPLICATES, /* u32 */ + TIPC_NLA_STATS_LINK_CONGS, /* u32 */ + TIPC_NLA_STATS_MAX_QUEUE, /* u32 */ + TIPC_NLA_STATS_AVG_QUEUE, /* u32 */ + + __TIPC_NLA_STATS_MAX, + TIPC_NLA_STATS_MAX = __TIPC_NLA_STATS_MAX - 1 +}; + +#endif diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h index 2f6f8cafe773..15273987093e 100644 --- a/include/uapi/linux/v4l2-common.h +++ b/include/uapi/linux/v4l2-common.h @@ -43,6 +43,8 @@ #define V4L2_SEL_TGT_CROP_DEFAULT 0x0001 /* Cropping bounds */ #define V4L2_SEL_TGT_CROP_BOUNDS 0x0002 +/* Native frame size */ +#define V4L2_SEL_TGT_NATIVE_SIZE 0x0003 /* Current composing area */ #define V4L2_SEL_TGT_COMPOSE 0x0100 /* Default composing area */ diff --git 
a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h index 1445e858854f..5a86d8ede09c 100644 --- a/include/uapi/linux/v4l2-mediabus.h +++ b/include/uapi/linux/v4l2-mediabus.h @@ -11,122 +11,10 @@ #ifndef __LINUX_V4L2_MEDIABUS_H #define __LINUX_V4L2_MEDIABUS_H +#include <linux/media-bus-format.h> #include <linux/types.h> #include <linux/videodev2.h> -/* - * These pixel codes uniquely identify data formats on the media bus. Mostly - * they correspond to similarly named V4L2_PIX_FMT_* formats, format 0 is - * reserved, V4L2_MBUS_FMT_FIXED shall be used by host-client pairs, where the - * data format is fixed. Additionally, "2X8" means that one pixel is transferred - * in two 8-bit samples, "BE" or "LE" specify in which order those samples are - * transferred over the bus: "LE" means that the least significant bits are - * transferred first, "BE" means that the most significant bits are transferred - * first, and "PADHI" and "PADLO" define which bits - low or high, in the - * incomplete high byte, are filled with padding bits. - * - * The pixel codes are grouped by type, bus_width, bits per component, samples - * per pixel and order of subsamples. Numerical values are sorted using generic - * numerical sort order (8 thus comes before 10). - * - * As their value can't change when a new pixel code is inserted in the - * enumeration, the pixel codes are explicitly given a numerical value. The next - * free values for each category are listed below, update them when inserting - * new pixel codes. - */ -enum v4l2_mbus_pixelcode { - V4L2_MBUS_FMT_FIXED = 0x0001, - - /* RGB - next is 0x100e */ - V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE = 0x1001, - V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE = 0x1002, - V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE = 0x1003, - V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE = 0x1004, - V4L2_MBUS_FMT_BGR565_2X8_BE = 0x1005, - V4L2_MBUS_FMT_BGR565_2X8_LE = 0x1006, - V4L2_MBUS_FMT_RGB565_2X8_BE = 0x1007, - V4L2_MBUS_FMT_RGB565_2X8_LE = 0x1008, - V4L2_MBUS_FMT_RGB666_1X18 = 0x1009, - V4L2_MBUS_FMT_RGB888_1X24 = 0x100a, - V4L2_MBUS_FMT_RGB888_2X12_BE = 0x100b, - V4L2_MBUS_FMT_RGB888_2X12_LE = 0x100c, - V4L2_MBUS_FMT_ARGB8888_1X32 = 0x100d, - - /* YUV (including grey) - next is 0x2024 */ - V4L2_MBUS_FMT_Y8_1X8 = 0x2001, - V4L2_MBUS_FMT_UV8_1X8 = 0x2015, - V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002, - V4L2_MBUS_FMT_VYUY8_1_5X8 = 0x2003, - V4L2_MBUS_FMT_YUYV8_1_5X8 = 0x2004, - V4L2_MBUS_FMT_YVYU8_1_5X8 = 0x2005, - V4L2_MBUS_FMT_UYVY8_2X8 = 0x2006, - V4L2_MBUS_FMT_VYUY8_2X8 = 0x2007, - V4L2_MBUS_FMT_YUYV8_2X8 = 0x2008, - V4L2_MBUS_FMT_YVYU8_2X8 = 0x2009, - V4L2_MBUS_FMT_Y10_1X10 = 0x200a, - V4L2_MBUS_FMT_UYVY10_2X10 = 0x2018, - V4L2_MBUS_FMT_VYUY10_2X10 = 0x2019, - V4L2_MBUS_FMT_YUYV10_2X10 = 0x200b, - V4L2_MBUS_FMT_YVYU10_2X10 = 0x200c, - V4L2_MBUS_FMT_Y12_1X12 = 0x2013, - V4L2_MBUS_FMT_UYVY8_1X16 = 0x200f, - V4L2_MBUS_FMT_VYUY8_1X16 = 0x2010, - V4L2_MBUS_FMT_YUYV8_1X16 = 0x2011, - V4L2_MBUS_FMT_YVYU8_1X16 = 0x2012, - V4L2_MBUS_FMT_YDYUYDYV8_1X16 = 0x2014, - V4L2_MBUS_FMT_UYVY10_1X20 = 0x201a, - V4L2_MBUS_FMT_VYUY10_1X20 = 0x201b, - V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d, - V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e, - V4L2_MBUS_FMT_YUV10_1X30 = 0x2016, - V4L2_MBUS_FMT_AYUV8_1X32 = 0x2017, - V4L2_MBUS_FMT_UYVY12_2X12 = 0x201c, - V4L2_MBUS_FMT_VYUY12_2X12 = 0x201d, - V4L2_MBUS_FMT_YUYV12_2X12 = 0x201e, - V4L2_MBUS_FMT_YVYU12_2X12 = 0x201f, - V4L2_MBUS_FMT_UYVY12_1X24 = 0x2020, - V4L2_MBUS_FMT_VYUY12_1X24 = 0x2021, - V4L2_MBUS_FMT_YUYV12_1X24 = 0x2022, - V4L2_MBUS_FMT_YVYU12_1X24 = 0x2023, - - /* Bayer - next is 0x3019 
*/ - V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001, - V4L2_MBUS_FMT_SGBRG8_1X8 = 0x3013, - V4L2_MBUS_FMT_SGRBG8_1X8 = 0x3002, - V4L2_MBUS_FMT_SRGGB8_1X8 = 0x3014, - V4L2_MBUS_FMT_SBGGR10_ALAW8_1X8 = 0x3015, - V4L2_MBUS_FMT_SGBRG10_ALAW8_1X8 = 0x3016, - V4L2_MBUS_FMT_SGRBG10_ALAW8_1X8 = 0x3017, - V4L2_MBUS_FMT_SRGGB10_ALAW8_1X8 = 0x3018, - V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 0x300b, - V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 0x300c, - V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 0x3009, - V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8 = 0x300d, - V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_BE = 0x3003, - V4L2_MBUS_FMT_SBGGR10_2X8_PADHI_LE = 0x3004, - V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_BE = 0x3005, - V4L2_MBUS_FMT_SBGGR10_2X8_PADLO_LE = 0x3006, - V4L2_MBUS_FMT_SBGGR10_1X10 = 0x3007, - V4L2_MBUS_FMT_SGBRG10_1X10 = 0x300e, - V4L2_MBUS_FMT_SGRBG10_1X10 = 0x300a, - V4L2_MBUS_FMT_SRGGB10_1X10 = 0x300f, - V4L2_MBUS_FMT_SBGGR12_1X12 = 0x3008, - V4L2_MBUS_FMT_SGBRG12_1X12 = 0x3010, - V4L2_MBUS_FMT_SGRBG12_1X12 = 0x3011, - V4L2_MBUS_FMT_SRGGB12_1X12 = 0x3012, - - /* JPEG compressed formats - next is 0x4002 */ - V4L2_MBUS_FMT_JPEG_1X8 = 0x4001, - - /* Vendor specific formats - next is 0x5002 */ - - /* S5C73M3 sensor specific interleaved UYVY and JPEG */ - V4L2_MBUS_FMT_S5C_UYVY_JPEG_1X8 = 0x5001, - - /* HSV - next is 0x6002 */ - V4L2_MBUS_FMT_AHSV8888_1X32 = 0x6001, -}; - /** * struct v4l2_mbus_framefmt - frame format on the media bus * @width: frame width @@ -134,6 +22,8 @@ enum v4l2_mbus_pixelcode { * @code: data format code (from enum v4l2_mbus_pixelcode) * @field: used interlacing type (from enum v4l2_field) * @colorspace: colorspace of the data (from enum v4l2_colorspace) + * @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding) + * @quantization: quantization of the data (from enum v4l2_quantization) */ struct v4l2_mbus_framefmt { __u32 width; @@ -141,7 +31,108 @@ struct v4l2_mbus_framefmt { __u32 code; __u32 field; __u32 colorspace; - __u32 reserved[7]; + __u32 ycbcr_enc; + __u32 quantization; + __u32 reserved[5]; +}; + +#ifndef __KERNEL__ +/* + * enum v4l2_mbus_pixelcode and its definitions are now deprecated, and + * MEDIA_BUS_FMT_ definitions (defined in media-bus-format.h) should be + * used instead. + * + * New defines should only be added to media-bus-format.h. The + * v4l2_mbus_pixelcode enum is frozen. 
+ */ + +#define V4L2_MBUS_FROM_MEDIA_BUS_FMT(name) \ + V4L2_MBUS_FMT_ ## name = MEDIA_BUS_FMT_ ## name + +enum v4l2_mbus_pixelcode { + V4L2_MBUS_FROM_MEDIA_BUS_FMT(FIXED), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB666_1X18), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(ARGB8888_1X32), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UV8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1_5X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_2X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_2X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YDYUYDYV8_1X16), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_1X20), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUV10_1X30), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(AYUV8_1X32), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_2X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_1X24), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_1X24), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_ALAW8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_DPCM8_1X8), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_BE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_LE), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_1X10), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG12_1X12), + V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB12_1X12), + + 
V4L2_MBUS_FROM_MEDIA_BUS_FMT(JPEG_1X8), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(S5C_UYVY_JPEG_1X8), + + V4L2_MBUS_FROM_MEDIA_BUS_FMT(AHSV8888_1X32), }; +#endif /* __KERNEL__ */ #endif diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h index a619cdd300ac..e0a7e3da498a 100644 --- a/include/uapi/linux/v4l2-subdev.h +++ b/include/uapi/linux/v4l2-subdev.h @@ -68,7 +68,7 @@ struct v4l2_subdev_crop { * struct v4l2_subdev_mbus_code_enum - Media bus format enumeration * @pad: pad number, as reported by the media API * @index: format index during enumeration - * @code: format code (from enum v4l2_mbus_pixelcode) + * @code: format code (MEDIA_BUS_FMT_ definitions) */ struct v4l2_subdev_mbus_code_enum { __u32 pad; @@ -81,7 +81,7 @@ struct v4l2_subdev_mbus_code_enum { * struct v4l2_subdev_frame_size_enum - Media bus format enumeration * @pad: pad number, as reported by the media API * @index: format index during enumeration - * @code: format code (from enum v4l2_mbus_pixelcode) + * @code: format code (MEDIA_BUS_FMT_ definitions) */ struct v4l2_subdev_frame_size_enum { __u32 index; @@ -109,7 +109,7 @@ struct v4l2_subdev_frame_interval { * struct v4l2_subdev_frame_interval_enum - Frame interval enumeration * @pad: pad number, as reported by the media API * @index: frame interval index during enumeration - * @code: format code (from enum v4l2_mbus_pixelcode) + * @code: format code (MEDIA_BUS_FMT_ definitions) * @width: frame width in pixels * @height: frame height in pixels * @interval: frame interval in seconds diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 1c2f84fd4d99..d279c1b75cf7 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -178,30 +178,103 @@ enum v4l2_memory { /* see also http://vektor.theorem.ca/graphics/ycbcr/ */ enum v4l2_colorspace { - /* ITU-R 601 -- broadcast NTSC/PAL */ + /* SMPTE 170M: used for broadcast NTSC/PAL SDTV */ V4L2_COLORSPACE_SMPTE170M = 1, - /* 1125-Line (US) HDTV */ + /* Obsolete pre-1998 SMPTE 240M HDTV standard, superseded by Rec 709 */ V4L2_COLORSPACE_SMPTE240M = 2, - /* HD and modern captures. */ + /* Rec.709: used for HDTV */ V4L2_COLORSPACE_REC709 = 3, - /* broken BT878 extents (601, luma range 16-253 instead of 16-235) */ + /* + * Deprecated, do not use. No driver will ever return this. This was + * based on a misunderstanding of the bt878 datasheet. + */ V4L2_COLORSPACE_BT878 = 4, - /* These should be useful. Assume 601 extents. */ + /* + * NTSC 1953 colorspace. This only makes sense when dealing with + * really, really old NTSC recordings. Superseded by SMPTE 170M. + */ V4L2_COLORSPACE_470_SYSTEM_M = 5, + + /* + * EBU Tech 3213 PAL/SECAM colorspace. This only makes sense when + * dealing with really old PAL/SECAM recordings. Superseded by + * SMPTE 170M. + */ V4L2_COLORSPACE_470_SYSTEM_BG = 6, - /* I know there will be cameras that send this. So, this is - * unspecified chromaticities and full 0-255 on each of the - * Y'CbCr components + /* + * Effectively shorthand for V4L2_COLORSPACE_SRGB, V4L2_YCBCR_ENC_601 + * and V4L2_QUANTIZATION_FULL_RANGE. To be used for (Motion-)JPEG. */ V4L2_COLORSPACE_JPEG = 7, - /* For RGB colourspaces, this is probably a good start. */ + /* For RGB colorspaces such as produces by most webcams. */ V4L2_COLORSPACE_SRGB = 8, + + /* AdobeRGB colorspace */ + V4L2_COLORSPACE_ADOBERGB = 9, + + /* BT.2020 colorspace, used for UHDTV. 
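Tying together the reworked colorspace comments above, the v4l2_ycbcr_encoding and v4l2_quantization enums that follow, and the ycbcr_enc/quantization fields added to struct v4l2_pix_format further down, a capture application can now spell out its colorimetry explicitly instead of relying on the DEFAULT values. A minimal sketch, with the device path assumed:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_format fmt;
        int fd = open("/dev/video0", O_RDWR);

        memset(&fmt, 0, sizeof(fmt));
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        fmt.fmt.pix.width        = 1920;
        fmt.fmt.pix.height       = 1080;
        fmt.fmt.pix.pixelformat  = V4L2_PIX_FMT_YUYV;
        fmt.fmt.pix.field        = V4L2_FIELD_NONE;
        fmt.fmt.pix.colorspace   = V4L2_COLORSPACE_REC709;
        /* New in this series: explicit HDTV encoding and limited range. */
        fmt.fmt.pix.ycbcr_enc    = V4L2_YCBCR_ENC_709;
        fmt.fmt.pix.quantization = V4L2_QUANTIZATION_LIM_RANGE;

        ioctl(fd, VIDIOC_S_FMT, &fmt);
        return 0;
}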
*/ + V4L2_COLORSPACE_BT2020 = 10, +}; + +enum v4l2_ycbcr_encoding { + /* + * Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the + * various colorspaces: + * + * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M, + * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_ADOBERGB and + * V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601 + * + * V4L2_COLORSPACE_REC709: V4L2_YCBCR_ENC_709 + * + * V4L2_COLORSPACE_SRGB: V4L2_YCBCR_ENC_SYCC + * + * V4L2_COLORSPACE_BT2020: V4L2_YCBCR_ENC_BT2020 + * + * V4L2_COLORSPACE_SMPTE240M: V4L2_YCBCR_ENC_SMPTE240M + */ + V4L2_YCBCR_ENC_DEFAULT = 0, + + /* ITU-R 601 -- SDTV */ + V4L2_YCBCR_ENC_601 = 1, + + /* Rec. 709 -- HDTV */ + V4L2_YCBCR_ENC_709 = 2, + + /* ITU-R 601/EN 61966-2-4 Extended Gamut -- SDTV */ + V4L2_YCBCR_ENC_XV601 = 3, + + /* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */ + V4L2_YCBCR_ENC_XV709 = 4, + + /* sYCC (Y'CbCr encoding of sRGB) */ + V4L2_YCBCR_ENC_SYCC = 5, + + /* BT.2020 Non-constant Luminance Y'CbCr */ + V4L2_YCBCR_ENC_BT2020 = 6, + + /* BT.2020 Constant Luminance Y'CbcCrc */ + V4L2_YCBCR_ENC_BT2020_CONST_LUM = 7, + + /* SMPTE 240M -- Obsolete HDTV */ + V4L2_YCBCR_ENC_SMPTE240M = 8, +}; + +enum v4l2_quantization { + /* + * The default for R'G'B' quantization is always full range. For + * Y'CbCr the quantization is always limited range, except for + * SYCC, XV601, XV709 or JPEG: those are full range. + */ + V4L2_QUANTIZATION_DEFAULT = 0, + V4L2_QUANTIZATION_FULL_RANGE = 1, + V4L2_QUANTIZATION_LIM_RANGE = 2, }; enum v4l2_priority { @@ -294,6 +367,8 @@ struct v4l2_pix_format { __u32 colorspace; /* enum v4l2_colorspace */ __u32 priv; /* private data, depends on pixelformat */ __u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */ + __u32 ycbcr_enc; /* enum v4l2_ycbcr_encoding */ + __u32 quantization; /* enum v4l2_quantization */ }; /* Pixel format FOURCC depth Description */ @@ -1249,6 +1324,7 @@ struct v4l2_input { #define V4L2_IN_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ #define V4L2_IN_CAP_CUSTOM_TIMINGS V4L2_IN_CAP_DV_TIMINGS /* For compatibility */ #define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */ +#define V4L2_IN_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */ /* * V I D E O O U T P U T S @@ -1272,6 +1348,7 @@ struct v4l2_output { #define V4L2_OUT_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ #define V4L2_OUT_CAP_CUSTOM_TIMINGS V4L2_OUT_CAP_DV_TIMINGS /* For compatibility */ #define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */ +#define V4L2_OUT_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */ /* * C O N T R O L S @@ -1777,6 +1854,8 @@ struct v4l2_plane_pix_format { * @plane_fmt: per-plane information * @num_planes: number of planes for this format * @flags: format flags (V4L2_PIX_FMT_FLAG_*) + * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding + * @quantization: enum v4l2_quantization, colorspace quantization */ struct v4l2_pix_format_mplane { __u32 width; @@ -1788,7 +1867,9 @@ struct v4l2_pix_format_mplane { struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES]; __u8 num_planes; __u8 flags; - __u8 reserved[10]; + __u8 ycbcr_enc; + __u8 quantization; + __u8 reserved[8]; } __attribute__ ((packed)); /** diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index 9ad67b267584..247c8ba8544a 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> +#include <linux/virtio_types.h> /* Feature bits */ #define 
VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */ @@ -114,18 +115,18 @@ struct virtio_blk_config { /* This is the first element of the read scatter-gather list. */ struct virtio_blk_outhdr { /* VIRTIO_BLK_T* */ - __u32 type; + __virtio32 type; /* io priority. */ - __u32 ioprio; + __virtio32 ioprio; /* Sector (ie. 512 byte offset) */ - __u64 sector; + __virtio64 sector; }; struct virtio_scsi_inhdr { - __u32 errors; - __u32 data_len; - __u32 sense_len; - __u32 residual; + __virtio32 errors; + __virtio32 data_len; + __virtio32 sense_len; + __virtio32 residual; }; /* And this is the final byte of the write scatter-gather list. */ diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 3ce768c6910d..a6d0cdeaacd4 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -38,14 +38,16 @@ #define VIRTIO_CONFIG_S_DRIVER 2 /* Driver has used its parts of the config, and is happy */ #define VIRTIO_CONFIG_S_DRIVER_OK 4 +/* Driver has finished configuring features */ +#define VIRTIO_CONFIG_S_FEATURES_OK 8 /* We've given up on this device. */ #define VIRTIO_CONFIG_S_FAILED 0x80 -/* Some virtio feature bits (currently bits 28 through 31) are reserved for the +/* Some virtio feature bits (currently bits 28 through 32) are reserved for the * transport being used (eg. virtio_ring), the rest are per-device feature * bits. */ #define VIRTIO_TRANSPORT_F_START 28 -#define VIRTIO_TRANSPORT_F_END 32 +#define VIRTIO_TRANSPORT_F_END 33 /* Do we get callbacks when the ring is completely used, even if we've * suppressed them? */ @@ -54,4 +56,7 @@ /* Can the device handle any descriptor layout? */ #define VIRTIO_F_ANY_LAYOUT 27 +/* v1.0 compliant. */ +#define VIRTIO_F_VERSION_1 32 + #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ diff --git a/include/uapi/linux/virtio_console.h b/include/uapi/linux/virtio_console.h index ba260dd0b33a..b7fb108c9310 100644 --- a/include/uapi/linux/virtio_console.h +++ b/include/uapi/linux/virtio_console.h @@ -32,6 +32,7 @@ #ifndef _UAPI_LINUX_VIRTIO_CONSOLE_H #define _UAPI_LINUX_VIRTIO_CONSOLE_H #include <linux/types.h> +#include <linux/virtio_types.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> @@ -58,9 +59,9 @@ struct virtio_console_config { * particular port. 
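The virtio_config.h hunk above adds the VIRTIO_CONFIG_S_FEATURES_OK status bit and the VIRTIO_F_VERSION_1 feature, while the surrounding headers switch fixed-width fields to __virtio16/32/64, whose byte order depends on that feature. A transport-agnostic sketch of how a hypothetical driver might tie the two together; the vdev structure and its hooks are invented for the example and stand in for whatever the PCI or MMIO transport provides.

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <linux/virtio_config.h>

struct vdev {
        uint64_t features;
        uint64_t (*get_features)(struct vdev *);
        void (*set_features)(struct vdev *, uint64_t);
        void (*set_status)(struct vdev *, uint8_t);
};

static bool negotiate(struct vdev *vdev, uint64_t driver_features)
{
        vdev->features = vdev->get_features(vdev) & driver_features;
        vdev->set_features(vdev, vdev->features);

        /* A v1.0 driver sets FEATURES_OK before DRIVER_OK; a real driver
         * re-reads the status afterwards to confirm the device accepted it. */
        vdev->set_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER |
                               VIRTIO_CONFIG_S_FEATURES_OK);
        return true;
}

/* __virtio16 fields are little-endian iff VIRTIO_F_VERSION_1 was negotiated,
 * otherwise guest-native ("legacy") endian. */
static uint16_t virtio16_to_host(const struct vdev *vdev, uint16_t val)
{
        if (vdev->features & (1ULL << VIRTIO_F_VERSION_1))
                return le16toh(val);
        return val;
}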
*/ struct virtio_console_control { - __u32 id; /* Port number */ - __u16 event; /* The kind of control event (see below) */ - __u16 value; /* Extra information for the key */ + __virtio32 id; /* Port number */ + __virtio16 event; /* The kind of control event (see below) */ + __virtio16 value; /* Extra information for the key */ }; /* Some events for control messages */ diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 172a7f00780c..b5f1677b291c 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -28,6 +28,7 @@ #include <linux/types.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> +#include <linux/virtio_types.h> #include <linux/if_ether.h> /* The feature bitmap for virtio net */ @@ -84,17 +85,17 @@ struct virtio_net_hdr { #define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP #define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set __u8 gso_type; - __u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ - __u16 gso_size; /* Bytes to append to hdr_len per frame */ - __u16 csum_start; /* Position to start checksumming from */ - __u16 csum_offset; /* Offset after that to place checksum */ + __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ + __virtio16 gso_size; /* Bytes to append to hdr_len per frame */ + __virtio16 csum_start; /* Position to start checksumming from */ + __virtio16 csum_offset; /* Offset after that to place checksum */ }; /* This is the version of the header to use when the MRG_RXBUF * feature has been negotiated. */ struct virtio_net_hdr_mrg_rxbuf { struct virtio_net_hdr hdr; - __u16 num_buffers; /* Number of merged rx buffers */ + __virtio16 num_buffers; /* Number of merged rx buffers */ }; /* @@ -149,7 +150,7 @@ typedef __u8 virtio_net_ctrl_ack; * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available. */ struct virtio_net_ctrl_mac { - __u32 entries; + __virtio32 entries; __u8 macs[][ETH_ALEN]; } __attribute__((packed)); @@ -193,7 +194,7 @@ struct virtio_net_ctrl_mac { * specified. */ struct virtio_net_ctrl_mq { - __u16 virtqueue_pairs; + __virtio16 virtqueue_pairs; }; #define VIRTIO_NET_CTRL_MQ 4 diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index a99f9b7caa67..61c818a7fe70 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -32,6 +32,7 @@ * * Copyright Rusty Russell IBM Corporation 2007. */ #include <linux/types.h> +#include <linux/virtio_types.h> /* This marks a buffer as continuing via the next field. */ #define VRING_DESC_F_NEXT 1 @@ -61,32 +62,32 @@ /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ struct vring_desc { /* Address (guest-physical). */ - __u64 addr; + __virtio64 addr; /* Length. */ - __u32 len; + __virtio32 len; /* The flags as indicated above. */ - __u16 flags; + __virtio16 flags; /* We chain unused descriptors via this, too */ - __u16 next; + __virtio16 next; }; struct vring_avail { - __u16 flags; - __u16 idx; - __u16 ring[]; + __virtio16 flags; + __virtio16 idx; + __virtio16 ring[]; }; /* u32 is used here for ids for padding reasons. */ struct vring_used_elem { /* Index of start of used descriptor chain. 
*/ - __u32 id; + __virtio32 id; /* Total length of the descriptor chain which was used (written to) */ - __u32 len; + __virtio32 len; }; struct vring_used { - __u16 flags; - __u16 idx; + __virtio16 flags; + __virtio16 idx; struct vring_used_elem ring[]; }; @@ -109,25 +110,25 @@ struct vring { * struct vring_desc desc[num]; * * // A ring of available descriptor heads with free-running index. - * __u16 avail_flags; - * __u16 avail_idx; - * __u16 available[num]; - * __u16 used_event_idx; + * __virtio16 avail_flags; + * __virtio16 avail_idx; + * __virtio16 available[num]; + * __virtio16 used_event_idx; * * // Padding to the next align boundary. * char pad[]; * * // A ring of used descriptor heads with free-running index. - * __u16 used_flags; - * __u16 used_idx; + * __virtio16 used_flags; + * __virtio16 used_idx; * struct vring_used_elem used[num]; - * __u16 avail_event_idx; + * __virtio16 avail_event_idx; * }; */ /* We publish the used event index at the end of the available ring, and vice * versa. They are at the end for backwards compatibility. */ #define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) -#define vring_avail_event(vr) (*(__u16 *)&(vr)->used->ring[(vr)->num]) +#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num]) static inline void vring_init(struct vring *vr, unsigned int num, void *p, unsigned long align) @@ -135,15 +136,15 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p, vr->num = num; vr->desc = p; vr->avail = p + num*sizeof(struct vring_desc); - vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__u16) + vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) + align-1) & ~(align - 1)); } static inline unsigned vring_size(unsigned int num, unsigned long align) { - return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (3 + num) + return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num) + align - 1) & ~(align - 1)) - + sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num; + + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num; } /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ diff --git a/include/linux/virtio_scsi.h b/include/uapi/linux/virtio_scsi.h index de429d1f4357..42b9370771b0 100644 --- a/include/linux/virtio_scsi.h +++ b/include/uapi/linux/virtio_scsi.h @@ -27,83 +27,85 @@ #ifndef _LINUX_VIRTIO_SCSI_H #define _LINUX_VIRTIO_SCSI_H +#include <linux/virtio_types.h> + #define VIRTIO_SCSI_CDB_SIZE 32 #define VIRTIO_SCSI_SENSE_SIZE 96 /* SCSI command request, followed by data-out */ struct virtio_scsi_cmd_req { - u8 lun[8]; /* Logical Unit Number */ - u64 tag; /* Command identifier */ - u8 task_attr; /* Task attribute */ - u8 prio; /* SAM command priority field */ - u8 crn; - u8 cdb[VIRTIO_SCSI_CDB_SIZE]; -} __packed; + __u8 lun[8]; /* Logical Unit Number */ + __virtio64 tag; /* Command identifier */ + __u8 task_attr; /* Task attribute */ + __u8 prio; /* SAM command priority field */ + __u8 crn; + __u8 cdb[VIRTIO_SCSI_CDB_SIZE]; +} __attribute__((packed)); /* SCSI command request, followed by protection information */ struct virtio_scsi_cmd_req_pi { - u8 lun[8]; /* Logical Unit Number */ - u64 tag; /* Command identifier */ - u8 task_attr; /* Task attribute */ - u8 prio; /* SAM command priority field */ - u8 crn; - u32 pi_bytesout; /* DataOUT PI Number of bytes */ - u32 pi_bytesin; /* DataIN PI Number of bytes */ - u8 cdb[VIRTIO_SCSI_CDB_SIZE]; -} __packed; + __u8 lun[8]; /* Logical Unit Number */ + __virtio64 tag; /* 
Command identifier */ + __u8 task_attr; /* Task attribute */ + __u8 prio; /* SAM command priority field */ + __u8 crn; + __virtio32 pi_bytesout; /* DataOUT PI Number of bytes */ + __virtio32 pi_bytesin; /* DataIN PI Number of bytes */ + __u8 cdb[VIRTIO_SCSI_CDB_SIZE]; +} __attribute__((packed)); /* Response, followed by sense data and data-in */ struct virtio_scsi_cmd_resp { - u32 sense_len; /* Sense data length */ - u32 resid; /* Residual bytes in data buffer */ - u16 status_qualifier; /* Status qualifier */ - u8 status; /* Command completion status */ - u8 response; /* Response values */ - u8 sense[VIRTIO_SCSI_SENSE_SIZE]; -} __packed; + __virtio32 sense_len; /* Sense data length */ + __virtio32 resid; /* Residual bytes in data buffer */ + __virtio16 status_qualifier; /* Status qualifier */ + __u8 status; /* Command completion status */ + __u8 response; /* Response values */ + __u8 sense[VIRTIO_SCSI_SENSE_SIZE]; +} __attribute__((packed)); /* Task Management Request */ struct virtio_scsi_ctrl_tmf_req { - u32 type; - u32 subtype; - u8 lun[8]; - u64 tag; -} __packed; + __virtio32 type; + __virtio32 subtype; + __u8 lun[8]; + __virtio64 tag; +} __attribute__((packed)); struct virtio_scsi_ctrl_tmf_resp { - u8 response; -} __packed; + __u8 response; +} __attribute__((packed)); /* Asynchronous notification query/subscription */ struct virtio_scsi_ctrl_an_req { - u32 type; - u8 lun[8]; - u32 event_requested; -} __packed; + __virtio32 type; + __u8 lun[8]; + __virtio32 event_requested; +} __attribute__((packed)); struct virtio_scsi_ctrl_an_resp { - u32 event_actual; - u8 response; -} __packed; + __virtio32 event_actual; + __u8 response; +} __attribute__((packed)); struct virtio_scsi_event { - u32 event; - u8 lun[8]; - u32 reason; -} __packed; + __virtio32 event; + __u8 lun[8]; + __virtio32 reason; +} __attribute__((packed)); struct virtio_scsi_config { - u32 num_queues; - u32 seg_max; - u32 max_sectors; - u32 cmd_per_lun; - u32 event_info_size; - u32 sense_size; - u32 cdb_size; - u16 max_channel; - u16 max_target; - u32 max_lun; -} __packed; + __u32 num_queues; + __u32 seg_max; + __u32 max_sectors; + __u32 cmd_per_lun; + __u32 event_info_size; + __u32 sense_size; + __u32 cdb_size; + __u16 max_channel; + __u16 max_target; + __u32 max_lun; +} __attribute__((packed)); /* Feature Bits */ #define VIRTIO_SCSI_F_INOUT 0 diff --git a/include/uapi/linux/virtio_types.h b/include/uapi/linux/virtio_types.h new file mode 100644 index 000000000000..e845e8c4cbee --- /dev/null +++ b/include/uapi/linux/virtio_types.h @@ -0,0 +1,46 @@ +#ifndef _UAPI_LINUX_VIRTIO_TYPES_H +#define _UAPI_LINUX_VIRTIO_TYPES_H +/* Type definitions for virtio implementations. + * + * This header is BSD licensed so anyone can use the definitions to implement + * compatible drivers/servers. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Copyright (C) 2014 Red Hat, Inc. + * Author: Michael S. Tsirkin <mst@redhat.com> + */ +#include <linux/types.h> + +/* + * __virtio{16,32,64} have the following meaning: + * - __u{16,32,64} for virtio devices in legacy mode, accessed in native endian + * - __le{16,32,64} for standard-compliant virtio devices + */ + +typedef __u16 __bitwise__ __virtio16; +typedef __u32 __bitwise__ __virtio32; +typedef __u64 __bitwise__ __virtio64; + +#endif /* _UAPI_LINUX_VIRTIO_TYPES_H */ diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index 6ee586728df9..1f23cd635957 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -96,9 +96,10 @@ enum { SNDRV_HWDEP_IFACE_FW_DICE, /* TC DICE FireWire device */ SNDRV_HWDEP_IFACE_FW_FIREWORKS, /* Echo Audio Fireworks based device */ SNDRV_HWDEP_IFACE_FW_BEBOB, /* BridgeCo BeBoB based device */ + SNDRV_HWDEP_IFACE_FW_OXFW, /* Oxford OXFW970/971 based device */ /* Don't forget to change the following: */ - SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_BEBOB + SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_OXFW }; struct snd_hwdep_info { @@ -220,7 +221,9 @@ typedef int __bitwise snd_pcm_format_t; #define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */ #define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */ #define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */ -#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_LE +#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */ +#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */ +#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE #ifdef SNDRV_LITTLE_ENDIAN #define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index 1964026b5e09..22ed8cb7800b 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -32,7 +32,7 @@ #define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2) /** - * struct snd_compressed_buffer: compressed buffer + * struct snd_compressed_buffer - compressed buffer * @fragment_size: size of buffer fragment in bytes * @fragments: number of such fragments */ @@ -42,7 +42,7 @@ struct snd_compressed_buffer { } __attribute__((packed, aligned(4))); /** - * struct snd_compr_params: compressed stream params + * struct snd_compr_params - compressed stream params * @buffer: buffer description * @codec: codec parameters * @no_wake_mode: dont wake on fragment elapsed @@ -54,7 
+54,7 @@ struct snd_compr_params { } __attribute__((packed, aligned(4))); /** - * struct snd_compr_tstamp: timestamp descriptor + * struct snd_compr_tstamp - timestamp descriptor * @byte_offset: Byte offset in ring buffer to DSP * @copied_total: Total number of bytes copied from/to ring buffer to/by DSP * @pcm_frames: Frames decoded or encoded by DSP. This field will evolve by @@ -73,7 +73,7 @@ struct snd_compr_tstamp { } __attribute__((packed, aligned(4))); /** - * struct snd_compr_avail: avail descriptor + * struct snd_compr_avail - avail descriptor * @avail: Number of bytes available in ring buffer for writing/reading * @tstamp: timestamp infomation */ @@ -88,7 +88,7 @@ enum snd_compr_direction { }; /** - * struct snd_compr_caps: caps descriptor + * struct snd_compr_caps - caps descriptor * @codecs: pointer to array of codecs * @direction: direction supported. Of type snd_compr_direction * @min_fragment_size: minimum fragment supported by DSP @@ -110,7 +110,7 @@ struct snd_compr_caps { } __attribute__((packed, aligned(4))); /** - * struct snd_compr_codec_caps: query capability of codec + * struct snd_compr_codec_caps - query capability of codec * @codec: codec for which capability is queried * @num_descriptors: number of codec descriptors * @descriptor: array of codec capability descriptor @@ -122,18 +122,19 @@ struct snd_compr_codec_caps { } __attribute__((packed, aligned(4))); /** + * enum sndrv_compress_encoder * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the * end of the track * @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the * beginning of the track */ -enum { +enum sndrv_compress_encoder { SNDRV_COMPRESS_ENCODER_PADDING = 1, SNDRV_COMPRESS_ENCODER_DELAY = 2, }; /** - * struct snd_compr_metadata: compressed stream metadata + * struct snd_compr_metadata - compressed stream metadata * @key: key id * @value: key value */ diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h index af4bd136c75d..49122df3b56b 100644 --- a/include/uapi/sound/firewire.h +++ b/include/uapi/sound/firewire.h @@ -55,7 +55,8 @@ union snd_firewire_event { #define SNDRV_FIREWIRE_TYPE_DICE 1 #define SNDRV_FIREWIRE_TYPE_FIREWORKS 2 #define SNDRV_FIREWIRE_TYPE_BEBOB 3 -/* AV/C, RME, MOTU, ... */ +#define SNDRV_FIREWIRE_TYPE_OXFW 4 +/* RME, MOTU, ... */ struct snd_firewire_get_info { unsigned int type; /* SNDRV_FIREWIRE_TYPE_xxx */ diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h index d956c3593f65..b357f1a5e29c 100644 --- a/include/uapi/sound/hdspm.h +++ b/include/uapi/sound/hdspm.h @@ -74,14 +74,14 @@ struct hdspm_config { #define SNDRV_HDSPM_IOCTL_GET_CONFIG \ _IOR('H', 0x41, struct hdspm_config) -/** +/* * If there's a TCO (TimeCode Option) board installed, * there are further options and status data available. * The hdspm_ltc structure contains the current SMPTE * timecode and some status information and can be * obtained via SNDRV_HDSPM_IOCTL_GET_LTC or in the * hdspm_status struct. - **/ + */ enum hdspm_ltc_format { format_invalid, @@ -113,11 +113,11 @@ struct hdspm_ltc { #define SNDRV_HDSPM_IOCTL_GET_LTC _IOR('H', 0x46, struct hdspm_ltc) -/** +/* * The status data reflects the device's current state * as determined by the card's configuration and * connection status. - **/ + */ enum hdspm_sync { hdspm_sync_no_lock = 0, @@ -171,9 +171,9 @@ struct hdspm_status { #define SNDRV_HDSPM_IOCTL_GET_STATUS \ _IOR('H', 0x47, struct hdspm_status) -/** +/* * Get information about the card and its add-ons. 
- **/ + */ #define HDSPM_ADDON_TCO 1 diff --git a/include/video/omapdss.h b/include/video/omapdss.h index 069dfca9549a..6a84498ea513 100644 --- a/include/video/omapdss.h +++ b/include/video/omapdss.h @@ -166,13 +166,6 @@ enum omap_dss_display_state { OMAP_DSS_DISPLAY_ACTIVE, }; -enum omap_dss_audio_state { - OMAP_DSS_AUDIO_DISABLED = 0, - OMAP_DSS_AUDIO_ENABLED, - OMAP_DSS_AUDIO_CONFIGURED, - OMAP_DSS_AUDIO_PLAYING, -}; - struct omap_dss_audio { struct snd_aes_iec958 *iec; struct snd_cea_861_aud_if *cea; @@ -635,19 +628,6 @@ struct omapdss_hdmi_ops { int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); int (*set_infoframe)(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi); - - /* - * Note: These functions might sleep. Do not call while - * holding a spinlock/readlock. - */ - int (*audio_enable)(struct omap_dss_device *dssdev); - void (*audio_disable)(struct omap_dss_device *dssdev); - bool (*audio_supported)(struct omap_dss_device *dssdev); - int (*audio_config)(struct omap_dss_device *dssdev, - struct omap_dss_audio *audio); - /* Note: These functions may not sleep */ - int (*audio_start)(struct omap_dss_device *dssdev); - void (*audio_stop)(struct omap_dss_device *dssdev); }; struct omapdss_dsi_ops { @@ -783,8 +763,6 @@ struct omap_dss_device { enum omap_dss_display_state state; - enum omap_dss_audio_state audio_state; - /* OMAP DSS output specific fields */ struct list_head list; @@ -795,6 +773,9 @@ struct omap_dss_device { /* output instance */ enum omap_dss_output_id id; + /* the port number in the DT node */ + int port_num; + /* dynamic fields */ struct omap_overlay_manager *manager; @@ -858,24 +839,6 @@ struct omap_dss_driver { int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev, const struct hdmi_avi_infoframe *avi); - - /* - * For display drivers that support audio. This encompasses - * HDMI and DisplayPort at the moment. - */ - /* - * Note: These functions might sleep. Do not call while - * holding a spinlock/readlock. 
- */ - int (*audio_enable)(struct omap_dss_device *dssdev); - void (*audio_disable)(struct omap_dss_device *dssdev); - bool (*audio_supported)(struct omap_dss_device *dssdev); - int (*audio_config)(struct omap_dss_device *dssdev, - struct omap_dss_audio *audio); - /* Note: These functions may not sleep */ - int (*audio_start)(struct omap_dss_device *dssdev); - void (*audio_stop)(struct omap_dss_device *dssdev); - }; enum omapdss_version omapdss_get_version(void); @@ -918,7 +881,7 @@ int omapdss_register_output(struct omap_dss_device *output); void omapdss_unregister_output(struct omap_dss_device *output); struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id); struct omap_dss_device *omap_dss_find_output(const char *name); -struct omap_dss_device *omap_dss_find_output_by_node(struct device_node *node); +struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port); int omapdss_output_set_device(struct omap_dss_device *out, struct omap_dss_device *dssdev); int omapdss_output_unset_device(struct omap_dss_device *out); diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h index 14334d0161d5..131a6ccdba25 100644 --- a/include/xen/interface/features.h +++ b/include/xen/interface/features.h @@ -53,9 +53,6 @@ /* operation as Dom0 is supported */ #define XENFEAT_dom0 11 -/* Xen also maps grant references at pfn = mfn */ -#define XENFEAT_grant_map_identity 12 - #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h index e40fae9bf11a..bcce56439d64 100644 --- a/include/xen/interface/grant_table.h +++ b/include/xen/interface/grant_table.h @@ -479,6 +479,25 @@ struct gnttab_get_version { DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version); /* + * Issue one or more cache maintenance operations on a portion of a + * page granted to the calling domain by a foreign domain. + */ +#define GNTTABOP_cache_flush 12 +struct gnttab_cache_flush { + union { + uint64_t dev_bus_addr; + grant_ref_t ref; + } a; + uint16_t offset; /* offset from start of grant */ + uint16_t length; /* size within the grant */ +#define GNTTAB_CACHE_CLEAN (1<<0) +#define GNTTAB_CACHE_INVAL (1<<1) +#define GNTTAB_CACHE_SOURCE_GREF (1<<31) + uint32_t op; +}; +DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush); + +/* * Bitfield values for update_pin_status.flags. */ /* Map the grant entry for access by I/O devices. */ |
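Finally, the grant_table.h hunk above introduces GNTTABOP_cache_flush. A guest-kernel sketch of cleaning part of a granted page identified by its grant reference might look like the following; the helper name is illustrative and error handling is left out.

#include <xen/interface/grant_table.h>
#include <asm/xen/hypercall.h>

/* Clean (write back) a byte range of a page granted to this domain. With
 * GNTTAB_CACHE_SOURCE_GREF set, the union is interpreted as a grant ref
 * rather than a device bus address. */
static int flush_granted_range(grant_ref_t ref, uint16_t offset, uint16_t len)
{
        struct gnttab_cache_flush cflush = {
                .a.ref  = ref,
                .offset = offset,
                .length = len,
                .op     = GNTTAB_CACHE_CLEAN | GNTTAB_CACHE_SOURCE_GREF,
        };

        return HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
}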