Diffstat (limited to 'include')
220 files changed, 7685 insertions, 1732 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index c9608b0b80c6..ba4dd54f2c82 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -215,7 +215,7 @@ struct acpi_device_flags { u32 of_compatible_ok:1; u32 coherent_dma:1; u32 cca_seen:1; - u32 serial_bus_slave:1; + u32 enumeration_by_parent:1; u32 reserved:19; }; diff --git a/include/acpi/processor.h b/include/acpi/processor.h index d591bb77f592..40a916efd7c0 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -254,6 +254,8 @@ int acpi_processor_pstate_control(void); /* note: this locks both the calling module and the processor module if a _PPC object exists, rmmod is disallowed then */ int acpi_processor_notify_smm(struct module *calling_module); +int acpi_processor_get_psd(acpi_handle handle, + struct acpi_psd_package *pdomain); /* parsing the _P* objects. */ extern int acpi_processor_get_performance_info(struct acpi_processor *pr); diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 8de9cc251286..66d1d45fa2e1 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -25,6 +25,50 @@ #define mmiowb() do {} while (0) #endif +#ifndef __io_br +#define __io_br() barrier() +#endif + +/* prevent prefetching of coherent DMA data ahead of a dma-complete */ +#ifndef __io_ar +#ifdef rmb +#define __io_ar() rmb() +#else +#define __io_ar() barrier() +#endif +#endif + +/* flush writes to coherent DMA data before possibly triggering a DMA read */ +#ifndef __io_bw +#ifdef wmb +#define __io_bw() wmb() +#else +#define __io_bw() barrier() +#endif +#endif + +/* serialize device access against a spin_unlock, usually handled there. */ +#ifndef __io_aw +#define __io_aw() barrier() +#endif + +#ifndef __io_pbw +#define __io_pbw() __io_bw() +#endif + +#ifndef __io_paw +#define __io_paw() __io_aw() +#endif + +#ifndef __io_pbr +#define __io_pbr() __io_br() +#endif + +#ifndef __io_par +#define __io_par() __io_ar() +#endif + + /* * __raw_{read,write}{b,w,l,q}() access memory in native endianness. 
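The __io_* hooks above are deliberately weak defaults: an architecture can override any of them by defining the macro before including asm-generic/io.h, and the #ifndef guards then skip the generic fallback. A minimal sketch of such an override follows; the arch header path and barrier choices are hypothetical, not taken from this patch:

	/* arch/foo/include/asm/io.h -- hypothetical architecture */
	#define __io_ar()	rmb()	/* readX(): order against later loads from DMA memory */
	#define __io_bw()	wmb()	/* writeX(): flush prior stores to DMA memory first */
	/* all remaining hooks fall through to the barrier() defaults above */
	#include <asm-generic/io.h>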
* @@ -110,7 +154,12 @@ static inline void __raw_writeq(u64 value, volatile void __iomem *addr) #define readb readb static inline u8 readb(const volatile void __iomem *addr) { - return __raw_readb(addr); + u8 val; + + __io_br(); + val = __raw_readb(addr); + __io_ar(); + return val; } #endif @@ -118,7 +167,12 @@ static inline u8 readb(const volatile void __iomem *addr) #define readw readw static inline u16 readw(const volatile void __iomem *addr) { - return __le16_to_cpu(__raw_readw(addr)); + u16 val; + + __io_br(); + val = __le16_to_cpu(__raw_readw(addr)); + __io_ar(); + return val; } #endif @@ -126,7 +180,12 @@ static inline u16 readw(const volatile void __iomem *addr) #define readl readl static inline u32 readl(const volatile void __iomem *addr) { - return __le32_to_cpu(__raw_readl(addr)); + u32 val; + + __io_br(); + val = __le32_to_cpu(__raw_readl(addr)); + __io_ar(); + return val; } #endif @@ -135,7 +194,12 @@ static inline u32 readl(const volatile void __iomem *addr) #define readq readq static inline u64 readq(const volatile void __iomem *addr) { - return __le64_to_cpu(__raw_readq(addr)); + u64 val; + + __io_br(); + val = __le64_to_cpu(__raw_readq(addr)); + __io_ar(); + return val; } #endif #endif /* CONFIG_64BIT */ @@ -144,7 +208,9 @@ static inline u64 readq(const volatile void __iomem *addr) #define writeb writeb static inline void writeb(u8 value, volatile void __iomem *addr) { + __io_bw(); __raw_writeb(value, addr); + __io_aw(); } #endif @@ -152,7 +218,9 @@ static inline void writeb(u8 value, volatile void __iomem *addr) #define writew writew static inline void writew(u16 value, volatile void __iomem *addr) { + __io_bw(); __raw_writew(cpu_to_le16(value), addr); + __io_aw(); } #endif @@ -160,7 +228,9 @@ static inline void writew(u16 value, volatile void __iomem *addr) #define writel writel static inline void writel(u32 value, volatile void __iomem *addr) { + __io_bw(); __raw_writel(__cpu_to_le32(value), addr); + __io_aw(); } #endif @@ -169,7 +239,9 @@ static inline void writel(u32 value, volatile void __iomem *addr) #define writeq writeq static inline void writeq(u64 value, volatile void __iomem *addr) { + __io_bw(); __raw_writeq(__cpu_to_le64(value), addr); + __io_aw(); } #endif #endif /* CONFIG_64BIT */ @@ -180,35 +252,67 @@ static inline void writeq(u64 value, volatile void __iomem *addr) * accesses. 
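Taken together, the hunks above turn each readX()/writeX() into a DMA ordering point. A short sketch of the guarantee a driver relies on (device, registers and the DMA_DONE flag are hypothetical):

	/* completion: __io_ar() inside readl() keeps the load from the
	 * coherent DMA buffer from being speculated past the status read */
	if (readl(dev->regs + DMA_STATUS) & DMA_DONE)
		consume(dev->rx_buf);

	/* submission: __io_bw() inside writel() makes the descriptor
	 * visible in memory before the doorbell can trigger a device read */
	dev->ring[head] = desc;
	writel(head, dev->regs + DMA_HEAD);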
*/ #ifndef readb_relaxed -#define readb_relaxed readb +#define readb_relaxed readb_relaxed +static inline u8 readb_relaxed(const volatile void __iomem *addr) +{ + return __raw_readb(addr); +} #endif #ifndef readw_relaxed -#define readw_relaxed readw +#define readw_relaxed readw_relaxed +static inline u16 readw_relaxed(const volatile void __iomem *addr) +{ + return __le16_to_cpu(__raw_readw(addr)); +} #endif #ifndef readl_relaxed -#define readl_relaxed readl +#define readl_relaxed readl_relaxed +static inline u32 readl_relaxed(const volatile void __iomem *addr) +{ + return __le32_to_cpu(__raw_readl(addr)); +} #endif #if defined(readq) && !defined(readq_relaxed) -#define readq_relaxed readq +#define readq_relaxed readq_relaxed +static inline u64 readq_relaxed(const volatile void __iomem *addr) +{ + return __le64_to_cpu(__raw_readq(addr)); +} #endif #ifndef writeb_relaxed -#define writeb_relaxed writeb +#define writeb_relaxed writeb_relaxed +static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) +{ + __raw_writeb(value, addr); +} #endif #ifndef writew_relaxed -#define writew_relaxed writew +#define writew_relaxed writew_relaxed +static inline void writew_relaxed(u16 value, volatile void __iomem *addr) +{ + __raw_writew(cpu_to_le16(value), addr); +} #endif #ifndef writel_relaxed -#define writel_relaxed writel +#define writel_relaxed writel_relaxed +static inline void writel_relaxed(u32 value, volatile void __iomem *addr) +{ + __raw_writel(__cpu_to_le32(value), addr); +} #endif #if defined(writeq) && !defined(writeq_relaxed) -#define writeq_relaxed writeq +#define writeq_relaxed writeq_relaxed +static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) +{ + __raw_writeq(__cpu_to_le64(value), addr); +} #endif /* @@ -351,6 +455,8 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer, #define IO_SPACE_LIMIT 0xffff #endif +#include <linux/logic_pio.h> + /* * {in,out}{b,w,l}() access little endian I/O. 
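The _relaxed variants defined above deliberately omit the __io_* barriers: they stay ordered against other accesses to the same device but not against normal memory. The usual pattern is to batch register programming with the relaxed forms and publish with one fully ordered access; a sketch under hypothetical register names:

	writel_relaxed(lower_32_bits(ring_dma), dev->regs + RING_BASE_LO);
	writel_relaxed(upper_32_bits(ring_dma), dev->regs + RING_BASE_HI);
	writel_relaxed(ring_size, dev->regs + RING_LEN);
	/* fully ordered: __io_bw() in writel() flushes the ring memory
	 * writes before the device is told to start fetching */
	writel(RING_ENABLE, dev->regs + RING_CTRL);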
{in,out}{b,w,l}_p() can be * implemented on hardware that needs an additional delay for I/O accesses to @@ -361,7 +467,12 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer, #define inb inb static inline u8 inb(unsigned long addr) { - return readb(PCI_IOBASE + addr); + u8 val; + + __io_pbr(); + val = __raw_readb(PCI_IOBASE + addr); + __io_par(); + return val; } #endif @@ -369,7 +480,12 @@ static inline u8 inb(unsigned long addr) #define inw inw static inline u16 inw(unsigned long addr) { - return readw(PCI_IOBASE + addr); + u16 val; + + __io_pbr(); + val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr)); + __io_par(); + return val; } #endif @@ -377,7 +493,12 @@ static inline u16 inw(unsigned long addr) #define inl inl static inline u32 inl(unsigned long addr) { - return readl(PCI_IOBASE + addr); + u32 val; + + __io_pbr(); + val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr)); + __io_par(); + return val; } #endif @@ -385,7 +506,9 @@ static inline u32 inl(unsigned long addr) #define outb outb static inline void outb(u8 value, unsigned long addr) { - writeb(value, PCI_IOBASE + addr); + __io_pbw(); + __raw_writeb(value, PCI_IOBASE + addr); + __io_paw(); } #endif @@ -393,7 +516,9 @@ static inline void outb(u8 value, unsigned long addr) #define outw outw static inline void outw(u16 value, unsigned long addr) { - writew(value, PCI_IOBASE + addr); + __io_pbw(); + __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr); + __io_paw(); } #endif @@ -401,7 +526,9 @@ static inline void outw(u16 value, unsigned long addr) #define outl outl static inline void outl(u32 value, unsigned long addr) { - writel(value, PCI_IOBASE + addr); + __io_pbw(); + __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr); + __io_paw(); } #endif @@ -899,7 +1026,7 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) #define ioport_map ioport_map static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { - return PCI_IOBASE + (port & IO_SPACE_LIMIT); + return PCI_IOBASE + (port & MMIO_UPPER_LIMIT); } #endif diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h index 18c6abe81fbd..728e5c5706c4 100644 --- a/include/asm-generic/kvm_para.h +++ b/include/asm-generic/kvm_para.h @@ -19,6 +19,11 @@ static inline unsigned int kvm_arch_para_features(void) return 0; } +static inline unsigned int kvm_arch_para_hints(void) +{ + return 0; +} + static inline bool kvm_para_available(void) { return false; diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h new file mode 100644 index 000000000000..7d9598dc578d --- /dev/null +++ b/include/clocksource/timer-ti-dm.h @@ -0,0 +1,394 @@ +/* + * OMAP Dual-Mode Timers + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ + * Tarun Kanti DebBarma <tarun.kanti@ti.com> + * Thara Gopinath <thara@ti.com> + * + * Platform device conversion and hwmod support. + * + * Copyright (C) 2005 Nokia Corporation + * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com> + * PWM and clock framework support by Timo Teras. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/platform_device.h> + +#ifndef __CLOCKSOURCE_DMTIMER_H +#define __CLOCKSOURCE_DMTIMER_H + +/* clock sources */ +#define OMAP_TIMER_SRC_SYS_CLK 0x00 +#define OMAP_TIMER_SRC_32_KHZ 0x01 +#define OMAP_TIMER_SRC_EXT_CLK 0x02 + +/* timer interrupt enable bits */ +#define OMAP_TIMER_INT_CAPTURE (1 << 2) +#define OMAP_TIMER_INT_OVERFLOW (1 << 1) +#define OMAP_TIMER_INT_MATCH (1 << 0) + +/* trigger types */ +#define OMAP_TIMER_TRIGGER_NONE 0x00 +#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01 +#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02 + +/* posted mode types */ +#define OMAP_TIMER_NONPOSTED 0x00 +#define OMAP_TIMER_POSTED 0x01 + +/* timer capabilities used in hwmod database */ +#define OMAP_TIMER_SECURE 0x80000000 +#define OMAP_TIMER_ALWON 0x40000000 +#define OMAP_TIMER_HAS_PWM 0x20000000 +#define OMAP_TIMER_NEEDS_RESET 0x10000000 +#define OMAP_TIMER_HAS_DSP_IRQ 0x08000000 + +/* + * timer errata flags + * + * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This + * errata prevents us from using posted mode on these devices, unless the + * timer counter register is never read. For more details please refer to + * the OMAP3/4/5 errata documents. + */ +#define OMAP_TIMER_ERRATA_I103_I767 0x80000000 + +struct timer_regs { + u32 tidr; + u32 tier; + u32 twer; + u32 tclr; + u32 tcrr; + u32 tldr; + u32 ttrg; + u32 twps; + u32 tmar; + u32 tcar1; + u32 tsicr; + u32 tcar2; + u32 tpir; + u32 tnir; + u32 tcvr; + u32 tocr; + u32 towr; +}; + +struct omap_dm_timer { + int id; + int irq; + struct clk *fclk; + + void __iomem *io_base; + void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */ + void __iomem *irq_ena; /* irq enable */ + void __iomem *irq_dis; /* irq disable, only on v2 ip */ + void __iomem *pend; /* write pending */ + void __iomem *func_base; /* function register base */ + + unsigned long rate; + unsigned reserved:1; + unsigned posted:1; + struct timer_regs context; + int (*get_context_loss_count)(struct device *); + int ctx_loss_count; + int revision; + u32 capability; + u32 errata; + struct platform_device *pdev; + struct list_head node; +}; + +int omap_dm_timer_reserve_systimer(int id); +struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap); + +int omap_dm_timer_get_irq(struct omap_dm_timer *timer); + +u32 omap_dm_timer_modify_idlect_mask(u32 inputmask); + +int omap_dm_timer_trigger(struct omap_dm_timer *timer); + +int omap_dm_timers_active(void); + +/* + * Do not use the defines below, they are not needed. They should only be + * used by dmtimer.c and sys_timer related code. + */ + +/* + * The interrupt registers are different between v1 and v2 ip. + * These registers are offsets from timer->io_base.
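The public API above selects timers by the OMAP_TIMER_* capability bits advertised in the hwmod data; a minimal usage sketch (caller code hypothetical, error handling trimmed):

	/* grab any dual-mode timer that can drive a PWM output */
	struct omap_dm_timer *timer;

	timer = omap_dm_timer_request_by_cap(OMAP_TIMER_HAS_PWM);
	if (!timer)
		return -ENODEV;
	pr_info("dmtimer %d, irq %d\n", timer->id,
		omap_dm_timer_get_irq(timer));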
+ */ +#define OMAP_TIMER_ID_OFFSET 0x00 +#define OMAP_TIMER_OCP_CFG_OFFSET 0x10 + +#define OMAP_TIMER_V1_SYS_STAT_OFFSET 0x14 +#define OMAP_TIMER_V1_STAT_OFFSET 0x18 +#define OMAP_TIMER_V1_INT_EN_OFFSET 0x1c + +#define OMAP_TIMER_V2_IRQSTATUS_RAW 0x24 +#define OMAP_TIMER_V2_IRQSTATUS 0x28 +#define OMAP_TIMER_V2_IRQENABLE_SET 0x2c +#define OMAP_TIMER_V2_IRQENABLE_CLR 0x30 + +/* + * The functional registers have a different base on v1 and v2 ip. + * These registers are offsets from timer->func_base. The func_base + * is the same as io_base for v1 and io_base + 0x14 for v2 ip. + * + */ +#define OMAP_TIMER_V2_FUNC_OFFSET 0x14 + +#define _OMAP_TIMER_WAKEUP_EN_OFFSET 0x20 +#define _OMAP_TIMER_CTRL_OFFSET 0x24 +#define OMAP_TIMER_CTRL_GPOCFG (1 << 14) +#define OMAP_TIMER_CTRL_CAPTMODE (1 << 13) +#define OMAP_TIMER_CTRL_PT (1 << 12) +#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH (0x1 << 8) +#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW (0x2 << 8) +#define OMAP_TIMER_CTRL_TCM_BOTHEDGES (0x3 << 8) +#define OMAP_TIMER_CTRL_SCPWM (1 << 7) +#define OMAP_TIMER_CTRL_CE (1 << 6) /* compare enable */ +#define OMAP_TIMER_CTRL_PRE (1 << 5) /* prescaler enable */ +#define OMAP_TIMER_CTRL_PTV_SHIFT 2 /* prescaler value shift */ +#define OMAP_TIMER_CTRL_POSTED (1 << 2) +#define OMAP_TIMER_CTRL_AR (1 << 1) /* auto-reload enable */ +#define OMAP_TIMER_CTRL_ST (1 << 0) /* start timer */ +#define _OMAP_TIMER_COUNTER_OFFSET 0x28 +#define _OMAP_TIMER_LOAD_OFFSET 0x2c +#define _OMAP_TIMER_TRIGGER_OFFSET 0x30 +#define _OMAP_TIMER_WRITE_PEND_OFFSET 0x34 +#define WP_NONE 0 /* no write pending bit */ +#define WP_TCLR (1 << 0) +#define WP_TCRR (1 << 1) +#define WP_TLDR (1 << 2) +#define WP_TTGR (1 << 3) +#define WP_TMAR (1 << 4) +#define WP_TPIR (1 << 5) +#define WP_TNIR (1 << 6) +#define WP_TCVR (1 << 7) +#define WP_TOCR (1 << 8) +#define WP_TOWR (1 << 9) +#define _OMAP_TIMER_MATCH_OFFSET 0x38 +#define _OMAP_TIMER_CAPTURE_OFFSET 0x3c +#define _OMAP_TIMER_IF_CTRL_OFFSET 0x40 +#define _OMAP_TIMER_CAPTURE2_OFFSET 0x44 /* TCAR2, 34xx only */ +#define _OMAP_TIMER_TICK_POS_OFFSET 0x48 /* TPIR, 34xx only */ +#define _OMAP_TIMER_TICK_NEG_OFFSET 0x4c /* TNIR, 34xx only */ +#define _OMAP_TIMER_TICK_COUNT_OFFSET 0x50 /* TCVR, 34xx only */ +#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */ +#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */ + +/* register offsets with the write pending bit encoded */ +#define WPSHIFT 16 + +#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \ + | (WP_TCLR << WPSHIFT)) + +#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \ + | (WP_TCRR << WPSHIFT)) + +#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \ + | (WP_TLDR << WPSHIFT)) + +#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \ + | (WP_TTGR << WPSHIFT)) + +#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \ + | (WP_TMAR << WPSHIFT)) + +#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \ + | (WP_NONE << WPSHIFT)) + +#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \ + | (WP_TPIR << WPSHIFT)) + +#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \ + | (WP_TNIR << WPSHIFT)) + +#define
OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \ + | (WP_TCVR << WPSHIFT)) + +#define OMAP_TIMER_TICK_INT_MASK_SET_REG \ + (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT)) + +#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \ + (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT)) + +/* + * The below are inlined to optimize code size for system timers. Other code + * should not need these at all, see + * include/linux/platform_data/pwm_omap_dmtimer.h + */ +#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS) +static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg, + int posted) +{ + if (posted) + while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) + cpu_relax(); + + return readl_relaxed(timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_write(struct omap_dm_timer *timer, + u32 reg, u32 val, int posted) +{ + if (posted) + while (readl_relaxed(timer->pend) & (reg >> WPSHIFT)) + cpu_relax(); + + writel_relaxed(val, timer->func_base + (reg & 0xff)); +} + +static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer) +{ + u32 tidr; + + /* Assume v1 ip if bits [31:16] are zero */ + tidr = readl_relaxed(timer->io_base); + if (!(tidr >> 16)) { + timer->revision = 1; + timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET; + timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET; + timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET; + timer->func_base = timer->io_base; + } else { + timer->revision = 2; + timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS; + timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET; + timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR; + timer->pend = timer->io_base + + _OMAP_TIMER_WRITE_PEND_OFFSET + + OMAP_TIMER_V2_FUNC_OFFSET; + timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET; + } +} + +/* + * __omap_dm_timer_enable_posted - enables write posted mode + * @timer: pointer to timer instance handle + * + * Enables the write posted mode for the timer. When posted mode is enabled + * writes to certain timer registers are immediately acknowledged by the + * internal bus and hence prevents stalling the CPU waiting for the write to + * complete. Enabling this feature can improve performance for writing to the + * timer registers. + */ +static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer) +{ + if (timer->posted) + return; + + if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) { + timer->posted = OMAP_TIMER_NONPOSTED; + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0); + return; + } + + __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, + OMAP_TIMER_CTRL_POSTED, 0); + timer->context.tsicr = OMAP_TIMER_CTRL_POSTED; + timer->posted = OMAP_TIMER_POSTED; +} + +/** + * __omap_dm_timer_override_errata - override errata flags for a timer + * @timer: pointer to timer handle + * @errata: errata flags to be ignored + * + * For a given timer, override a timer errata by clearing the flags + * specified by the errata argument. A specific erratum should only be + * overridden for a timer if the timer is used in such a way the erratum + * has no impact. 
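For reference, the encoding the two read/write inlines above rely on: the low byte of each *_REG constant is the register offset from func_base, and the bits above WPSHIFT name the write-pending bit a posted access must poll first. Decoding one constant, with values following directly from the defines above:

	u32 reg = OMAP_TIMER_LOAD_REG;	/* == 0x2c | (WP_TLDR << WPSHIFT) */
	u32 off = reg & 0xff;		/* 0x2c: TLDR offset from func_base */
	u32 wp  = reg >> WPSHIFT;	/* WP_TLDR: bit polled in the pend register */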
+ */ +static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer, + u32 errata) +{ + timer->errata &= ~errata; +} + +static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer, + int posted, unsigned long rate) +{ + u32 l; + + l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); + if (l & OMAP_TIMER_CTRL_ST) { + l &= ~0x1; + __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted); +#ifdef CONFIG_ARCH_OMAP2PLUS + /* Readback to make sure write has completed */ + __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted); + /* + * Wait for functional clock period x 3.5 to make sure that + * timer is stopped + */ + udelay(3500000 / rate + 1); +#endif + } + + /* Ack possibly pending interrupt */ + writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat); +} + +static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer, + u32 ctrl, unsigned int load, + int posted) +{ + __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted); + __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted); +} + +static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer, + unsigned int value) +{ + writel_relaxed(value, timer->irq_ena); + __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0); +} + +static inline unsigned int +__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted) +{ + return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted); +} + +static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer, + unsigned int value) +{ + writel_relaxed(value, timer->irq_stat); +} +#endif /* CONFIG_ARCH_OMAP1 || CONFIG_ARCH_OMAP2PLUS */ +#endif /* __CLOCKSOURCE_DMTIMER_H */ diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h index e2f99ae72d5c..b2325d3e236a 100644 --- a/include/dt-bindings/clock/imx7d-clock.h +++ b/include/dt-bindings/clock/imx7d-clock.h @@ -452,5 +452,8 @@ #define IMX7D_OCOTP_CLK 439 #define IMX7D_NAND_RAWNAND_CLK 440 #define IMX7D_NAND_USDHC_BUS_RAWNAND_CLK 441 -#define IMX7D_CLK_END 442 +#define IMX7D_SNVS_CLK 442 +#define IMX7D_CAAM_CLK 443 +#define IMX7D_KPP_ROOT_CLK 444 +#define IMX7D_CLK_END 445 #endif /* __DT_BINDINGS_CLOCK_IMX7D_H */ diff --git a/include/dt-bindings/clock/tegra194-clock.h b/include/dt-bindings/clock/tegra194-clock.h new file mode 100644 index 000000000000..a2ff66342d69 --- /dev/null +++ b/include/dt-bindings/clock/tegra194-clock.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __ABI_MACH_T194_CLOCK_H +#define __ABI_MACH_T194_CLOCK_H + +#define TEGRA194_CLK_ACTMON 1 +#define TEGRA194_CLK_ADSP 2 +#define TEGRA194_CLK_ADSPNEON 3 +#define TEGRA194_CLK_AHUB 4 +#define TEGRA194_CLK_APB2APE 5 +#define TEGRA194_CLK_APE 6 +#define TEGRA194_CLK_AUD_MCLK 7 +#define TEGRA194_CLK_AXI_CBB 8 +#define TEGRA194_CLK_CAN1 9 +#define TEGRA194_CLK_CAN1_HOST 10 +#define TEGRA194_CLK_CAN2 11 +#define TEGRA194_CLK_CAN2_HOST 12 +#define TEGRA194_CLK_CEC 13 +#define TEGRA194_CLK_CLK_M 14 +#define TEGRA194_CLK_DMIC1 15 +#define TEGRA194_CLK_DMIC2 16 +#define TEGRA194_CLK_DMIC3 17 +#define TEGRA194_CLK_DMIC4 18 +#define TEGRA194_CLK_DPAUX 19 +#define TEGRA194_CLK_DPAUX1 20 +#define TEGRA194_CLK_ACLK 21 +#define TEGRA194_CLK_MSS_ENCRYPT 22 +#define TEGRA194_CLK_EQOS_RX_INPUT 23 +#define TEGRA194_CLK_IQC2 24 +#define TEGRA194_CLK_AON_APB 25 +#define TEGRA194_CLK_AON_NIC 26 +#define TEGRA194_CLK_AON_CPU_NIC 27 +#define TEGRA194_CLK_PLLA1 28 +#define TEGRA194_CLK_DSPK1 29 +#define TEGRA194_CLK_DSPK2 30 +#define TEGRA194_CLK_EMC 31 +#define TEGRA194_CLK_EQOS_AXI 32 +#define TEGRA194_CLK_EQOS_PTP_REF 33 +#define TEGRA194_CLK_EQOS_RX 34 +#define TEGRA194_CLK_EQOS_TX 35 +#define TEGRA194_CLK_EXTPERIPH1 36 +#define TEGRA194_CLK_EXTPERIPH2 37 +#define TEGRA194_CLK_EXTPERIPH3 38 +#define TEGRA194_CLK_EXTPERIPH4 39 +#define TEGRA194_CLK_FUSE 40 +#define TEGRA194_CLK_GPCCLK 41 +#define TEGRA194_CLK_GPU_PWR 42 +#define TEGRA194_CLK_HDA 43 +#define TEGRA194_CLK_HDA2CODEC_2X 44 +#define TEGRA194_CLK_HDA2HDMICODEC 45 +#define TEGRA194_CLK_HOST1X 46 +#define TEGRA194_CLK_HSIC_TRK 47 +#define TEGRA194_CLK_I2C1 48 +#define TEGRA194_CLK_I2C2 49 +#define TEGRA194_CLK_I2C3 50 +#define TEGRA194_CLK_I2C4 51 +#define TEGRA194_CLK_I2C6 52 +#define TEGRA194_CLK_I2C7 53 +#define TEGRA194_CLK_I2C8 54 +#define TEGRA194_CLK_I2C9 55 +#define TEGRA194_CLK_I2S1 56 +#define TEGRA194_CLK_I2S1_SYNC_INPUT 57 +#define TEGRA194_CLK_I2S2 58 +#define TEGRA194_CLK_I2S2_SYNC_INPUT 59 +#define TEGRA194_CLK_I2S3 60 +#define TEGRA194_CLK_I2S3_SYNC_INPUT 61 +#define TEGRA194_CLK_I2S4 62 +#define TEGRA194_CLK_I2S4_SYNC_INPUT 63 +#define TEGRA194_CLK_I2S5 64 +#define TEGRA194_CLK_I2S5_SYNC_INPUT 65 +#define TEGRA194_CLK_I2S6 66 +#define TEGRA194_CLK_I2S6_SYNC_INPUT 67 +#define TEGRA194_CLK_IQC1 68 +#define TEGRA194_CLK_ISP 69 +#define TEGRA194_CLK_KFUSE 70 +#define TEGRA194_CLK_MAUD 71 +#define TEGRA194_CLK_MIPI_CAL 72 +#define TEGRA194_CLK_MPHY_CORE_PLL_FIXED 73 +#define TEGRA194_CLK_MPHY_L0_RX_ANA 74 +#define TEGRA194_CLK_MPHY_L0_RX_LS_BIT 75 +#define TEGRA194_CLK_MPHY_L0_RX_SYMB 76 +#define TEGRA194_CLK_MPHY_L0_TX_LS_3XBIT 77 +#define TEGRA194_CLK_MPHY_L0_TX_SYMB 78 +#define TEGRA194_CLK_MPHY_L1_RX_ANA 79 +#define TEGRA194_CLK_MPHY_TX_1MHZ_REF 80 +#define TEGRA194_CLK_NVCSI 81 +#define TEGRA194_CLK_NVCSILP 82 +#define TEGRA194_CLK_NVDEC 83 +#define TEGRA194_CLK_NVDISPLAYHUB 84 +#define TEGRA194_CLK_NVDISPLAY_DISP 85 +#define TEGRA194_CLK_NVDISPLAY_P0 86 +#define TEGRA194_CLK_NVDISPLAY_P1 87 +#define TEGRA194_CLK_NVDISPLAY_P2 88 +#define TEGRA194_CLK_NVENC 89 +#define TEGRA194_CLK_NVJPG 90 +#define TEGRA194_CLK_OSC 91 +#define TEGRA194_CLK_AON_TOUCH 92 +#define TEGRA194_CLK_PLLA 93 +#define TEGRA194_CLK_PLLAON 94 +#define TEGRA194_CLK_PLLD 95 +#define TEGRA194_CLK_PLLD2 96 +#define TEGRA194_CLK_PLLD3 97 +#define TEGRA194_CLK_PLLDP 98 +#define TEGRA194_CLK_PLLD4 99 +#define TEGRA194_CLK_PLLE 100 +#define TEGRA194_CLK_PLLP 101 +#define TEGRA194_CLK_PLLP_OUT0 102 +#define TEGRA194_CLK_UTMIPLL 103 +#define 
TEGRA194_CLK_PLLA_OUT0 104 +#define TEGRA194_CLK_PWM1 105 +#define TEGRA194_CLK_PWM2 106 +#define TEGRA194_CLK_PWM3 107 +#define TEGRA194_CLK_PWM4 108 +#define TEGRA194_CLK_PWM5 109 +#define TEGRA194_CLK_PWM6 110 +#define TEGRA194_CLK_PWM7 111 +#define TEGRA194_CLK_PWM8 112 +#define TEGRA194_CLK_RCE_CPU_NIC 113 +#define TEGRA194_CLK_RCE_NIC 114 +#define TEGRA194_CLK_SATA 115 +#define TEGRA194_CLK_SATA_OOB 116 +#define TEGRA194_CLK_AON_I2C_SLOW 117 +#define TEGRA194_CLK_SCE_CPU_NIC 118 +#define TEGRA194_CLK_SCE_NIC 119 +#define TEGRA194_CLK_SDMMC1 120 +#define TEGRA194_CLK_UPHY_PLL3 121 +#define TEGRA194_CLK_SDMMC3 122 +#define TEGRA194_CLK_SDMMC4 123 +#define TEGRA194_CLK_SE 124 +#define TEGRA194_CLK_SOR0_OUT 125 +#define TEGRA194_CLK_SOR0_REF 126 +#define TEGRA194_CLK_SOR0_PAD_CLKOUT 127 +#define TEGRA194_CLK_SOR1_OUT 128 +#define TEGRA194_CLK_SOR1_REF 129 +#define TEGRA194_CLK_SOR1_PAD_CLKOUT 130 +#define TEGRA194_CLK_SOR_SAFE 131 +#define TEGRA194_CLK_IQC1_IN 132 +#define TEGRA194_CLK_IQC2_IN 133 +#define TEGRA194_CLK_DMIC5 134 +#define TEGRA194_CLK_SPI1 135 +#define TEGRA194_CLK_SPI2 136 +#define TEGRA194_CLK_SPI3 137 +#define TEGRA194_CLK_I2C_SLOW 138 +#define TEGRA194_CLK_SYNC_DMIC1 139 +#define TEGRA194_CLK_SYNC_DMIC2 140 +#define TEGRA194_CLK_SYNC_DMIC3 141 +#define TEGRA194_CLK_SYNC_DMIC4 142 +#define TEGRA194_CLK_SYNC_DSPK1 143 +#define TEGRA194_CLK_SYNC_DSPK2 144 +#define TEGRA194_CLK_SYNC_I2S1 145 +#define TEGRA194_CLK_SYNC_I2S2 146 +#define TEGRA194_CLK_SYNC_I2S3 147 +#define TEGRA194_CLK_SYNC_I2S4 148 +#define TEGRA194_CLK_SYNC_I2S5 149 +#define TEGRA194_CLK_SYNC_I2S6 150 +#define TEGRA194_CLK_MPHY_FORCE_LS_MODE 151 +#define TEGRA194_CLK_TACH 152 +#define TEGRA194_CLK_TSEC 153 +#define TEGRA194_CLK_TSECB 154 +#define TEGRA194_CLK_UARTA 155 +#define TEGRA194_CLK_UARTB 156 +#define TEGRA194_CLK_UARTC 157 +#define TEGRA194_CLK_UARTD 158 +#define TEGRA194_CLK_UARTE 159 +#define TEGRA194_CLK_UARTF 160 +#define TEGRA194_CLK_UARTG 161 +#define TEGRA194_CLK_UART_FST_MIPI_CAL 162 +#define TEGRA194_CLK_UFSDEV_REF 163 +#define TEGRA194_CLK_UFSHC 164 +#define TEGRA194_CLK_USB2_TRK 165 +#define TEGRA194_CLK_VI 166 +#define TEGRA194_CLK_VIC 167 +#define TEGRA194_CLK_PVA0_AXI 168 +#define TEGRA194_CLK_PVA0_VPS0 169 +#define TEGRA194_CLK_PVA0_VPS1 170 +#define TEGRA194_CLK_PVA1_AXI 171 +#define TEGRA194_CLK_PVA1_VPS0 172 +#define TEGRA194_CLK_PVA1_VPS1 173 +#define TEGRA194_CLK_DLA0_FALCON 174 +#define TEGRA194_CLK_DLA0_CORE 175 +#define TEGRA194_CLK_DLA1_FALCON 176 +#define TEGRA194_CLK_DLA1_CORE 177 +#define TEGRA194_CLK_SOR2_OUT 178 +#define TEGRA194_CLK_SOR2_REF 179 +#define TEGRA194_CLK_SOR2_PAD_CLKOUT 180 +#define TEGRA194_CLK_SOR3_OUT 181 +#define TEGRA194_CLK_SOR3_REF 182 +#define TEGRA194_CLK_SOR3_PAD_CLKOUT 183 +#define TEGRA194_CLK_NVDISPLAY_P3 184 +#define TEGRA194_CLK_DPAUX2 185 +#define TEGRA194_CLK_DPAUX3 186 +#define TEGRA194_CLK_NVDEC1 187 +#define TEGRA194_CLK_NVENC1 188 +#define TEGRA194_CLK_SE_FREE 189 +#define TEGRA194_CLK_UARTH 190 +#define TEGRA194_CLK_FUSE_SERIAL 191 +#define TEGRA194_CLK_QSPI0 192 +#define TEGRA194_CLK_QSPI1 193 +#define TEGRA194_CLK_QSPI0_PM 194 +#define TEGRA194_CLK_QSPI1_PM 195 +#define TEGRA194_CLK_VI_CONST 196 +#define TEGRA194_CLK_NAFLL_BPMP 197 +#define TEGRA194_CLK_NAFLL_SCE 198 +#define TEGRA194_CLK_NAFLL_NVDEC 199 +#define TEGRA194_CLK_NAFLL_NVJPG 200 +#define TEGRA194_CLK_NAFLL_TSEC 201 +#define TEGRA194_CLK_NAFLL_TSECB 202 +#define TEGRA194_CLK_NAFLL_VI 203 +#define TEGRA194_CLK_NAFLL_SE 204 +#define TEGRA194_CLK_NAFLL_NVENC 205 +#define 
TEGRA194_CLK_NAFLL_ISP 206 +#define TEGRA194_CLK_NAFLL_VIC 207 +#define TEGRA194_CLK_NAFLL_NVDISPLAYHUB 208 +#define TEGRA194_CLK_NAFLL_AXICBB 209 +#define TEGRA194_CLK_NAFLL_DLA 210 +#define TEGRA194_CLK_NAFLL_PVA_CORE 211 +#define TEGRA194_CLK_NAFLL_PVA_VPS 212 +#define TEGRA194_CLK_NAFLL_CVNAS 213 +#define TEGRA194_CLK_NAFLL_RCE 214 +#define TEGRA194_CLK_NAFLL_NVENC1 215 +#define TEGRA194_CLK_NAFLL_DLA_FALCON 216 +#define TEGRA194_CLK_NAFLL_NVDEC1 217 +#define TEGRA194_CLK_NAFLL_GPU 218 +#define TEGRA194_CLK_SDMMC_LEGACY_TM 219 +#define TEGRA194_CLK_PEX0_CORE_0 220 +#define TEGRA194_CLK_PEX0_CORE_1 221 +#define TEGRA194_CLK_PEX0_CORE_2 222 +#define TEGRA194_CLK_PEX0_CORE_3 223 +#define TEGRA194_CLK_PEX0_CORE_4 224 +#define TEGRA194_CLK_PEX1_CORE_5 225 +#define TEGRA194_CLK_PEX_REF1 226 +#define TEGRA194_CLK_PEX_REF2 227 +#define TEGRA194_CLK_CSI_A 229 +#define TEGRA194_CLK_CSI_B 230 +#define TEGRA194_CLK_CSI_C 231 +#define TEGRA194_CLK_CSI_D 232 +#define TEGRA194_CLK_CSI_E 233 +#define TEGRA194_CLK_CSI_F 234 +#define TEGRA194_CLK_CSI_G 235 +#define TEGRA194_CLK_CSI_H 236 +#define TEGRA194_CLK_PLLC4 237 +#define TEGRA194_CLK_PLLC4_OUT 238 +#define TEGRA194_CLK_PLLC4_OUT1 239 +#define TEGRA194_CLK_PLLC4_OUT2 240 +#define TEGRA194_CLK_PLLC4_MUXED 241 +#define TEGRA194_CLK_PLLC4_VCO_DIV2 242 +#define TEGRA194_CLK_CSI_A_PAD 244 +#define TEGRA194_CLK_CSI_B_PAD 245 +#define TEGRA194_CLK_CSI_C_PAD 246 +#define TEGRA194_CLK_CSI_D_PAD 247 +#define TEGRA194_CLK_CSI_E_PAD 248 +#define TEGRA194_CLK_CSI_F_PAD 249 +#define TEGRA194_CLK_CSI_G_PAD 250 +#define TEGRA194_CLK_CSI_H_PAD 251 +#define TEGRA194_CLK_PEX_SATA_USB_RX_BYP 254 +#define TEGRA194_CLK_PEX_USB_PAD_PLL0_MGMT 255 +#define TEGRA194_CLK_PEX_USB_PAD_PLL1_MGMT 256 +#define TEGRA194_CLK_PEX_USB_PAD_PLL2_MGMT 257 +#define TEGRA194_CLK_PEX_USB_PAD_PLL3_MGMT 258 +#define TEGRA194_CLK_XUSB_CORE_DEV 265 +#define TEGRA194_CLK_XUSB_CORE_MUX 266 +#define TEGRA194_CLK_XUSB_CORE_HOST 267 +#define TEGRA194_CLK_XUSB_CORE_SS 268 +#define TEGRA194_CLK_XUSB_FALCON 269 +#define TEGRA194_CLK_XUSB_FALCON_HOST 270 +#define TEGRA194_CLK_XUSB_FALCON_SS 271 +#define TEGRA194_CLK_XUSB_FS 272 +#define TEGRA194_CLK_XUSB_FS_HOST 273 +#define TEGRA194_CLK_XUSB_FS_DEV 274 +#define TEGRA194_CLK_XUSB_SS 275 +#define TEGRA194_CLK_XUSB_SS_DEV 276 +#define TEGRA194_CLK_XUSB_SS_SUPERSPEED 277 +#define TEGRA194_CLK_PLLDISPHUB 278 +#define TEGRA194_CLK_PLLDISPHUB_DIV 279 +#define TEGRA194_CLK_NAFLL_CLUSTER0 280 +#define TEGRA194_CLK_NAFLL_CLUSTER1 281 +#define TEGRA194_CLK_NAFLL_CLUSTER2 282 +#define TEGRA194_CLK_NAFLL_CLUSTER3 283 +#define TEGRA194_CLK_CAN1_CORE 284 +#define TEGRA194_CLK_CAN2_CORE 285 +#define TEGRA194_CLK_PLLA1_OUT1 286 +#define TEGRA194_CLK_PLLREFE_VCOOUT 288 +#define TEGRA194_CLK_CLK_32K 289 +#define TEGRA194_CLK_SPDIFIN_SYNC_INPUT 290 +#define TEGRA194_CLK_UTMIPLL_CLKOUT48 291 +#define TEGRA194_CLK_UTMIPLL_CLKOUT480 292 +#define TEGRA194_CLK_CVNAS 293 +#define TEGRA194_CLK_PLLNVCSI 294 +#define TEGRA194_CLK_PVA0_CPU_AXI 295 +#define TEGRA194_CLK_PVA1_CPU_AXI 296 +#define TEGRA194_CLK_PVA0_VPS 297 +#define TEGRA194_CLK_PVA1_VPS 298 +#define TEGRA194_CLK_DLA0_FALCON_MUX 299 +#define TEGRA194_CLK_DLA1_FALCON_MUX 300 +#define TEGRA194_CLK_DLA0_CORE_MUX 301 +#define TEGRA194_CLK_DLA1_CORE_MUX 302 +#define TEGRA194_CLK_UTMIPLL_HPS 304 +#define TEGRA194_CLK_I2C5 305 +#define TEGRA194_CLK_I2C10 306 +#define TEGRA194_CLK_BPMP_CPU_NIC 307 +#define TEGRA194_CLK_BPMP_APB 308 +#define TEGRA194_CLK_TSC 309 +#define TEGRA194_CLK_EMCSA 310 +#define TEGRA194_CLK_EMCSB 311 
+#define TEGRA194_CLK_EMCSC 312 +#define TEGRA194_CLK_EMCSD 313 +#define TEGRA194_CLK_PLLC 314 +#define TEGRA194_CLK_PLLC2 315 +#define TEGRA194_CLK_PLLC3 316 +#define TEGRA194_CLK_TSC_REF 317 +#define TEGRA194_CLK_FUSE_BURN 318 +#define TEGRA194_CLK_PEX0_CORE_0M 319 +#define TEGRA194_CLK_PEX0_CORE_1M 320 +#define TEGRA194_CLK_PEX0_CORE_2M 321 +#define TEGRA194_CLK_PEX0_CORE_3M 322 +#define TEGRA194_CLK_PEX0_CORE_4M 323 +#define TEGRA194_CLK_PEX1_CORE_5M 324 +#define TEGRA194_CLK_PLLE_HPS 326 + +#endif diff --git a/include/dt-bindings/gpio/tegra194-gpio.h b/include/dt-bindings/gpio/tegra194-gpio.h new file mode 100644 index 000000000000..ede860225f6b --- /dev/null +++ b/include/dt-bindings/gpio/tegra194-gpio.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */ + +/* + * This header provides constants for binding nvidia,tegra194-gpio*. + * + * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below + * provide names for this. + * + * The second cell contains standard flag values specified in gpio.h. + */ + +#ifndef _DT_BINDINGS_GPIO_TEGRA194_GPIO_H +#define _DT_BINDINGS_GPIO_TEGRA194_GPIO_H + +#include <dt-bindings/gpio/gpio.h> + +/* GPIOs implemented by main GPIO controller */ +#define TEGRA194_MAIN_GPIO_PORT_A 0 +#define TEGRA194_MAIN_GPIO_PORT_B 1 +#define TEGRA194_MAIN_GPIO_PORT_C 2 +#define TEGRA194_MAIN_GPIO_PORT_D 3 +#define TEGRA194_MAIN_GPIO_PORT_E 4 +#define TEGRA194_MAIN_GPIO_PORT_F 5 +#define TEGRA194_MAIN_GPIO_PORT_G 6 +#define TEGRA194_MAIN_GPIO_PORT_H 7 +#define TEGRA194_MAIN_GPIO_PORT_I 8 +#define TEGRA194_MAIN_GPIO_PORT_J 9 +#define TEGRA194_MAIN_GPIO_PORT_K 10 +#define TEGRA194_MAIN_GPIO_PORT_L 11 +#define TEGRA194_MAIN_GPIO_PORT_M 12 +#define TEGRA194_MAIN_GPIO_PORT_N 13 +#define TEGRA194_MAIN_GPIO_PORT_O 14 +#define TEGRA194_MAIN_GPIO_PORT_P 15 +#define TEGRA194_MAIN_GPIO_PORT_Q 16 +#define TEGRA194_MAIN_GPIO_PORT_R 17 +#define TEGRA194_MAIN_GPIO_PORT_S 18 +#define TEGRA194_MAIN_GPIO_PORT_T 19 +#define TEGRA194_MAIN_GPIO_PORT_U 20 +#define TEGRA194_MAIN_GPIO_PORT_V 21 +#define TEGRA194_MAIN_GPIO_PORT_W 22 +#define TEGRA194_MAIN_GPIO_PORT_X 23 +#define TEGRA194_MAIN_GPIO_PORT_Y 24 +#define TEGRA194_MAIN_GPIO_PORT_Z 25 +#define TEGRA194_MAIN_GPIO_PORT_FF 26 +#define TEGRA194_MAIN_GPIO_PORT_GG 27 + +#define TEGRA194_MAIN_GPIO(port, offset) \ + ((TEGRA194_MAIN_GPIO_PORT_##port * 8) + offset) + +/* GPIOs implemented by AON GPIO controller */ +#define TEGRA194_AON_GPIO_PORT_AA 0 +#define TEGRA194_AON_GPIO_PORT_BB 1 +#define TEGRA194_AON_GPIO_PORT_CC 2 +#define TEGRA194_AON_GPIO_PORT_DD 3 +#define TEGRA194_AON_GPIO_PORT_EE 4 + +#define TEGRA194_AON_GPIO(port, offset) \ + ((TEGRA194_AON_GPIO_PORT_##port * 8) + offset) + +#endif diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h index 8b7b7197ffd7..a90f3613c584 100644 --- a/include/dt-bindings/mfd/stm32f7-rcc.h +++ b/include/dt-bindings/mfd/stm32f7-rcc.h @@ -91,6 +91,7 @@ #define STM32F7_RCC_APB2_TIM8 1 #define STM32F7_RCC_APB2_USART1 4 #define STM32F7_RCC_APB2_USART6 5 +#define STM32F7_RCC_APB2_SDMMC2 7 #define STM32F7_RCC_APB2_ADC1 8 #define STM32F7_RCC_APB2_ADC2 9 #define STM32F7_RCC_APB2_ADC3 10 diff --git a/include/dt-bindings/power/mt2712-power.h b/include/dt-bindings/power/mt2712-power.h index 92b46d772fae..2c147817efc2 100644 --- a/include/dt-bindings/power/mt2712-power.h +++ b/include/dt-bindings/power/mt2712-power.h @@ -22,5 +22,8 @@ #define MT2712_POWER_DOMAIN_USB 5 #define 
MT2712_POWER_DOMAIN_USB2 6 #define MT2712_POWER_DOMAIN_MFG 7 +#define MT2712_POWER_DOMAIN_MFG_SC1 8 +#define MT2712_POWER_DOMAIN_MFG_SC2 9 +#define MT2712_POWER_DOMAIN_MFG_SC3 10 #endif /* _DT_BINDINGS_POWER_MT2712_POWER_H */ diff --git a/include/dt-bindings/power/mt7623a-power.h b/include/dt-bindings/power/mt7623a-power.h new file mode 100644 index 000000000000..2544822aa76b --- /dev/null +++ b/include/dt-bindings/power/mt7623a-power.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _DT_BINDINGS_POWER_MT7623A_POWER_H +#define _DT_BINDINGS_POWER_MT7623A_POWER_H + +#define MT7623A_POWER_DOMAIN_CONN 0 +#define MT7623A_POWER_DOMAIN_ETH 1 +#define MT7623A_POWER_DOMAIN_HIF 2 +#define MT7623A_POWER_DOMAIN_IFR_MSC 3 + +#endif /* _DT_BINDINGS_POWER_MT7623A_POWER_H */ diff --git a/include/dt-bindings/power/r8a77965-sysc.h b/include/dt-bindings/power/r8a77965-sysc.h new file mode 100644 index 000000000000..05a4b5917314 --- /dev/null +++ b/include/dt-bindings/power/r8a77965-sysc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org> + * Copyright (C) 2016 Glider bvba + */ + +#ifndef __DT_BINDINGS_POWER_R8A77965_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77965_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. SYSCISR, Interrupt Status Register) + */ + +#define R8A77965_PD_CA57_CPU0 0 +#define R8A77965_PD_CA57_CPU1 1 +#define R8A77965_PD_A3VP 9 +#define R8A77965_PD_CA57_SCU 12 +#define R8A77965_PD_CR7 13 +#define R8A77965_PD_A3VC 14 +#define R8A77965_PD_3DG_A 17 +#define R8A77965_PD_3DG_B 18 +#define R8A77965_PD_A3IR 24 +#define R8A77965_PD_A2VC1 26 + +/* Always-on power area */ +#define R8A77965_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77965_SYSC_H__ */ diff --git a/include/dt-bindings/power/r8a77980-sysc.h b/include/dt-bindings/power/r8a77980-sysc.h new file mode 100644 index 000000000000..2c90c1237725 --- /dev/null +++ b/include/dt-bindings/power/r8a77980-sysc.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2018 Renesas Electronics Corp. + * Copyright (C) 2018 Cogent Embedded, Inc. + */ +#ifndef __DT_BINDINGS_POWER_R8A77980_SYSC_H__ +#define __DT_BINDINGS_POWER_R8A77980_SYSC_H__ + +/* + * These power domain indices match the numbers of the interrupt bits + * representing the power areas in the various Interrupt Registers + * (e.g. 
SYSCISR, Interrupt Status Register) + */ + +#define R8A77980_PD_A2SC2 0 +#define R8A77980_PD_A2SC3 1 +#define R8A77980_PD_A2SC4 2 +#define R8A77980_PD_A2PD0 3 +#define R8A77980_PD_A2PD1 4 +#define R8A77980_PD_CA53_CPU0 5 +#define R8A77980_PD_CA53_CPU1 6 +#define R8A77980_PD_CA53_CPU2 7 +#define R8A77980_PD_CA53_CPU3 8 +#define R8A77980_PD_A2CN 10 +#define R8A77980_PD_A3VIP 11 +#define R8A77980_PD_A2IR5 12 +#define R8A77980_PD_CR7 13 +#define R8A77980_PD_A2IR4 15 +#define R8A77980_PD_CA53_SCU 21 +#define R8A77980_PD_A2IR0 23 +#define R8A77980_PD_A3IR 24 +#define R8A77980_PD_A3VIP1 25 +#define R8A77980_PD_A3VIP2 26 +#define R8A77980_PD_A2IR1 27 +#define R8A77980_PD_A2IR2 28 +#define R8A77980_PD_A2IR3 29 +#define R8A77980_PD_A2SC0 30 +#define R8A77980_PD_A2SC1 31 + +/* Always-on power area */ +#define R8A77980_PD_ALWAYS_ON 32 + +#endif /* __DT_BINDINGS_POWER_R8A77980_SYSC_H__ */ diff --git a/include/dt-bindings/power/tegra194-powergate.h b/include/dt-bindings/power/tegra194-powergate.h new file mode 100644 index 000000000000..82253742a493 --- /dev/null +++ b/include/dt-bindings/power/tegra194-powergate.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __ABI_MACH_T194_POWERGATE_T194_H_ +#define __ABI_MACH_T194_POWERGATE_T194_H_ + +#define TEGRA194_POWER_DOMAIN_AUD 1 +#define TEGRA194_POWER_DOMAIN_DISP 2 +#define TEGRA194_POWER_DOMAIN_DISPB 3 +#define TEGRA194_POWER_DOMAIN_DISPC 4 +#define TEGRA194_POWER_DOMAIN_ISPA 5 +#define TEGRA194_POWER_DOMAIN_NVDECA 6 +#define TEGRA194_POWER_DOMAIN_NVJPG 7 +#define TEGRA194_POWER_DOMAIN_NVENCA 8 +#define TEGRA194_POWER_DOMAIN_NVENCB 9 +#define TEGRA194_POWER_DOMAIN_NVDECB 10 +#define TEGRA194_POWER_DOMAIN_SAX 11 +#define TEGRA194_POWER_DOMAIN_VE 12 +#define TEGRA194_POWER_DOMAIN_VIC 13 +#define TEGRA194_POWER_DOMAIN_XUSBA 14 +#define TEGRA194_POWER_DOMAIN_XUSBB 15 +#define TEGRA194_POWER_DOMAIN_XUSBC 16 +#define TEGRA194_POWER_DOMAIN_PCIEX8A 17 +#define TEGRA194_POWER_DOMAIN_PCIEX4A 18 +#define TEGRA194_POWER_DOMAIN_PCIEX1A 19 +#define TEGRA194_POWER_DOMAIN_PCIEX8B 21 +#define TEGRA194_POWER_DOMAIN_PVAA 22 +#define TEGRA194_POWER_DOMAIN_PVAB 23 +#define TEGRA194_POWER_DOMAIN_DLAA 24 +#define TEGRA194_POWER_DOMAIN_DLAB 25 +#define TEGRA194_POWER_DOMAIN_CV 26 +#define TEGRA194_POWER_DOMAIN_GPU 27 +#define TEGRA194_POWER_DOMAIN_MAX 27 + +#endif diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h new file mode 100644 index 000000000000..f0c3aaef67a0 --- /dev/null +++ b/include/dt-bindings/reset/stm32mp1-resets.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics. 
+ */ + +#ifndef _DT_BINDINGS_STM32MP1_RESET_H_ +#define _DT_BINDINGS_STM32MP1_RESET_H_ + +#define LTDC_R 3072 +#define DSI_R 3076 +#define DDRPERFM_R 3080 +#define USBPHY_R 3088 +#define SPI6_R 3136 +#define I2C4_R 3138 +#define I2C6_R 3139 +#define USART1_R 3140 +#define STGEN_R 3156 +#define GPIOZ_R 3200 +#define CRYP1_R 3204 +#define HASH1_R 3205 +#define RNG1_R 3206 +#define AXIM_R 3216 +#define GPU_R 3269 +#define ETHMAC_R 3274 +#define FMC_R 3276 +#define QSPI_R 3278 +#define SDMMC1_R 3280 +#define SDMMC2_R 3281 +#define CRC1_R 3284 +#define USBH_R 3288 +#define MDMA_R 3328 +#define MCU_R 8225 +#define TIM2_R 19456 +#define TIM3_R 19457 +#define TIM4_R 19458 +#define TIM5_R 19459 +#define TIM6_R 19460 +#define TIM7_R 19461 +#define TIM12_R 16462 +#define TIM13_R 16463 +#define TIM14_R 16464 +#define LPTIM1_R 19465 +#define SPI2_R 19467 +#define SPI3_R 19468 +#define USART2_R 19470 +#define USART3_R 19471 +#define UART4_R 19472 +#define UART5_R 19473 +#define UART7_R 19474 +#define UART8_R 19475 +#define I2C1_R 19477 +#define I2C2_R 19478 +#define I2C3_R 19479 +#define I2C5_R 19480 +#define SPDIF_R 19482 +#define CEC_R 19483 +#define DAC12_R 19485 +#define MDIO_R 19847 +#define TIM1_R 19520 +#define TIM8_R 19521 +#define TIM15_R 19522 +#define TIM16_R 19523 +#define TIM17_R 19524 +#define SPI1_R 19528 +#define SPI4_R 19529 +#define SPI5_R 19530 +#define USART6_R 19533 +#define SAI1_R 19536 +#define SAI2_R 19537 +#define SAI3_R 19538 +#define DFSDM_R 19540 +#define FDCAN_R 19544 +#define LPTIM2_R 19584 +#define LPTIM3_R 19585 +#define LPTIM4_R 19586 +#define LPTIM5_R 19587 +#define SAI4_R 19592 +#define SYSCFG_R 19595 +#define VREF_R 19597 +#define TMPSENS_R 19600 +#define PMBCTRL_R 19601 +#define DMA1_R 19648 +#define DMA2_R 19649 +#define DMAMUX_R 19650 +#define ADC12_R 19653 +#define USBO_R 19656 +#define SDMMC3_R 19664 +#define CAMITF_R 19712 +#define CRYP2_R 19716 +#define HASH2_R 19717 +#define RNG2_R 19718 +#define CRC2_R 19719 +#define HSEM_R 19723 +#define MBOX_R 19724 +#define GPIOA_R 19776 +#define GPIOB_R 19777 +#define GPIOC_R 19778 +#define GPIOD_R 19779 +#define GPIOE_R 19780 +#define GPIOF_R 19781 +#define GPIOG_R 19782 +#define GPIOH_R 19783 +#define GPIOI_R 19784 +#define GPIOJ_R 19785 +#define GPIOK_R 19786 + +#endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */ diff --git a/include/dt-bindings/reset/tegra194-reset.h b/include/dt-bindings/reset/tegra194-reset.h new file mode 100644 index 000000000000..473afaa25bfb --- /dev/null +++ b/include/dt-bindings/reset/tegra194-reset.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 
*/ + +#ifndef __ABI_MACH_T194_RESET_H +#define __ABI_MACH_T194_RESET_H + +#define TEGRA194_RESET_ACTMON 1 +#define TEGRA194_RESET_ADSP_ALL 2 +#define TEGRA194_RESET_AFI 3 +#define TEGRA194_RESET_CAN1 4 +#define TEGRA194_RESET_CAN2 5 +#define TEGRA194_RESET_DLA0 6 +#define TEGRA194_RESET_DLA1 7 +#define TEGRA194_RESET_DPAUX 8 +#define TEGRA194_RESET_DPAUX1 9 +#define TEGRA194_RESET_DPAUX2 10 +#define TEGRA194_RESET_DPAUX3 11 +#define TEGRA194_RESET_EQOS 17 +#define TEGRA194_RESET_GPCDMA 18 +#define TEGRA194_RESET_GPU 19 +#define TEGRA194_RESET_HDA 20 +#define TEGRA194_RESET_HDA2CODEC_2X 21 +#define TEGRA194_RESET_HDA2HDMICODEC 22 +#define TEGRA194_RESET_HOST1X 23 +#define TEGRA194_RESET_I2C1 24 +#define TEGRA194_RESET_I2C10 25 +#define TEGRA194_RESET_RSVD_26 26 +#define TEGRA194_RESET_RSVD_27 27 +#define TEGRA194_RESET_RSVD_28 28 +#define TEGRA194_RESET_I2C2 29 +#define TEGRA194_RESET_I2C3 30 +#define TEGRA194_RESET_I2C4 31 +#define TEGRA194_RESET_I2C6 32 +#define TEGRA194_RESET_I2C7 33 +#define TEGRA194_RESET_I2C8 34 +#define TEGRA194_RESET_I2C9 35 +#define TEGRA194_RESET_ISP 36 +#define TEGRA194_RESET_MIPI_CAL 37 +#define TEGRA194_RESET_MPHY_CLK_CTL 38 +#define TEGRA194_RESET_MPHY_L0_RX 39 +#define TEGRA194_RESET_MPHY_L0_TX 40 +#define TEGRA194_RESET_MPHY_L1_RX 41 +#define TEGRA194_RESET_MPHY_L1_TX 42 +#define TEGRA194_RESET_NVCSI 43 +#define TEGRA194_RESET_NVDEC 44 +#define TEGRA194_RESET_NVDISPLAY0_HEAD0 45 +#define TEGRA194_RESET_NVDISPLAY0_HEAD1 46 +#define TEGRA194_RESET_NVDISPLAY0_HEAD2 47 +#define TEGRA194_RESET_NVDISPLAY0_HEAD3 48 +#define TEGRA194_RESET_NVDISPLAY0_MISC 49 +#define TEGRA194_RESET_NVDISPLAY0_WGRP0 50 +#define TEGRA194_RESET_NVDISPLAY0_WGRP1 51 +#define TEGRA194_RESET_NVDISPLAY0_WGRP2 52 +#define TEGRA194_RESET_NVDISPLAY0_WGRP3 53 +#define TEGRA194_RESET_NVDISPLAY0_WGRP4 54 +#define TEGRA194_RESET_NVDISPLAY0_WGRP5 55 +#define TEGRA194_RESET_RSVD_56 56 +#define TEGRA194_RESET_RSVD_57 57 +#define TEGRA194_RESET_RSVD_58 58 +#define TEGRA194_RESET_NVENC 59 +#define TEGRA194_RESET_NVENC1 60 +#define TEGRA194_RESET_NVJPG 61 +#define TEGRA194_RESET_PCIE 62 +#define TEGRA194_RESET_PCIEXCLK 63 +#define TEGRA194_RESET_RSVD_64 64 +#define TEGRA194_RESET_RSVD_65 65 +#define TEGRA194_RESET_PVA0_ALL 66 +#define TEGRA194_RESET_PVA1_ALL 67 +#define TEGRA194_RESET_PWM1 68 +#define TEGRA194_RESET_PWM2 69 +#define TEGRA194_RESET_PWM3 70 +#define TEGRA194_RESET_PWM4 71 +#define TEGRA194_RESET_PWM5 72 +#define TEGRA194_RESET_PWM6 73 +#define TEGRA194_RESET_PWM7 74 +#define TEGRA194_RESET_PWM8 75 +#define TEGRA194_RESET_QSPI0 76 +#define TEGRA194_RESET_QSPI1 77 +#define TEGRA194_RESET_SATA 78 +#define TEGRA194_RESET_SATACOLD 79 +#define TEGRA194_RESET_SCE_ALL 80 +#define TEGRA194_RESET_RCE_ALL 81 +#define TEGRA194_RESET_SDMMC1 82 +#define TEGRA194_RESET_RSVD_83 83 +#define TEGRA194_RESET_SDMMC3 84 +#define TEGRA194_RESET_SDMMC4 85 +#define TEGRA194_RESET_SE 86 +#define TEGRA194_RESET_SOR0 87 +#define TEGRA194_RESET_SOR1 88 +#define TEGRA194_RESET_SOR2 89 +#define TEGRA194_RESET_SOR3 90 +#define TEGRA194_RESET_SPI1 91 +#define TEGRA194_RESET_SPI2 92 +#define TEGRA194_RESET_SPI3 93 +#define TEGRA194_RESET_SPI4 94 +#define TEGRA194_RESET_TACH 95 +#define TEGRA194_RESET_RSVD_96 96 +#define TEGRA194_RESET_TSCTNVI 97 +#define TEGRA194_RESET_TSEC 98 +#define TEGRA194_RESET_TSECB 99 +#define TEGRA194_RESET_UARTA 100 +#define TEGRA194_RESET_UARTB 101 +#define TEGRA194_RESET_UARTC 102 +#define TEGRA194_RESET_UARTD 103 +#define TEGRA194_RESET_UARTE 104 +#define TEGRA194_RESET_UARTF 105 +#define 
TEGRA194_RESET_UARTG 106 +#define TEGRA194_RESET_UARTH 107 +#define TEGRA194_RESET_UFSHC 108 +#define TEGRA194_RESET_UFSHC_AXI_M 109 +#define TEGRA194_RESET_UFSHC_LP_SEQ 110 +#define TEGRA194_RESET_RSVD_111 111 +#define TEGRA194_RESET_VI 112 +#define TEGRA194_RESET_VIC 113 +#define TEGRA194_RESET_XUSB_PADCTL 114 +#define TEGRA194_RESET_NVDEC1 115 +#define TEGRA194_RESET_PEX0_CORE_0 116 +#define TEGRA194_RESET_PEX0_CORE_1 117 +#define TEGRA194_RESET_PEX0_CORE_2 118 +#define TEGRA194_RESET_PEX0_CORE_3 119 +#define TEGRA194_RESET_PEX0_CORE_4 120 +#define TEGRA194_RESET_PEX0_CORE_0_APB 121 +#define TEGRA194_RESET_PEX0_CORE_1_APB 122 +#define TEGRA194_RESET_PEX0_CORE_2_APB 123 +#define TEGRA194_RESET_PEX0_CORE_3_APB 124 +#define TEGRA194_RESET_PEX0_CORE_4_APB 125 +#define TEGRA194_RESET_PEX0_COMMON_APB 126 +#define TEGRA194_RESET_PEX1_CORE_5 129 +#define TEGRA194_RESET_PEX1_CORE_5_APB 130 +#define TEGRA194_RESET_CVNAS 131 +#define TEGRA194_RESET_CVNAS_FCM 132 +#define TEGRA194_RESET_DMIC5 144 +#define TEGRA194_RESET_APE 145 +#define TEGRA194_RESET_PEX_USB_UPHY 146 +#define TEGRA194_RESET_PEX_USB_UPHY_L0 147 +#define TEGRA194_RESET_PEX_USB_UPHY_L1 148 +#define TEGRA194_RESET_PEX_USB_UPHY_L2 149 +#define TEGRA194_RESET_PEX_USB_UPHY_L3 150 +#define TEGRA194_RESET_PEX_USB_UPHY_L4 151 +#define TEGRA194_RESET_PEX_USB_UPHY_L5 152 +#define TEGRA194_RESET_PEX_USB_UPHY_L6 153 +#define TEGRA194_RESET_PEX_USB_UPHY_L7 154 +#define TEGRA194_RESET_PEX_USB_UPHY_L8 155 +#define TEGRA194_RESET_PEX_USB_UPHY_L9 156 +#define TEGRA194_RESET_PEX_USB_UPHY_L10 157 +#define TEGRA194_RESET_PEX_USB_UPHY_L11 158 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL0 159 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL1 160 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL2 161 +#define TEGRA194_RESET_PEX_USB_UPHY_PLL3 162 + +#endif diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 02924ae2527e..24f03941ada8 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -57,11 +57,15 @@ struct vgic_global { /* Physical address of vgic virtual cpu interface */ phys_addr_t vcpu_base; - /* GICV mapping */ + /* GICV mapping, kernel VA */ void __iomem *vcpu_base_va; + /* GICV mapping, HYP VA */ + void __iomem *vcpu_hyp_va; - /* virtual control interface mapping */ + /* virtual control interface mapping, kernel VA */ void __iomem *vctrl_base; + /* virtual control interface mapping, HYP VA */ + void __iomem *vctrl_hyp; /* Number of implemented list registers */ int nr_lr; @@ -209,10 +213,6 @@ struct vgic_dist { int nr_spis; - /* TODO: Consider moving to global state */ - /* Virtual control interface mapping */ - void __iomem *vctrl_base; - /* base addresses in guest physical address space: */ gpa_t vgic_dist_base; /* distributor */ union { @@ -263,7 +263,6 @@ struct vgic_dist { struct vgic_v2_cpu_if { u32 vgic_hcr; u32 vgic_vmcr; - u64 vgic_elrsr; /* Saved only */ u32 vgic_apr; u32 vgic_lr[VGIC_V2_MAX_LRS]; }; @@ -272,7 +271,6 @@ struct vgic_v3_cpu_if { u32 vgic_hcr; u32 vgic_vmcr; u32 vgic_sre; /* Restored only, change ignored */ - u32 vgic_elrsr; /* Saved only */ u32 vgic_ap0r[4]; u32 vgic_ap1r[4]; u64 vgic_lr[VGIC_V3_MAX_LRS]; diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 2f7a29242b87..38cd77b39a64 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -26,7 +26,8 @@ #define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL) #define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL) -int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node); +int iort_register_domain_token(int 
trans_id, phys_addr_t base, + struct fwnode_handle *fw_node); void iort_deregister_domain_token(int trans_id); struct fwnode_handle *iort_find_domain_token(int trans_id); #ifdef CONFIG_ACPI_IORT @@ -38,6 +39,7 @@ int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); /* IOMMU interface */ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size); const struct iommu_ops *iort_iommu_configure(struct device *dev); +int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); #else static inline void acpi_iort_init(void) { } static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id) @@ -52,6 +54,9 @@ static inline void iort_dma_setup(struct device *dev, u64 *dma_addr, static inline const struct iommu_ops *iort_iommu_configure( struct device *dev) { return NULL; } +static inline +int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) +{ return 0; } #endif #endif /* __ACPI_IORT_H__ */ diff --git a/include/linux/audit.h b/include/linux/audit.h index af410d9fbf2d..75d5b031e802 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -146,8 +146,7 @@ extern void audit_log_d_path(struct audit_buffer *ab, const struct path *path); extern void audit_log_key(struct audit_buffer *ab, char *key); -extern void audit_log_link_denied(const char *operation, - const struct path *link); +extern void audit_log_link_denied(const char *operation); extern void audit_log_lost(const char *message); extern int audit_log_task_context(struct audit_buffer *ab); @@ -194,8 +193,7 @@ static inline void audit_log_d_path(struct audit_buffer *ab, { } static inline void audit_log_key(struct audit_buffer *ab, char *key) { } -static inline void audit_log_link_denied(const char *string, - const struct path *link) +static inline void audit_log_link_denied(const char *string) { } static inline int audit_log_task_context(struct audit_buffer *ab) { diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 0e9c0f71f726..f6be4b0b6c18 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -176,7 +176,7 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) } long congestion_wait(int sync, long timeout); -long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout); +long wait_iff_congested(int sync, long timeout); static inline bool bdi_cap_synchronous_io(struct backing_dev_info *bdi) { @@ -330,7 +330,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode) * @inode: inode of interest * * Returns the wb @inode is currently associated with. The caller must be - * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the + * holding either @inode->i_lock, the i_pages lock, or the * associated wb's list_lock. */ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) @@ -338,7 +338,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&inode->i_lock) && - !lockdep_is_held(&inode->i_mapping->tree_lock) && + !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && !lockdep_is_held(&inode->i_wb->list_lock))); #endif return inode->i_wb; @@ -350,7 +350,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) * @lockedp: temp bool output param, to be passed to the end function * * The caller wants to access the wb associated with @inode but isn't - * holding inode->i_lock, mapping->tree_lock or wb->list_lock. 
This + * holding inode->i_lock, the i_pages lock or wb->list_lock. This * function determines the wb associated with @inode and ensures that the * association doesn't change until the transaction is finished with * unlocked_inode_to_wb_end(). @@ -371,11 +371,11 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; if (unlikely(*lockedp)) - spin_lock_irq(&inode->i_mapping->tree_lock); + xa_lock_irq(&inode->i_mapping->i_pages); /* - * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock. - * inode_to_wb() will bark. Deref directly. + * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages + * lock. inode_to_wb() will bark. Deref directly. */ return inode->i_wb; } @@ -388,7 +388,7 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) { if (unlikely(locked)) - spin_unlock_irq(&inode->i_mapping->tree_lock); + xa_unlock_irq(&inode->i_mapping->i_pages); rcu_read_unlock(); } diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index b0abe21d6cc9..4955e0863b83 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -61,6 +61,8 @@ struct linux_binprm { unsigned interp_flags; unsigned interp_data; unsigned long loader, exec; + + struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */ } __randomize_layout; #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 @@ -118,6 +120,7 @@ extern int __must_check remove_arg_zero(struct linux_binprm *); extern int search_binary_handler(struct linux_binprm *); extern int flush_old_exec(struct linux_binprm * bprm); extern void setup_new_exec(struct linux_binprm * bprm); +extern void finalize_exec(struct linux_binprm *bprm); extern void would_dump(struct linux_binprm *, struct file *); extern int suid_dumpable; diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 59042d5ac520..3901927cf6a0 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -204,6 +204,7 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ CEPH_FEATURE_MSGR_KEEPALIVE2 | \ CEPH_FEATURE_OSD_POOLRESEND | \ + CEPH_FEATURE_MDS_QUOTA | \ CEPH_FEATURE_CRUSH_V4 | \ CEPH_FEATURE_NEW_OSDOP_ENCODING | \ CEPH_FEATURE_SERVER_JEWEL | \ diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 88dd51381aaf..7ecfc88314d8 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -134,6 +134,7 @@ struct ceph_dir_layout { #define CEPH_MSG_CLIENT_LEASE 0x311 #define CEPH_MSG_CLIENT_SNAP 0x312 #define CEPH_MSG_CLIENT_CAPRELEASE 0x313 +#define CEPH_MSG_CLIENT_QUOTA 0x314 /* pool ops */ #define CEPH_MSG_POOLOP_REPLY 48 @@ -807,4 +808,20 @@ struct ceph_mds_snap_realm { } __attribute__ ((packed)); /* followed by my snap list, then prior parent snap list */ +/* + * quotas + */ +struct ceph_mds_quota { + __le64 ino; /* ino */ + struct ceph_timespec rctime; + __le64 rbytes; /* dir stats */ + __le64 rfiles; + __le64 rsubdirs; + __u8 struct_v; /* compat */ + __u8 struct_compat; + __le32 struct_len; + __le64 max_bytes; /* quota max. bytes */ + __le64 max_files; /* quota max. 
files */ +} __attribute__ ((packed)); + #endif diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index c2ec44cf5098..49c93b9308d7 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -262,6 +262,7 @@ extern struct kmem_cache *ceph_cap_cachep; extern struct kmem_cache *ceph_cap_flush_cachep; extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; +extern struct kmem_cache *ceph_dir_file_cachep; /* ceph_common.c */ extern bool libceph_compatible(void *data); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index ead9d85f1c11..c7dfcb8a1fb2 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -76,6 +76,7 @@ enum ceph_msg_data_type { #ifdef CONFIG_BLOCK CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */ #endif /* CONFIG_BLOCK */ + CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */ }; static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) @@ -87,22 +88,106 @@ static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: #endif /* CONFIG_BLOCK */ + case CEPH_MSG_DATA_BVECS: return true; default: return false; } } +#ifdef CONFIG_BLOCK + +struct ceph_bio_iter { + struct bio *bio; + struct bvec_iter iter; +}; + +#define __ceph_bio_iter_advance_step(it, n, STEP) do { \ + unsigned int __n = (n), __cur_n; \ + \ + while (__n) { \ + BUG_ON(!(it)->iter.bi_size); \ + __cur_n = min((it)->iter.bi_size, __n); \ + (void)(STEP); \ + bio_advance_iter((it)->bio, &(it)->iter, __cur_n); \ + if (!(it)->iter.bi_size && (it)->bio->bi_next) { \ + dout("__ceph_bio_iter_advance_step next bio\n"); \ + (it)->bio = (it)->bio->bi_next; \ + (it)->iter = (it)->bio->bi_iter; \ + } \ + __n -= __cur_n; \ + } \ +} while (0) + +/* + * Advance @it by @n bytes. + */ +#define ceph_bio_iter_advance(it, n) \ + __ceph_bio_iter_advance_step(it, n, 0) + +/* + * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec. + */ +#define ceph_bio_iter_advance_step(it, n, BVEC_STEP) \ + __ceph_bio_iter_advance_step(it, n, ({ \ + struct bio_vec bv; \ + struct bvec_iter __cur_iter; \ + \ + __cur_iter = (it)->iter; \ + __cur_iter.bi_size = __cur_n; \ + __bio_for_each_segment(bv, (it)->bio, __cur_iter, __cur_iter) \ + (void)(BVEC_STEP); \ + })) + +#endif /* CONFIG_BLOCK */ + +struct ceph_bvec_iter { + struct bio_vec *bvecs; + struct bvec_iter iter; +}; + +#define __ceph_bvec_iter_advance_step(it, n, STEP) do { \ + BUG_ON((n) > (it)->iter.bi_size); \ + (void)(STEP); \ + bvec_iter_advance((it)->bvecs, &(it)->iter, (n)); \ +} while (0) + +/* + * Advance @it by @n bytes. + */ +#define ceph_bvec_iter_advance(it, n) \ + __ceph_bvec_iter_advance_step(it, n, 0) + +/* + * Advance @it by @n bytes, executing BVEC_STEP for each bio_vec. 
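To make the step-executing variants concrete: a caller can fold each traversed segment into a CRC while advancing (a sketch only; ex_crc_bvecs() is hypothetical and relies on the ceph_bvec_iter_advance_step() macro defined just below):

#include <linux/ceph/messenger.h>
#include <linux/crc32c.h>
#include <linux/highmem.h>

/* Advance *it by n bytes (n must not exceed the remaining bi_size),
 * feeding every bio_vec segment that is passed over into crc32c(). */
static u32 ex_crc_bvecs(struct ceph_bvec_iter *it, unsigned int n, u32 crc)
{
	ceph_bvec_iter_advance_step(it, n, ({
		void *kaddr = kmap(bv.bv_page);

		crc = crc32c(crc, kaddr + bv.bv_offset, bv.bv_len);
		kunmap(bv.bv_page);
	}));
	return crc;
}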
+ */ +#define ceph_bvec_iter_advance_step(it, n, BVEC_STEP) \ + __ceph_bvec_iter_advance_step(it, n, ({ \ + struct bio_vec bv; \ + struct bvec_iter __cur_iter; \ + \ + __cur_iter = (it)->iter; \ + __cur_iter.bi_size = (n); \ + for_each_bvec(bv, (it)->bvecs, __cur_iter, __cur_iter) \ + (void)(BVEC_STEP); \ + })) + +#define ceph_bvec_iter_shorten(it, n) do { \ + BUG_ON((n) > (it)->iter.bi_size); \ + (it)->iter.bi_size = (n); \ +} while (0) + struct ceph_msg_data { struct list_head links; /* ceph_msg->data */ enum ceph_msg_data_type type; union { #ifdef CONFIG_BLOCK struct { - struct bio *bio; - size_t bio_length; + struct ceph_bio_iter bio_pos; + u32 bio_length; }; #endif /* CONFIG_BLOCK */ + struct ceph_bvec_iter bvec_pos; struct { struct page **pages; /* NOT OWNER. */ size_t length; /* total # bytes */ @@ -122,11 +207,9 @@ struct ceph_msg_data_cursor { bool need_crc; /* crc update needed */ union { #ifdef CONFIG_BLOCK - struct { /* bio */ - struct bio *bio; /* bio from list */ - struct bvec_iter bvec_iter; - }; + struct ceph_bio_iter bio_iter; #endif /* CONFIG_BLOCK */ + struct bvec_iter bvec_iter; struct { /* pages */ unsigned int page_offset; /* offset in page */ unsigned short page_index; /* index in array */ @@ -290,9 +373,11 @@ extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist); #ifdef CONFIG_BLOCK -extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, - size_t length); +void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos, + u32 length); #endif /* CONFIG_BLOCK */ +void ceph_msg_data_add_bvecs(struct ceph_msg *msg, + struct ceph_bvec_iter *bvec_pos); extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 52fb37d1c2a5..528ccc943cee 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -57,6 +57,7 @@ enum ceph_osd_data_type { #ifdef CONFIG_BLOCK CEPH_OSD_DATA_TYPE_BIO, #endif /* CONFIG_BLOCK */ + CEPH_OSD_DATA_TYPE_BVECS, }; struct ceph_osd_data { @@ -72,10 +73,11 @@ struct ceph_osd_data { struct ceph_pagelist *pagelist; #ifdef CONFIG_BLOCK struct { - struct bio *bio; /* list of bios */ - size_t bio_length; /* total in list */ + struct ceph_bio_iter bio_pos; + u32 bio_length; }; #endif /* CONFIG_BLOCK */ + struct ceph_bvec_iter bvec_pos; }; }; @@ -405,10 +407,14 @@ extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *, unsigned int which, struct ceph_pagelist *pagelist); #ifdef CONFIG_BLOCK -extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *, - unsigned int which, - struct bio *bio, size_t bio_length); +void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, + unsigned int which, + struct ceph_bio_iter *bio_pos, + u32 bio_length); #endif /* CONFIG_BLOCK */ +void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req, + unsigned int which, + struct ceph_bvec_iter *bvec_pos); extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *, unsigned int which, @@ -418,6 +424,9 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages); +void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req, + unsigned int which, + struct bio_vec *bvecs, u32 bytes); extern void 
osd_req_op_cls_response_data_pages(struct ceph_osd_request *, unsigned int which, struct page **pages, u64 length, diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index d41fad99c0fa..e71fb222c7c3 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -5,7 +5,6 @@ #include <linux/rbtree.h> #include <linux/ceph/types.h> #include <linux/ceph/decode.h> -#include <linux/ceph/ceph_fs.h> #include <linux/crush/crush.h> /* @@ -280,11 +279,6 @@ bool ceph_osds_changed(const struct ceph_osds *old_acting, const struct ceph_osds *new_acting, bool any_change); -/* calculate mapping of a file extent to an object */ -extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, - u64 off, u64 len, - u64 *bno, u64 *oxoff, u64 *oxlen); - int __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi, const struct ceph_object_id *oid, const struct ceph_object_locator *oloc, diff --git a/include/linux/ceph/striper.h b/include/linux/ceph/striper.h new file mode 100644 index 000000000000..cbd0d24b7148 --- /dev/null +++ b/include/linux/ceph/striper.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CEPH_STRIPER_H +#define _LINUX_CEPH_STRIPER_H + +#include <linux/list.h> +#include <linux/types.h> + +struct ceph_file_layout; + +void ceph_calc_file_object_mapping(struct ceph_file_layout *l, + u64 off, u64 len, + u64 *objno, u64 *objoff, u32 *xlen); + +struct ceph_object_extent { + struct list_head oe_item; + u64 oe_objno; + u64 oe_off; + u64 oe_len; +}; + +static inline void ceph_object_extent_init(struct ceph_object_extent *ex) +{ + INIT_LIST_HEAD(&ex->oe_item); +} + +/* + * Called for each mapped stripe unit. + * + * @bytes: number of bytes mapped, i.e. the minimum of the full length + * requested (file extent length) or the remainder of the stripe + * unit within an object + */ +typedef void (*ceph_object_extent_fn_t)(struct ceph_object_extent *ex, + u32 bytes, void *arg); + +int ceph_file_to_extents(struct ceph_file_layout *l, u64 off, u64 len, + struct list_head *object_extents, + struct ceph_object_extent *alloc_fn(void *arg), + void *alloc_arg, + ceph_object_extent_fn_t action_fn, + void *action_arg); +int ceph_iterate_extents(struct ceph_file_layout *l, u64 off, u64 len, + struct list_head *object_extents, + ceph_object_extent_fn_t action_fn, + void *action_arg); + +struct ceph_file_extent { + u64 fe_off; + u64 fe_len; +}; + +static inline u64 ceph_file_extents_bytes(struct ceph_file_extent *file_extents, + u32 num_file_extents) +{ + u64 bytes = 0; + u32 i; + + for (i = 0; i < num_file_extents; i++) + bytes += file_extents[i].fe_len; + + return bytes; +} + +int ceph_extent_to_file(struct ceph_file_layout *l, + u64 objno, u64 objoff, u64 objlen, + struct ceph_file_extent **file_extents, + u32 *num_file_extents); + +#endif diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index d18da839b810..7e3bceee3489 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -203,6 +203,7 @@ enum { TI_CLKM_PRM, TI_CLKM_SCRM, TI_CLKM_CTRL, + TI_CLKM_CTRL_AUX, TI_CLKM_PLLSS, CLK_MAX_MEMMAPS }; diff --git a/include/linux/compat.h b/include/linux/compat.h index 9847c5a013c3..f188eab10570 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -226,6 +226,8 @@ typedef struct compat_siginfo { #ifdef __ARCH_SI_TRAPNO int _trapno; /* TRAP # which caused the signal */ #endif +#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? 
\ + sizeof(short) : __alignof__(compat_uptr_t)) union { /* * used when si_code=BUS_MCEERR_AR or @@ -234,13 +236,13 @@ typedef struct compat_siginfo { short int _addr_lsb; /* Valid LSB of the reported address. */ /* used when si_code=SEGV_BNDERR */ struct { - compat_uptr_t _dummy_bnd; + char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD]; compat_uptr_t _lower; compat_uptr_t _upper; } _addr_bnd; /* used when si_code=SEGV_PKUERR */ struct { - compat_uptr_t _dummy_pkey; + char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD]; u32 _pkey; } _addr_pkey; }; diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index d3f264a5b04d..ceb96ecab96e 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -17,9 +17,6 @@ */ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) -#define randomized_struct_fields_start struct { -#define randomized_struct_fields_end }; - /* all clang versions usable with the kernel support KASAN ABI version 5 */ #define KASAN_ABI_VERSION 5 diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index e2c7f4369eff..b4bf73f5e38f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -242,6 +242,9 @@ #if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__) #define __randomize_layout __attribute__((randomize_layout)) #define __no_randomize_layout __attribute__((no_randomize_layout)) +/* This anon struct can add padding, so only enable it under randstruct. */ +#define randomized_struct_fields_start struct { +#define randomized_struct_fields_end } __randomize_layout; #endif #endif /* GCC_VERSION >= 40500 */ @@ -256,15 +259,6 @@ */ #define __visible __attribute__((externally_visible)) -/* - * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but it is only - * possible since GCC 4.6. To provide as much build testing coverage - * as possible, this is used for all GCC 4.6+ builds, and not just on - * RANDSTRUCT_PLUGIN builds. 
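For context, a sketch of how the markers are used (struct ex_ctx is hypothetical; outside randstruct builds the markers are assumed to compile away to nothing):

/* Fields between the markers may be reordered, and padding may be
 * inserted, when the randstruct plugin is enabled. */
struct ex_ctx {
	unsigned long fixed_first;	/* layout of this field never changes */
	randomized_struct_fields_start
	void *priv;
	int state;
	randomized_struct_fields_end
};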
- */ -#define randomized_struct_fields_start struct { -#define randomized_struct_fields_end } __randomize_layout; - #endif /* GCC_VERSION >= 40600 */ diff --git a/include/linux/const.h b/include/linux/const.h new file mode 100644 index 000000000000..7b55a55f5911 --- /dev/null +++ b/include/linux/const.h @@ -0,0 +1,9 @@ +#ifndef _LINUX_CONST_H +#define _LINUX_CONST_H + +#include <uapi/linux/const.h> + +#define UL(x) (_UL(x)) +#define ULL(x) (_ULL(x)) + +#endif /* _LINUX_CONST_H */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 1fe49724da9e..87f48dd932eb 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -960,8 +960,6 @@ extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; extern struct freq_attr *cpufreq_generic_attr[]; -int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, - struct cpufreq_frequency_table *table); int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy); unsigned int cpufreq_generic_get(unsigned int cpu); diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index a806e94c482f..1eefabf1621f 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -135,7 +135,8 @@ extern bool cpuidle_not_available(struct cpuidle_driver *drv, struct cpuidle_device *dev); extern int cpuidle_select(struct cpuidle_driver *drv, - struct cpuidle_device *dev); + struct cpuidle_device *dev, + bool *stop_tick); extern int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index); extern void cpuidle_reflect(struct cpuidle_device *dev, int index); @@ -167,7 +168,7 @@ static inline bool cpuidle_not_available(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return true; } static inline int cpuidle_select(struct cpuidle_driver *drv, - struct cpuidle_device *dev) + struct cpuidle_device *dev, bool *stop_tick) {return -ENODEV; } static inline int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) @@ -250,7 +251,8 @@ struct cpuidle_governor { struct cpuidle_device *dev); int (*select) (struct cpuidle_driver *drv, - struct cpuidle_device *dev); + struct cpuidle_device *dev, + bool *stop_tick); void (*reflect) (struct cpuidle_device *dev, int index); }; diff --git a/include/linux/dax.h b/include/linux/dax.h index 0185ecdae135..f9eb22ad341e 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -26,18 +26,42 @@ extern struct attribute_group dax_attribute_group; #if IS_ENABLED(CONFIG_DAX) struct dax_device *dax_get_by_host(const char *host); +struct dax_device *alloc_dax(void *private, const char *host, + const struct dax_operations *ops); void put_dax(struct dax_device *dax_dev); +void kill_dax(struct dax_device *dax_dev); +void dax_write_cache(struct dax_device *dax_dev, bool wc); +bool dax_write_cache_enabled(struct dax_device *dax_dev); #else static inline struct dax_device *dax_get_by_host(const char *host) { return NULL; } - +static inline struct dax_device *alloc_dax(void *private, const char *host, + const struct dax_operations *ops) +{ + /* + * Callers should check IS_ENABLED(CONFIG_DAX) to know if this + * NULL is an error or expected. 
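A caller can follow that advice like this (a minimal sketch, not from the patch; ex_setup_dax() is hypothetical):

#include <linux/dax.h>
#include <linux/errno.h>

static int ex_setup_dax(void *priv, const char *host,
			const struct dax_operations *ops,
			struct dax_device **dax_dev)
{
	*dax_dev = alloc_dax(priv, host, ops);
	if (!*dax_dev && IS_ENABLED(CONFIG_DAX))
		return -ENOMEM;	/* real allocation failure */
	return 0;	/* NULL with DAX compiled out just means "no DAX" */
}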
+ */ + return NULL; +} static inline void put_dax(struct dax_device *dax_dev) { } +static inline void kill_dax(struct dax_device *dax_dev) +{ +} +static inline void dax_write_cache(struct dax_device *dax_dev, bool wc) +{ +} +static inline bool dax_write_cache_enabled(struct dax_device *dax_dev) +{ + return false; +} #endif +struct writeback_control; int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); #if IS_ENABLED(CONFIG_FS_DAX) int __bdev_dax_supported(struct super_block *sb, int blocksize); @@ -57,6 +81,8 @@ static inline void fs_put_dax(struct dax_device *dax_dev) } struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); +int dax_writeback_mapping_range(struct address_space *mapping, + struct block_device *bdev, struct writeback_control *wbc); #else static inline int bdev_dax_supported(struct super_block *sb, int blocksize) { @@ -76,22 +102,23 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev) { return NULL; } + +static inline int dax_writeback_mapping_range(struct address_space *mapping, + struct block_device *bdev, struct writeback_control *wbc) +{ + return -EOPNOTSUPP; +} #endif int dax_read_lock(void); void dax_read_unlock(int id); -struct dax_device *alloc_dax(void *private, const char *host, - const struct dax_operations *ops); bool dax_alive(struct dax_device *dax_dev); -void kill_dax(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); -void dax_write_cache(struct dax_device *dax_dev, bool wc); -bool dax_write_cache_enabled(struct dax_device *dax_dev); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops); @@ -121,7 +148,4 @@ static inline bool dax_mapping(struct address_space *mapping) return mapping->host && IS_DAX(mapping->host); } -struct writeback_control; -int dax_writeback_mapping_range(struct address_space *mapping, - struct block_device *bdev, struct writeback_control *wbc); #endif diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 4384433b50e7..31fef7c34185 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -87,10 +87,10 @@ typedef void (*dm_resume_fn) (struct dm_target *ti); typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, unsigned status_flags, char *result, unsigned maxlen); -typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); +typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv, + char *result, unsigned maxlen); -typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, - struct block_device **bdev, fmode_t *mode); +typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); /* * These iteration functions are typically used to check (and combine) @@ -267,6 +267,12 @@ struct dm_target { unsigned num_discard_bios; /* + * The number of secure erase bios that will be submitted to the target. + * The bio number can be accessed with dm_bio_get_target_bio_nr. + */ + unsigned num_secure_erase_bios; + + /* * The number of WRITE SAME bios that will be submitted to the target. * The bio number can be accessed with dm_bio_get_target_bio_nr. 
*/ diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h new file mode 100644 index 000000000000..3c8b7d274bd9 --- /dev/null +++ b/include/linux/dm-bufio.h @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2009-2011 Red Hat, Inc. + * + * Author: Mikulas Patocka <mpatocka@redhat.com> + * + * This file is released under the GPL. + */ + +#ifndef _LINUX_DM_BUFIO_H +#define _LINUX_DM_BUFIO_H + +#include <linux/blkdev.h> +#include <linux/types.h> + +/*----------------------------------------------------------------*/ + +struct dm_bufio_client; +struct dm_buffer; + +/* + * Create a buffered IO cache on a given device + */ +struct dm_bufio_client * +dm_bufio_client_create(struct block_device *bdev, unsigned block_size, + unsigned reserved_buffers, unsigned aux_size, + void (*alloc_callback)(struct dm_buffer *), + void (*write_callback)(struct dm_buffer *)); + +/* + * Release a buffered IO cache. + */ +void dm_bufio_client_destroy(struct dm_bufio_client *c); + +/* + * Set the sector range. + * When this function is called, there must be no I/O in progress on the bufio + * client. + */ +void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start); + +/* + * WARNING: to avoid deadlocks, these conditions are observed: + * + * - At most one thread can hold at most "reserved_buffers" simultaneously. + * - Each other threads can hold at most one buffer. + * - Threads which call only dm_bufio_get can hold unlimited number of + * buffers. + */ + +/* + * Read a given block from disk. Returns pointer to data. Returns a + * pointer to dm_buffer that can be used to release the buffer or to make + * it dirty. + */ +void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, + struct dm_buffer **bp); + +/* + * Like dm_bufio_read, but return buffer from cache, don't read + * it. If the buffer is not in the cache, return NULL. + */ +void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, + struct dm_buffer **bp); + +/* + * Like dm_bufio_read, but don't read anything from the disk. It is + * expected that the caller initializes the buffer and marks it dirty. + */ +void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, + struct dm_buffer **bp); + +/* + * Prefetch the specified blocks to the cache. + * The function starts to read the blocks and returns without waiting for + * I/O to finish. + */ +void dm_bufio_prefetch(struct dm_bufio_client *c, + sector_t block, unsigned n_blocks); + +/* + * Release a reference obtained with dm_bufio_{read,get,new}. The data + * pointer and dm_buffer pointer is no longer valid after this call. + */ +void dm_bufio_release(struct dm_buffer *b); + +/* + * Mark a buffer dirty. It should be called after the buffer is modified. + * + * In case of memory pressure, the buffer may be written after + * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So + * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but + * the actual writing may occur earlier. + */ +void dm_bufio_mark_buffer_dirty(struct dm_buffer *b); + +/* + * Mark a part of the buffer dirty. + * + * The specified part of the buffer is scheduled to be written. dm-bufio may + * write the specified part of the buffer or it may write a larger superset. + */ +void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, + unsigned start, unsigned end); + +/* + * Initiate writing of dirty buffers, without waiting for completion. + */ +void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c); + +/* + * Write all dirty buffers. 
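To make the lifecycle concrete, a typical read-modify-write of one metadata block might look like this (sketch only; ex_update_block() is hypothetical and error handling is trimmed):

#include <linux/dm-bufio.h>
#include <linux/err.h>

static int ex_update_block(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *bp;
	u8 *data;

	data = dm_bufio_read(c, block, &bp);	/* read from disk or cache */
	if (IS_ERR(data))
		return PTR_ERR(data);

	data[0] ^= 1;				/* modify the buffer */
	dm_bufio_mark_buffer_dirty(bp);		/* schedule it for writeback */
	dm_bufio_release(bp);			/* drop the reference */

	/* commit: dirty buffers created so far are on disk on return */
	return dm_bufio_write_dirty_buffers(c);
}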
Guarantees that all dirty buffers created prior + * to this call are on disk when this call exits. + */ +int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c); + +/* + * Send an empty write barrier to the device to flush hardware disk cache. + */ +int dm_bufio_issue_flush(struct dm_bufio_client *c); + +/* + * Like dm_bufio_release but also move the buffer to the new + * block. dm_bufio_write_dirty_buffers is needed to commit the new block. + */ +void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block); + +/* + * Free the given buffer. + * This is just a hint, if the buffer is in use or dirty, this function + * does nothing. + */ +void dm_bufio_forget(struct dm_bufio_client *c, sector_t block); + +/* + * Set the minimum number of buffers before cleanup happens. + */ +void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n); + +unsigned dm_bufio_get_block_size(struct dm_bufio_client *c); +sector_t dm_bufio_get_device_size(struct dm_bufio_client *c); +sector_t dm_bufio_get_block_number(struct dm_buffer *b); +void *dm_bufio_get_block_data(struct dm_buffer *b); +void *dm_bufio_get_aux_data(struct dm_buffer *b); +struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b); + +/*----------------------------------------------------------------*/ + +#endif diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index f838764993eb..861be5cab1df 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -470,7 +470,11 @@ typedef void (*dma_async_tx_callback_result)(void *dma_async_param, const struct dmaengine_result *result); struct dmaengine_unmap_data { +#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) + u16 map_cnt; +#else u8 map_cnt; +#endif u8 to_cnt; u8 from_cnt; u8 bidi_cnt; diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 58aecb60ea51..aa5db8b5521a 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -21,6 +21,7 @@ #define F2FS_BLKSIZE 4096 /* support only 4KB block */ #define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */ #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ +#define F2FS_EXTENSION_LEN 8 /* max size of extension */ #define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS) #define NULL_ADDR ((block_t)0) /* used as block_t addresses */ @@ -38,15 +39,14 @@ #define F2FS_MAX_QUOTAS 3 -#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */ -#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */ -#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */ -#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */ +#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */ +#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */ +#define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */ +#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */ #define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) /* This flag is used by node and meta inodes, and by recovery */ #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) -#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) /* * For further optimization on multi-head logs, on-disk layout supports maximum @@ -102,7 +102,7 @@ struct f2fs_super_block { __u8 uuid[16]; /* 128-bit uuid for volume */ __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */ __le32 extension_count; /* # of extensions below */ - __u8 
extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ + __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */ __le32 cp_payload; __u8 version[VERSION_LEN]; /* the kernel version */ __u8 init_version[VERSION_LEN]; /* the initial kernel version */ @@ -111,12 +111,14 @@ struct f2fs_super_block { __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ struct f2fs_device devs[MAX_DEVICES]; /* device list */ __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ - __u8 reserved[315]; /* valid reserved region */ + __u8 hot_ext_count; /* # of hot file extension */ + __u8 reserved[314]; /* valid reserved region */ } __packed; /* * For checkpoint */ +#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 #define CP_NOCRC_RECOVERY_FLAG 0x00000200 #define CP_TRIMMED_FLAG 0x00000100 #define CP_NAT_BITS_FLAG 0x00000080 @@ -303,6 +305,10 @@ struct f2fs_node { */ #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) #define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8) +#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \ + ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \ + BITS_PER_LONG * BITS_PER_LONG) + struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index c3c95d18bf43..7e6c77740413 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -64,10 +64,11 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name, struct kmem_cache; +int should_failslab(struct kmem_cache *s, gfp_t gfpflags); #ifdef CONFIG_FAILSLAB -extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags); +extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags); #else -static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags) +static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) { return false; } diff --git a/include/linux/fs.h b/include/linux/fs.h index 070807ce3e41..760d8da1b6c7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -13,6 +13,7 @@ #include <linux/list_lru.h> #include <linux/llist.h> #include <linux/radix-tree.h> +#include <linux/xarray.h> #include <linux/rbtree.h> #include <linux/init.h> #include <linux/pid.h> @@ -390,12 +391,11 @@ int pagecache_write_end(struct file *, struct address_space *mapping, struct address_space { struct inode *host; /* owner: inode, block_device */ - struct radix_tree_root page_tree; /* radix tree of all pages */ - spinlock_t tree_lock; /* and lock protecting it */ + struct radix_tree_root i_pages; /* cached pages */ atomic_t i_mmap_writable;/* count VM_SHARED mappings */ struct rb_root_cached i_mmap; /* tree of private and shared mappings */ struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */ - /* Protected by tree_lock together with the radix tree */ + /* Protected by the i_pages lock */ unsigned long nrpages; /* number of total pages */ /* number of shadow or DAX exceptional entries */ unsigned long nrexceptional; @@ -1321,6 +1321,8 @@ extern int send_sigurg(struct fown_struct *fown); /* sb->s_iflags to limit user namespace mounts */ #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ +#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 +#define SB_I_UNTRUSTED_MOUNTER 0x00000040 /* Possible states of 'frozen' field */ enum { @@ -1665,7 +1667,7 @@ typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned); struct dir_context { - const filldir_t actor; + filldir_t actor; loff_t pos; 
}; @@ -1987,7 +1989,7 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) * * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to * synchronize competing switching instances and to tell - * wb stat updates to grab mapping->tree_lock. See + * wb stat updates to grab the i_pages lock. See * inode_switch_wb_work_fn() for details. * * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper @@ -2015,7 +2017,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) #define I_WB_SWITCH (1 << 13) #define I_OVL_INUSE (1 << 14) -#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) +#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) +#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) #define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) extern void __mark_inode_dirty(struct inode *, int); @@ -2381,8 +2384,8 @@ struct audit_names; struct filename { const char *name; /* pointer to actual string */ const __user char *uptr; /* original userland pointer */ - struct audit_names *aname; int refcnt; + struct audit_names *aname; const char iname[]; }; @@ -2442,6 +2445,7 @@ extern int sync_blockdev(struct block_device *bdev); extern void kill_bdev(struct block_device *); extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); +extern void emergency_thaw_bdev(struct super_block *sb); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); @@ -2467,6 +2471,11 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) return 0; } +static inline int emergency_thaw_bdev(struct super_block *sb) +{ + return 0; +} + static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) { } @@ -3124,6 +3133,10 @@ extern int simple_rmdir(struct inode *, struct dentry *); extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); extern int noop_fsync(struct file *, loff_t, loff_t, int); +extern int noop_set_page_dirty(struct page *page); +extern void noop_invalidatepage(struct page *page, unsigned int offset, + unsigned int length); +extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); extern int simple_empty(struct dentry *); extern int simple_readpage(struct file *file, struct page *page); extern int simple_write_begin(struct file *file, struct address_space *mapping, diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 3b03e29e2f1a..34cf0fdd7dc7 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -29,6 +29,18 @@ struct fscache_cache_ops; struct fscache_object; struct fscache_operation; +enum fscache_obj_ref_trace { + fscache_obj_get_add_to_deps, + fscache_obj_get_queue, + fscache_obj_put_alloc_fail, + fscache_obj_put_attach_fail, + fscache_obj_put_drop_obj, + fscache_obj_put_enq_dep, + fscache_obj_put_queue, + fscache_obj_put_work, + fscache_obj_ref__nr_traces +}; + /* * cache tag definition */ @@ -123,7 +135,8 @@ extern void fscache_op_work_func(struct work_struct *work); extern void fscache_enqueue_operation(struct fscache_operation *); extern void fscache_op_complete(struct fscache_operation *, bool); extern void fscache_put_operation(struct fscache_operation *); -extern void fscache_operation_init(struct fscache_operation *, +extern void fscache_operation_init(struct fscache_cookie *, + struct fscache_operation *, fscache_operation_processor_t, fscache_operation_cancel_t, 
fscache_operation_release_t); @@ -185,7 +198,7 @@ static inline void fscache_retrieval_complete(struct fscache_retrieval *op, { atomic_sub(n_pages, &op->n_pages); if (atomic_read(&op->n_pages) <= 0) - fscache_op_complete(&op->op, true); + fscache_op_complete(&op->op, false); } /** @@ -231,7 +244,8 @@ struct fscache_cache_ops { void (*lookup_complete)(struct fscache_object *object); /* increment the usage count on this object (may fail if unmounting) */ - struct fscache_object *(*grab_object)(struct fscache_object *object); + struct fscache_object *(*grab_object)(struct fscache_object *object, + enum fscache_obj_ref_trace why); /* pin an object in the cache */ int (*pin_object)(struct fscache_object *object); @@ -254,7 +268,8 @@ struct fscache_cache_ops { void (*drop_object)(struct fscache_object *object); /* dispose of a reference to an object */ - void (*put_object)(struct fscache_object *object); + void (*put_object)(struct fscache_object *object, + enum fscache_obj_ref_trace why); /* sync a cache */ void (*sync_cache)(struct fscache_cache *cache); @@ -538,7 +553,8 @@ extern bool fscache_object_sleep_till_congested(signed long *timeoutp); extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, const void *data, - uint16_t datalen); + uint16_t datalen, + loff_t object_size); extern void fscache_object_retrying_stale(struct fscache_object *object); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index fe0c349684fa..84b90a79d75a 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -22,6 +22,7 @@ #include <linux/list.h> #include <linux/pagemap.h> #include <linux/pagevec.h> +#include <linux/list_bl.h> #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE) #define fscache_available() (1) @@ -83,45 +84,15 @@ struct fscache_cookie_def { const void *parent_netfs_data, const void *cookie_netfs_data); - /* get an index key - * - should store the key data in the buffer - * - should return the amount of data stored - * - not permitted to return an error - * - the netfs data from the cookie being used as the source is - * presented - */ - uint16_t (*get_key)(const void *cookie_netfs_data, - void *buffer, - uint16_t bufmax); - - /* get certain file attributes from the netfs data - * - this function can be absent for an index - * - not permitted to return an error - * - the netfs data from the cookie being used as the source is - * presented - */ - void (*get_attr)(const void *cookie_netfs_data, uint64_t *size); - - /* get the auxiliary data from netfs data - * - this function can be absent if the index carries no state data - * - should store the auxiliary data in the buffer - * - should return the amount of amount stored - * - not permitted to return an error - * - the netfs data from the cookie being used as the source is - * presented - */ - uint16_t (*get_aux)(const void *cookie_netfs_data, - void *buffer, - uint16_t bufmax); - /* consult the netfs about the state of an object * - this function can be absent if the index carries no state data * - the netfs data from the cookie being used as the target is - * presented, as is the auxiliary data + * presented, as is the auxiliary data and the object size */ enum fscache_checkaux (*check_aux)(void *cookie_netfs_data, const void *data, - uint16_t datalen); + uint16_t datalen, + loff_t object_size); /* get an extra reference on a read context * - this function can be absent if the completion function doesn't @@ -154,7 +125,6 @@ struct fscache_netfs { uint32_t version; /* indexing version */ 
const char *name; /* filesystem name */ struct fscache_cookie *primary_index; - struct list_head link; /* internal link */ }; /* @@ -173,6 +143,7 @@ struct fscache_cookie { struct hlist_head backing_objects; /* object(s) backing this file/index */ const struct fscache_cookie_def *def; /* definition */ struct fscache_cookie *parent; /* parent of this entry */ + struct hlist_bl_node hash_link; /* Link in hash table */ void *netfs_data; /* back pointer to netfs */ struct radix_tree_root stores; /* pages to be stored on this cookie */ #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ @@ -186,6 +157,22 @@ struct fscache_cookie { #define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */ #define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */ #define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */ +#define FSCACHE_COOKIE_AUX_UPDATED 8 /* T if the auxiliary data was updated */ +#define FSCACHE_COOKIE_ACQUIRED 9 /* T if cookie is in use */ +#define FSCACHE_COOKIE_RELINQUISHING 10 /* T if cookie is being relinquished */ + + u8 type; /* Type of object */ + u8 key_len; /* Length of index key */ + u8 aux_len; /* Length of auxiliary data */ + u32 key_hash; /* Hash of parent, type, key, len */ + union { + void *key; /* Index key */ + u8 inline_key[16]; /* - If the key is short enough */ + }; + union { + void *aux; /* Auxiliary data */ + u8 inline_aux[8]; /* - If the aux data is short enough */ + }; }; static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie) @@ -208,10 +195,12 @@ extern void __fscache_release_cache_tag(struct fscache_cache_tag *); extern struct fscache_cookie *__fscache_acquire_cookie( struct fscache_cookie *, const struct fscache_cookie_def *, - void *, bool); -extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool); -extern int __fscache_check_consistency(struct fscache_cookie *); -extern void __fscache_update_cookie(struct fscache_cookie *); + const void *, size_t, + const void *, size_t, + void *, loff_t, bool); +extern void __fscache_relinquish_cookie(struct fscache_cookie *, const void *, bool); +extern int __fscache_check_consistency(struct fscache_cookie *, const void *); +extern void __fscache_update_cookie(struct fscache_cookie *, const void *); extern int __fscache_attr_changed(struct fscache_cookie *); extern void __fscache_invalidate(struct fscache_cookie *); extern void __fscache_wait_on_invalidate(struct fscache_cookie *); @@ -228,7 +217,7 @@ extern int __fscache_read_or_alloc_pages(struct fscache_cookie *, void *, gfp_t); extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t); -extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t); +extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t); extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); @@ -238,8 +227,8 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *, struct inode *); extern void __fscache_readpages_cancel(struct fscache_cookie *cookie, struct list_head *pages); -extern void __fscache_disable_cookie(struct fscache_cookie *, bool); -extern void __fscache_enable_cookie(struct fscache_cookie *, +extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool); +extern void __fscache_enable_cookie(struct 
fscache_cookie *, const void *, loff_t, bool (*)(void *), void *); /** @@ -317,8 +306,13 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag) * fscache_acquire_cookie - Acquire a cookie to represent a cache object * @parent: The cookie that's to be the parent of this one * @def: A description of the cache object, including callback operations + * @index_key: The index key for this cookie + * @index_key_len: Size of the index key + * @aux_data: The auxiliary data for the cookie (may be NULL) + * @aux_data_len: Size of the auxiliary data buffer * @netfs_data: An arbitrary piece of data to be kept in the cookie to * represent the cache object to the netfs + * @object_size: The initial size of object * @enable: Whether or not to enable a data cookie immediately * * This function is used to inform FS-Cache about part of an index hierarchy @@ -332,12 +326,19 @@ static inline struct fscache_cookie *fscache_acquire_cookie( struct fscache_cookie *parent, const struct fscache_cookie_def *def, + const void *index_key, + size_t index_key_len, + const void *aux_data, + size_t aux_data_len, void *netfs_data, + loff_t object_size, bool enable) { if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent)) - return __fscache_acquire_cookie(parent, def, netfs_data, - enable); + return __fscache_acquire_cookie(parent, def, + index_key, index_key_len, + aux_data, aux_data_len, + netfs_data, object_size, enable); else return NULL; } @@ -346,36 +347,44 @@ struct fscache_cookie *fscache_acquire_cookie( * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding * it * @cookie: The cookie being returned + * @aux_data: The updated auxiliary data for the cookie (may be NULL) * @retire: True if the cache object the cookie represents is to be discarded * * This function returns a cookie to the cache, forcibly discarding the - * associated cache object if retire is set to true. + * associated cache object if retire is set to true. The opportunity is + * provided to update the auxiliary data in the cache before the object is + * disconnected. * * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline -void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire) +void fscache_relinquish_cookie(struct fscache_cookie *cookie, + const void *aux_data, + bool retire) { if (fscache_cookie_valid(cookie)) - __fscache_relinquish_cookie(cookie, retire); + __fscache_relinquish_cookie(cookie, aux_data, retire); } /** - * fscache_check_consistency - Request that if the cache is updated + * fscache_check_consistency - Request validation of a cache's auxiliary data * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) * - * Request an consistency check from fscache, which passes the request - * to the backing cache. + * Request a consistency check from fscache, which passes the request to the + * backing cache. The auxiliary data on the cookie will be updated first if + * @aux_data is set. * * Returns 0 if consistent and -ESTALE if inconsistent. May also * return -ENOMEM and -ERESTARTSYS.
*/ static inline -int fscache_check_consistency(struct fscache_cookie *cookie) +int fscache_check_consistency(struct fscache_cookie *cookie, + const void *aux_data) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - return __fscache_check_consistency(cookie); + return __fscache_check_consistency(cookie, aux_data); else return 0; } @@ -383,18 +392,20 @@ int fscache_check_consistency(struct fscache_cookie *cookie) /** * fscache_update_cookie - Request that a cache object be updated * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) * * Request an update of the index data for the cache object associated with the - * cookie. + * cookie. The auxiliary data on the cookie will be updated first if @aux_data + * is set. * * See Documentation/filesystems/caching/netfs-api.txt for a complete * description. */ static inline -void fscache_update_cookie(struct fscache_cookie *cookie) +void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - __fscache_update_cookie(cookie); + __fscache_update_cookie(cookie, aux_data); } /** @@ -648,6 +659,7 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie, * fscache_write_page - Request storage of a page in the cache * @cookie: The cookie representing the cache object * @page: The netfs page to store + * @object_size: Updated size of object * @gfp: The conditions under which memory allocation should be made * * Request the contents of the netfs page be written into the cache. This @@ -665,10 +677,11 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie, static inline int fscache_write_page(struct fscache_cookie *cookie, struct page *page, + loff_t object_size, gfp_t gfp) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - return __fscache_write_page(cookie, page, gfp); + return __fscache_write_page(cookie, page, object_size, gfp); else return -ENOBUFS; } @@ -780,6 +793,7 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, /** * fscache_disable_cookie - Disable a cookie * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) * @invalidate: Invalidate the backing object * * Disable a cookie from accepting further alloc, read, write, invalidate, @@ -790,34 +804,44 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, * * If @invalidate is set, then the backing object will be invalidated and * detached, otherwise it will just be detached. + * + * If @aux_data is set, then auxiliary data will be updated from that. */ static inline -void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) +void fscache_disable_cookie(struct fscache_cookie *cookie, + const void *aux_data, + bool invalidate) { if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie)) - __fscache_disable_cookie(cookie, invalidate); + __fscache_disable_cookie(cookie, aux_data, invalidate); } /** * fscache_enable_cookie - Reenable a cookie * @cookie: The cookie representing the cache object + * @aux_data: The updated auxiliary data for the cookie (may be NULL) + * @object_size: Current size of object * @can_enable: A function to permit enablement once lock is held * @data: Data for can_enable() * * Reenable a previously disabled cookie, allowing it to accept further alloc, * read, write, invalidate, update or acquire operations. 
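Putting the keyed API together, an acquire/relinquish cycle might look like this (an illustrative sketch; the u64-inode key, the u32 generation aux datum and the ex_* names are invented, not part of the patch):

#include <linux/fscache.h>

static struct fscache_cookie *ex_acquire(struct fscache_cookie *parent,
					 const struct fscache_cookie_def *def,
					 u64 ino, u32 gen, loff_t size)
{
	/* the key identifies the object; the aux data validates it later */
	return fscache_acquire_cookie(parent, def,
				      &ino, sizeof(ino),	/* index key */
				      &gen, sizeof(gen),	/* aux data */
				      NULL,			/* netfs_data */
				      size, true);		/* enable now */
}

static void ex_release(struct fscache_cookie *cookie, u32 gen)
{
	/* push the latest aux data before the object is disconnected */
	fscache_relinquish_cookie(cookie, &gen, false);
}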
An attempt will be - * made to immediately reattach the cookie to a backing object. + * made to immediately reattach the cookie to a backing object. If @aux_data + * is set, the auxiliary data attached to the cookie will be updated. * * The can_enable() function is called (if not NULL) once the enablement lock * is held to rule on whether enablement is still permitted to go ahead. */ static inline void fscache_enable_cookie(struct fscache_cookie *cookie, + const void *aux_data, + loff_t object_size, bool (*can_enable)(void *data), void *data) { if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie)) - __fscache_enable_cookie(cookie, can_enable, data); + __fscache_enable_cookie(cookie, aux_data, object_size, + can_enable, data); } #endif /* _LINUX_FSCACHE_H */ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 067d52e95f02..9f1edb92c97e 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -331,6 +331,12 @@ extern int fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct list_head *, struct fsnotify_event *)); +/* Queue overflow event to a notification group */ +static inline void fsnotify_queue_overflow(struct fsnotify_group *group) +{ + fsnotify_add_event(group, group->overflow_event, NULL); +} + /* true if the group notification queue is empty */ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); /* return, but do not dequeue the first event on the notification queue */ diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 325017ad9311..39988924de3a 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -80,76 +80,145 @@ struct hmm; /* - * hmm_pfn_t - HMM uses its own pfn type to keep several flags per page + * hmm_pfn_flag_e - HMM flag enums * * Flags: - * HMM_PFN_VALID: pfn is valid - * HMM_PFN_READ: CPU page table has read permission set + * HMM_PFN_VALID: pfn is valid. It has, at least, read permission. * HMM_PFN_WRITE: CPU page table has write permission set + * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE) + * + * The driver provides a flags array: if the driver's valid bit for an entry + * is bit 3, i.e. (entry & (1 << 3)) is true when the entry is valid, then the + * driver must provide an array in hmm_range.flags with + * hmm_range.flags[HMM_PFN_VALID] == 1 << 3. The same logic applies to all + * flags. This is the same idea as vm_page_prot in a vma, except that it is + * per device driver rather than per architecture. + */ +enum hmm_pfn_flag_e { + HMM_PFN_VALID = 0, + HMM_PFN_WRITE, + HMM_PFN_DEVICE_PRIVATE, + HMM_PFN_FLAG_MAX +}; + +/* + * hmm_pfn_value_e - HMM pfn special value + * + * Flags: * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory - * HMM_PFN_EMPTY: corresponding CPU page table entry is pte_none() + * HMM_PFN_NONE: corresponding CPU page table entry is pte_none() * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the * result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not * be mirrored by a device, because the entry will never have HMM_PFN_VALID * set and the pfn value is undefined. - * HMM_PFN_DEVICE_UNADDRESSABLE: unaddressable device memory (ZONE_DEVICE) + * + * The driver provides the entry values for the none, error and special cases; + * it can alias some of them (e.g. use the same value for error and special), + * but it must not alias none with error or special.
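A driver might describe its page-table encoding to HMM like this (a sketch with invented bit assignments and values; a real driver uses whatever its device page-table format defines):

#include <linux/hmm.h>

static const uint64_t ex_flags[HMM_PFN_FLAG_MAX] = {
	[HMM_PFN_VALID]		 = 1ULL << 0,
	[HMM_PFN_WRITE]		 = 1ULL << 1,
	[HMM_PFN_DEVICE_PRIVATE] = 1ULL << 2,
};

static const uint64_t ex_values[HMM_PFN_VALUE_MAX] = {
	[HMM_PFN_NONE]    = 0,
	[HMM_PFN_ERROR]   = 0x2,	/* distinct from NONE and SPECIAL */
	[HMM_PFN_SPECIAL] = 0x4,	/* neither value sets the VALID bit */
};

static void ex_init_range(struct hmm_range *range, struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  uint64_t *pfns)
{
	range->vma = vma;
	range->start = start;
	range->end = end;
	range->pfns = pfns;
	range->flags = ex_flags;
	range->values = ex_values;
	range->pfn_shift = PAGE_SHIFT;	/* low bits stay free for the flags */
}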
+ * + * The HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be: + * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisoned, + * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry, + * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is a special one */ -typedef unsigned long hmm_pfn_t; +enum hmm_pfn_value_e { + HMM_PFN_ERROR, + HMM_PFN_NONE, + HMM_PFN_SPECIAL, + HMM_PFN_VALUE_MAX +}; -#define HMM_PFN_VALID (1 << 0) -#define HMM_PFN_READ (1 << 1) -#define HMM_PFN_WRITE (1 << 2) -#define HMM_PFN_ERROR (1 << 3) -#define HMM_PFN_EMPTY (1 << 4) -#define HMM_PFN_SPECIAL (1 << 5) -#define HMM_PFN_DEVICE_UNADDRESSABLE (1 << 6) -#define HMM_PFN_SHIFT 7 +/* + * struct hmm_range - track invalidation lock on virtual address range + * + * @vma: the vm area struct for the range + * @list: all range locks are on a list + * @start: range virtual start address (inclusive) + * @end: range virtual end address (exclusive) + * @pfns: array of pfns (big enough for the range) + * @flags: pfn flags to match device driver page table + * @values: pfn values for the special cases (none, special, error, ...) + * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT) + * @valid: pfns array did not change since it was filled by an HMM function + */ +struct hmm_range { + struct vm_area_struct *vma; + struct list_head list; + unsigned long start; + unsigned long end; + uint64_t *pfns; + const uint64_t *flags; + const uint64_t *values; + uint8_t pfn_shift; + bool valid; +}; /* - * hmm_pfn_t_to_page() - return struct page pointed to by a valid hmm_pfn_t - * @pfn: hmm_pfn_t to convert to struct page - * Returns: struct page pointer if pfn is a valid hmm_pfn_t, NULL otherwise + * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn + * @range: range used to decode the HMM pfn value + * @pfn: HMM pfn value to get corresponding struct page from + * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise * - * If the hmm_pfn_t is valid (ie valid flag set) then return the struct page - * matching the pfn value stored in the hmm_pfn_t. Otherwise return NULL. + * If the HMM pfn is valid (i.e. the valid flag is set) then return the struct + * page matching the pfn value stored in the HMM pfn. Otherwise return NULL.
*/ -static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn) +static inline struct page *hmm_pfn_to_page(const struct hmm_range *range, + uint64_t pfn) { - if (!(pfn & HMM_PFN_VALID)) + if (pfn == range->values[HMM_PFN_NONE]) + return NULL; + if (pfn == range->values[HMM_PFN_ERROR]) return NULL; - return pfn_to_page(pfn >> HMM_PFN_SHIFT); + if (pfn == range->values[HMM_PFN_SPECIAL]) + return NULL; + if (!(pfn & range->flags[HMM_PFN_VALID])) + return NULL; + return pfn_to_page(pfn >> range->pfn_shift); } /* - * hmm_pfn_t_to_pfn() - return pfn value store in a hmm_pfn_t - * @pfn: hmm_pfn_t to extract pfn from - * Returns: pfn value if hmm_pfn_t is valid, -1UL otherwise + * hmm_pfn_to_pfn() - return the pfn value stored in an HMM pfn + * @range: range used to decode the HMM pfn value + * @pfn: HMM pfn value to extract pfn from + * Returns: the pfn value if the HMM pfn is valid, -1UL otherwise */ -static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn) +static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range, + uint64_t pfn) { - if (!(pfn & HMM_PFN_VALID)) + if (pfn == range->values[HMM_PFN_NONE]) + return -1UL; + if (pfn == range->values[HMM_PFN_ERROR]) + return -1UL; + if (pfn == range->values[HMM_PFN_SPECIAL]) + return -1UL; + if (!(pfn & range->flags[HMM_PFN_VALID])) return -1UL; - return (pfn >> HMM_PFN_SHIFT); + return (pfn >> range->pfn_shift); } /* - * hmm_pfn_t_from_page() - create a valid hmm_pfn_t value from struct page - * @page: struct page pointer for which to create the hmm_pfn_t - * Returns: valid hmm_pfn_t for the page + * hmm_pfn_from_page() - create a valid HMM pfn value from struct page + * @range: range used to encode the HMM pfn value + * @page: struct page pointer for which to create the HMM pfn + * Returns: valid HMM pfn for the page */ -static inline hmm_pfn_t hmm_pfn_t_from_page(struct page *page) +static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range, + struct page *page) { - return (page_to_pfn(page) << HMM_PFN_SHIFT) | HMM_PFN_VALID; + return (page_to_pfn(page) << range->pfn_shift) | + range->flags[HMM_PFN_VALID]; } /* - * hmm_pfn_t_from_pfn() - create a valid hmm_pfn_t value from pfn - * @pfn: pfn value for which to create the hmm_pfn_t - * Returns: valid hmm_pfn_t for the pfn + * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn + * @range: range used to encode the HMM pfn value + * @pfn: pfn value for which to create the HMM pfn + * Returns: valid HMM pfn for the pfn */ -static inline hmm_pfn_t hmm_pfn_t_from_pfn(unsigned long pfn) +static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, + unsigned long pfn) { - return (pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID; + return (pfn << range->pfn_shift) | + range->flags[HMM_PFN_VALID]; } @@ -218,6 +287,16 @@ enum hmm_update_type { * @update: callback to update range on a device */ struct hmm_mirror_ops { + /* release() - release hmm_mirror + * + * @mirror: pointer to struct hmm_mirror + * + * This is called when the mm_struct is being released. + * The callback should make sure no references to the mirror occur + * after the callback returns.
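A quick round trip through the helpers above, assuming a range initialized as in the earlier sketch:

#include <linux/hmm.h>

/* Encoding a page sets the range's VALID flag, so decoding it again
 * must yield the same struct page. */
static bool ex_roundtrip(const struct hmm_range *range, struct page *page)
{
	uint64_t pfn = hmm_pfn_from_page(range, page);

	return hmm_pfn_to_page(range, pfn) == page;
}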
+ */ + void (*release)(struct hmm_mirror *mirror); + /* sync_cpu_device_pagetables() - synchronize page tables * * @mirror: pointer to struct hmm_mirror @@ -262,23 +341,6 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror); /* - * struct hmm_range - track invalidation lock on virtual address range - * - * @list: all range lock are on a list - * @start: range virtual start address (inclusive) - * @end: range virtual end address (exclusive) - * @pfns: array of pfns (big enough for the range) - * @valid: pfns array did not change since it has been fill by an HMM function - */ -struct hmm_range { - struct list_head list; - unsigned long start; - unsigned long end; - hmm_pfn_t *pfns; - bool valid; -}; - -/* * To snapshot the CPU page table, call hmm_vma_get_pfns(), then take a device * driver lock that serializes device page table updates, then call * hmm_vma_range_done(), to check if the snapshot is still valid. The same @@ -291,17 +353,13 @@ struct hmm_range { * * IF YOU DO NOT FOLLOW THE ABOVE RULE THE SNAPSHOT CONTENT MIGHT BE INVALID ! */ -int hmm_vma_get_pfns(struct vm_area_struct *vma, - struct hmm_range *range, - unsigned long start, - unsigned long end, - hmm_pfn_t *pfns); -bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range); +int hmm_vma_get_pfns(struct hmm_range *range); +bool hmm_vma_range_done(struct hmm_range *range); /* * Fault memory on behalf of device driver. Unlike handle_mm_fault(), this will - * not migrate any device memory back to system memory. The hmm_pfn_t array will + * not migrate any device memory back to system memory. The HMM pfn array will * be updated with the fault result and current snapshot of the CPU page table * for the range. * @@ -310,22 +368,26 @@ bool hmm_vma_range_done(struct vm_area_struct *vma, struct hmm_range *range); * function returns -EAGAIN. * * Return value does not reflect if the fault was successful for every single - * address or not. Therefore, the caller must to inspect the hmm_pfn_t array to + * address or not. Therefore, the caller must to inspect the HMM pfn array to * determine fault status for each address. * * Trying to fault inside an invalid vma will result in -EINVAL. * * See the function description in mm/hmm.c for further documentation. */ -int hmm_vma_fault(struct vm_area_struct *vma, - struct hmm_range *range, - unsigned long start, - unsigned long end, - hmm_pfn_t *pfns, - bool write, - bool block); -#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ +int hmm_vma_fault(struct hmm_range *range, bool block); +/* Below are for HMM internal use only! Not to be used by device driver! */ +void hmm_mm_destroy(struct mm_struct *mm); + +static inline void hmm_mm_init(struct mm_struct *mm) +{ + mm->hmm = NULL; +} +#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */ +static inline void hmm_mm_destroy(struct mm_struct *mm) {} +static inline void hmm_mm_init(struct mm_struct *mm) {} +#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ #if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC) struct hmm_devmem; @@ -498,23 +560,9 @@ struct hmm_device { struct hmm_device *hmm_device_new(void *drvdata); void hmm_device_put(struct hmm_device *hmm_device); #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ -#endif /* IS_ENABLED(CONFIG_HMM) */ - -/* Below are for HMM internal use only! Not to be used by device driver! 
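The snapshot protocol just described might be followed like this (sketch only; ex_dev_lock stands in for the driver lock that serializes device page-table updates):

#include <linux/errno.h>
#include <linux/hmm.h>
#include <linux/mutex.h>

static int ex_snapshot(struct hmm_range *range, struct mutex *ex_dev_lock)
{
	int ret;

	ret = hmm_vma_get_pfns(range);		/* fill range->pfns */
	if (ret)
		return ret;

	mutex_lock(ex_dev_lock);
	if (!hmm_vma_range_done(range)) {
		mutex_unlock(ex_dev_lock);
		return -EAGAIN;			/* raced with invalidation */
	}
	/* ... update the device page table from range->pfns ... */
	mutex_unlock(ex_dev_lock);
	return 0;
}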
*/ -#if IS_ENABLED(CONFIG_HMM_MIRROR) -void hmm_mm_destroy(struct mm_struct *mm); - -static inline void hmm_mm_init(struct mm_struct *mm) -{ - mm->hmm = NULL; -} -#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */ -static inline void hmm_mm_destroy(struct mm_struct *mm) {} -static inline void hmm_mm_init(struct mm_struct *mm) {} -#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ - - #else /* IS_ENABLED(CONFIG_HMM) */ static inline void hmm_mm_destroy(struct mm_struct *mm) {} static inline void hmm_mm_init(struct mm_struct *mm) {} +#endif /* IS_ENABLED(CONFIG_HMM) */ + #endif /* LINUX_HMM_H */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 78f456fcd242..a2656c3ebe81 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -424,6 +424,7 @@ static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer) } extern u64 hrtimer_get_next_event(void); +extern u64 hrtimer_next_event_without(const struct hrtimer *exclude); extern bool hrtimer_active(const struct hrtimer *timer); diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index ceb751987c40..e5fd2707b6df 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -29,6 +29,7 @@ enum hwmon_sensor_types { hwmon_humidity, hwmon_fan, hwmon_pwm, + hwmon_max, }; enum hwmon_chip_attributes { diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 2048f3c3b68a..192ed8fbc403 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -26,7 +26,6 @@ #define _HYPERV_H #include <uapi/linux/hyperv.h> -#include <uapi/asm/hyperv.h> #include <linux/types.h> #include <linux/scatterlist.h> diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h index 0e5f7c77d1d8..c37329432a8e 100644 --- a/include/linux/i2c-pca-platform.h +++ b/include/linux/i2c-pca-platform.h @@ -3,9 +3,6 @@ #define I2C_PCA9564_PLATFORM_H struct i2c_pca9564_pf_platform_data { - int gpio; /* pin to reset chip. 
driver will work when - * not supplied (negative value), but it - * cannot exit some error conditions then */ int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */ int timeout; /* timeout in jiffies */ }; diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 419a38e7c315..44ad14e016b5 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -47,6 +47,7 @@ struct i2c_algorithm; struct i2c_adapter; struct i2c_client; struct i2c_driver; +struct i2c_device_identity; union i2c_smbus_data; struct i2c_board_info; enum i2c_slave_event; @@ -186,8 +187,37 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, extern s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, u8 command, u8 length, u8 *values); +int i2c_get_device_id(const struct i2c_client *client, + struct i2c_device_identity *id); #endif /* I2C */ +/** + * struct i2c_device_identity - i2c client device identification + * @manufacturer_id: 0 - 4095, database maintained by NXP + * @part_id: 0 - 511, according to manufacturer + * @die_revision: 0 - 7, according to manufacturer + */ +struct i2c_device_identity { + u16 manufacturer_id; +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS 0 +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_1 1 +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_2 2 +#define I2C_DEVICE_ID_NXP_SEMICONDUCTORS_3 3 +#define I2C_DEVICE_ID_RAMTRON_INTERNATIONAL 4 +#define I2C_DEVICE_ID_ANALOG_DEVICES 5 +#define I2C_DEVICE_ID_STMICROELECTRONICS 6 +#define I2C_DEVICE_ID_ON_SEMICONDUCTOR 7 +#define I2C_DEVICE_ID_SPRINTEK_CORPORATION 8 +#define I2C_DEVICE_ID_ESPROS_PHOTONICS_AG 9 +#define I2C_DEVICE_ID_FUJITSU_SEMICONDUCTOR 10 +#define I2C_DEVICE_ID_FLIR 11 +#define I2C_DEVICE_ID_O2MICRO 12 +#define I2C_DEVICE_ID_ATMEL 13 +#define I2C_DEVICE_ID_NONE 0xffff + u16 part_id; + u8 die_revision; +}; + enum i2c_alert_protocol { I2C_PROTOCOL_SMBUS_ALERT, I2C_PROTOCOL_SMBUS_HOST_NOTIFY, diff --git a/include/linux/idr.h b/include/linux/idr.h index 7d6a6313f0ab..e856f4e0ab35 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -29,29 +29,31 @@ struct idr { #define IDR_FREE 0 /* Set the IDR flag and the IDR_FREE tag */ -#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) +#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \ + (1 << (ROOT_TAG_SHIFT + IDR_FREE))) -#define IDR_INIT_BASE(base) { \ - .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER), \ +#define IDR_INIT_BASE(name, base) { \ + .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \ .idr_base = (base), \ .idr_next = 0, \ } /** * IDR_INIT() - Initialise an IDR. + * @name: Name of IDR. * * A freshly-initialised IDR contains no IDs. */ -#define IDR_INIT IDR_INIT_BASE(0) +#define IDR_INIT(name) IDR_INIT_BASE(name, 0) /** - * DEFINE_IDR() - Define a statically-allocated IDR - * @name: Name of IDR + * DEFINE_IDR() - Define a statically-allocated IDR. + * @name: Name of IDR. * * An IDR defined using this macro is ready for use with no additional * initialisation required. It contains no IDs. 
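For callers nothing changes apart from the initialisers now carrying the IDR's name (which RADIX_TREE_INIT() uses internally). A minimal usage sketch; my_idr and track_object() are illustrative:

static DEFINE_IDR(my_idr);	/* expands to: struct idr my_idr = IDR_INIT(my_idr) */

/* Returns a new non-negative id mapped to ptr, or a negative errno. */
static int track_object(void *ptr)
{
	return idr_alloc(&my_idr, ptr, 0, 0, GFP_KERNEL);
}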
*/ -#define DEFINE_IDR(name) struct idr name = IDR_INIT +#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) /** * idr_get_cursor - Return the current position of the cyclic allocator @@ -218,10 +220,10 @@ struct ida { struct radix_tree_root ida_rt; }; -#define IDA_INIT { \ - .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \ +#define IDA_INIT(name) { \ + .ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT), \ } -#define DEFINE_IDA(name) struct ida name = IDA_INIT +#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) int ida_pre_get(struct ida *ida, gfp_t gfp_mask); int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 8dad3dd26eae..ef169d67df92 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -209,12 +209,12 @@ #define DMA_FECTL_IM (((u32)1) << 31) /* FSTS_REG */ -#define DMA_FSTS_PPF ((u32)2) -#define DMA_FSTS_PFO ((u32)1) -#define DMA_FSTS_IQE (1 << 4) -#define DMA_FSTS_ICE (1 << 5) -#define DMA_FSTS_ITE (1 << 6) -#define DMA_FSTS_PRO (1 << 7) +#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */ +#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */ +#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */ +#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */ +#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */ +#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */ #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) /* FRCD_REG, 32 bits access */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 41b8c5757859..19938ee6eb31 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -465,23 +465,23 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, return -ENODEV; } -static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size) +static inline size_t iommu_unmap(struct iommu_domain *domain, + unsigned long iova, size_t size) { - return -ENODEV; + return 0; } -static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, - int gfp_order) +static inline size_t iommu_unmap_fast(struct iommu_domain *domain, + unsigned long iova, int gfp_order) { - return -ENODEV; + return 0; } static inline size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot) { - return -ENODEV; + return 0; } static inline void iommu_flush_tlb_all(struct iommu_domain *domain) diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 9385aa57497b..a27cf6652327 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -62,8 +62,11 @@ extern int register_refined_jiffies(long clock_tick_rate); /* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ #define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ) -/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ -#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) +/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */ +#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ) + +/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ +#define USER_TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) #ifndef __jiffy_arch_data #define __jiffy_arch_data diff --git a/include/linux/kasan.h b/include/linux/kasan.h index d6459bd1376d..de784fd11d12 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -43,7 +43,7 @@ void 
kasan_unpoison_stack_above_sp_to(const void *watermark); void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page *page, unsigned int order); -void kasan_cache_create(struct kmem_cache *cache, size_t *size, +void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, slab_flags_t *flags); void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); @@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {} static inline void kasan_free_pages(struct page *page, unsigned int order) {} static inline void kasan_cache_create(struct kmem_cache *cache, - size_t *size, + unsigned int *size, slab_flags_t *flags) {} static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 4ae1dfd9bf05..6a1eb0b0aad9 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -439,7 +439,8 @@ extern long simple_strtol(const char *,char **,unsigned int); extern unsigned long long simple_strtoull(const char *,char **,unsigned int); extern long long simple_strtoll(const char *,char **,unsigned int); -extern int num_to_str(char *buf, int size, unsigned long long num); +extern int num_to_str(char *buf, int size, + unsigned long long num, unsigned int width); /* lib/printf utilities */ @@ -543,6 +544,7 @@ extern enum system_states { SYSTEM_RESTART, } system_state; +/* This cannot be an enum because some may be used in assembly source. */ #define TAINT_PROPRIETARY_MODULE 0 #define TAINT_FORCED_MODULE 1 #define TAINT_CPU_OUT_OF_SPEC 2 @@ -560,7 +562,8 @@ extern enum system_states { #define TAINT_SOFTLOCKUP 14 #define TAINT_LIVEPATCH 15 #define TAINT_AUX 16 -#define TAINT_FLAGS_COUNT 17 +#define TAINT_RANDSTRUCT 17 +#define TAINT_FLAGS_COUNT 18 struct taint_flag { char c_true; /* character printed when tainted */ @@ -822,14 +825,15 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } #define __cmp(x, y, op) ((x) op (y) ? (x) : (y)) -#define __cmp_once(x, y, op) ({ \ - typeof(x) __x = (x); \ - typeof(y) __y = (y); \ - __cmp(__x, __y, op); }) +#define __cmp_once(x, y, unique_x, unique_y, op) ({ \ + typeof(x) unique_x = (x); \ + typeof(y) unique_y = (y); \ + __cmp(unique_x, unique_y, op); }) -#define __careful_cmp(x, y, op) \ - __builtin_choose_expr(__safe_cmp(x, y), \ - __cmp(x, y, op), __cmp_once(x, y, op)) +#define __careful_cmp(x, y, op) \ + __builtin_choose_expr(__safe_cmp(x, y), \ + __cmp(x, y, op), \ + __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op)) /** * min - return minimum of two values of the same or compatible types */ diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index e251533a5939..89fc8dc7bf38 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -41,11 +41,11 @@ */ /* - * Note about locking : There is no locking required until only * one reader - * and one writer is using the fifo and no kfifo_reset() will be * called - * kfifo_reset_out() can be safely used, until it will be only called + * Note about locking: There is no locking required as long as only one reader + * and one writer are using the fifo and kfifo_reset() is never called. + * kfifo_reset_out() can be used safely, provided it is only called * in the reader thread. - * For multiple writer and one reader there is only a need to lock the writer.
+ * For multiple writers and one reader there is only a need to lock the writer. * And vice versa for only one writer and multiple readers there is only a need * to lock the reader. */ diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h index 51f6ef2c2ff4..f23b90b02898 100644 --- a/include/linux/kvm_para.h +++ b/include/linux/kvm_para.h @@ -9,4 +9,9 @@ static inline bool kvm_para_has_feature(unsigned int feature) { return !!(kvm_arch_para_features() & (1UL << feature)); } + +static inline bool kvm_para_has_hint(unsigned int feature) +{ + return !!(kvm_arch_para_hints() & (1UL << feature)); +} #endif /* __LINUX_KVM_PARA_H */ diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h index 14997285e53d..c6ac1fe7ec68 100644 --- a/include/linux/libfdt_env.h +++ b/include/linux/libfdt_env.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBFDT_ENV_H -#define _LIBFDT_ENV_H +#ifndef LIBFDT_ENV_H +#define LIBFDT_ENV_H #include <linux/string.h> @@ -15,4 +15,4 @@ typedef __be64 fdt64_t; #define fdt64_to_cpu(x) be64_to_cpu(x) #define cpu_to_fdt64(x) cpu_to_be64(x) -#endif /* _LIBFDT_ENV_H */ +#endif /* LIBFDT_ENV_H */ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index ff855ed965fb..097072c5a852 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -76,12 +76,14 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc); +struct device_node; struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; unsigned long bus_dsm_mask; unsigned long cmd_mask; struct module *module; char *provider_name; + struct device_node *of_node; ndctl_fn ndctl; int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, @@ -123,6 +125,7 @@ struct nd_region_desc { int num_lanes; int numa_node; unsigned long flags; + struct device_node *of_node; }; struct device; @@ -164,6 +167,7 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus); struct nvdimm_bus *to_nvdimm_bus(struct device *dev); struct nvdimm *to_nvdimm(struct device *dev); struct nd_region *to_nd_region(struct device *dev); +struct device *nd_region_dev(struct nd_region *nd_region); struct nd_blk_region *to_nd_blk_region(struct device *dev); struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus); struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus); diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index bb8129a3474d..96def9d15b1b 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -32,6 +32,7 @@ struct list_lru_one { }; struct list_lru_memcg { + struct rcu_head rcu; /* array of per cgroup lists, indexed by memcg_cache_id */ struct list_lru_one *lru[0]; }; @@ -43,7 +44,7 @@ struct list_lru_node { struct list_lru_one lru; #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ - struct list_lru_memcg *memcg_lrus; + struct list_lru_memcg __rcu *memcg_lrus; #endif long nr_items; } ____cacheline_aligned_in_smp; diff --git a/include/linux/lockref.h b/include/linux/lockref.h index 2eac32095113..99f17cc8e163 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -37,6 +37,7 @@ struct lockref { extern void lockref_get(struct lockref *); extern int lockref_put_return(struct lockref *); extern int lockref_get_not_zero(struct lockref *); +extern int lockref_put_not_zero(struct
lockref *); extern int lockref_get_or_lock(struct lockref *); extern int lockref_put_or_lock(struct lockref *); diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h new file mode 100644 index 000000000000..cbd9d8495690 --- /dev/null +++ b/include/linux/logic_pio.h @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved. + * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com> + * Author: Zhichang Yuan <yuanzhichang@hisilicon.com> + */ + +#ifndef __LINUX_LOGIC_PIO_H +#define __LINUX_LOGIC_PIO_H + +#include <linux/fwnode.h> + +enum { + LOGIC_PIO_INDIRECT, /* Indirect IO flag */ + LOGIC_PIO_CPU_MMIO, /* Memory-mapped IO flag */ +}; + +struct logic_pio_hwaddr { + struct list_head list; + struct fwnode_handle *fwnode; + resource_size_t hw_start; + resource_size_t io_start; + resource_size_t size; /* range size populated */ + unsigned long flags; + + void *hostdata; + const struct logic_pio_host_ops *ops; +}; + +struct logic_pio_host_ops { + u32 (*in)(void *hostdata, unsigned long addr, size_t dwidth); + void (*out)(void *hostdata, unsigned long addr, u32 val, + size_t dwidth); + u32 (*ins)(void *hostdata, unsigned long addr, void *buffer, + size_t dwidth, unsigned int count); + void (*outs)(void *hostdata, unsigned long addr, const void *buffer, + size_t dwidth, unsigned int count); +}; + +#ifdef CONFIG_INDIRECT_PIO +u8 logic_inb(unsigned long addr); +void logic_outb(u8 value, unsigned long addr); +void logic_outw(u16 value, unsigned long addr); +void logic_outl(u32 value, unsigned long addr); +u16 logic_inw(unsigned long addr); +u32 logic_inl(unsigned long addr); +void logic_insb(unsigned long addr, void *buffer, unsigned int count); +void logic_insl(unsigned long addr, void *buffer, unsigned int count); +void logic_insw(unsigned long addr, void *buffer, unsigned int count); +void logic_outsb(unsigned long addr, const void *buffer, unsigned int count); +void logic_outsw(unsigned long addr, const void *buffer, unsigned int count); +void logic_outsl(unsigned long addr, const void *buffer, unsigned int count); + +#ifndef inb +#define inb logic_inb +#endif + +#ifndef inw +#define inw logic_inw +#endif + +#ifndef inl +#define inl logic_inl +#endif + +#ifndef outb +#define outb logic_outb +#endif + +#ifndef outw +#define outw logic_outw +#endif + +#ifndef outl +#define outl logic_outl +#endif + +#ifndef insb +#define insb logic_insb +#endif + +#ifndef insw +#define insw logic_insw +#endif + +#ifndef insl +#define insl logic_insl +#endif + +#ifndef outsb +#define outsb logic_outsb +#endif + +#ifndef outsw +#define outsw logic_outsw +#endif + +#ifndef outsl +#define outsl logic_outsl +#endif + +/* + * We reserve 0x4000 bytes for Indirect IO, since so far this library is only + * used by the HiSilicon LPC Host. If needed, we can reserve a wider IO + * area by redefining the macro below.
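Taken together, a host controller registers one of these windows roughly as follows (a sketch, not from the patch: my_in()/my_out() and the window size are placeholders, and only the single-value accessors are wired up):

static u32 my_in(void *hostdata, unsigned long addr, size_t dwidth)
{
	return 0;	/* placeholder: indirect read of dwidth bytes at addr */
}

static void my_out(void *hostdata, unsigned long addr, u32 val, size_t dwidth)
{
	/* placeholder: indirect write of dwidth bytes at addr */
}

static const struct logic_pio_host_ops my_ops = {
	.in	= my_in,
	.out	= my_out,
};

/* The node is linked into a global list, so it must outlive the host. */
static struct logic_pio_hwaddr my_range;

static int my_host_register(struct fwnode_handle *fwnode, void *hostdata)
{
	my_range.fwnode   = fwnode;
	my_range.hw_start = 0;
	my_range.size     = 0x100;		/* bus-local IO bytes */
	my_range.flags    = LOGIC_PIO_INDIRECT;
	my_range.hostdata = hostdata;
	my_range.ops      = &my_ops;
	return logic_pio_register_range(&my_range);	/* fills my_range.io_start */
}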
+ */ +#define PIO_INDIRECT_SIZE 0x4000 +#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE) +#else +#define MMIO_UPPER_LIMIT IO_SPACE_LIMIT +#endif /* CONFIG_INDIRECT_PIO */ + +struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode); +unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, + resource_size_t hw_addr, resource_size_t size); +int logic_pio_register_range(struct logic_pio_hwaddr *newrange); +resource_size_t logic_pio_to_hwaddr(unsigned long pio); +unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr); + +#endif /* __LINUX_LOGIC_PIO_H */ diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index bde167fa2c51..9d0b286f3dba 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -554,6 +554,10 @@ * @new points to the new credentials. * @old points to the original credentials. * Transfer data from original creds to new creds + * @cred_getsecid: + * Retrieve the security identifier of the cred structure @c. + * @c contains the credentials; the secid will be placed into @secid. + * In case of failure, @secid will be set to zero. * @kernel_act_as: * Set the credentials for a kernel service to act as (subjective context). * @new points to the credentials to be modified. @@ -672,7 +676,8 @@ * @p contains the task_struct for process. * @info contains the signal information. * @sig contains the signal value. - * @secid contains the sid of the process where the signal originated + * @cred contains the cred of the process where the signal originated, or + * NULL if the current task is the originator. * Return 0 if permission is granted. * @task_prctl: * Check permission before performing a process control operation on the @@ -906,6 +911,33 @@ * associated with the TUN device's security structure. * @security pointer to the TUN device's security structure. * + * Security hooks for SCTP + * + * @sctp_assoc_request: + * Passes the @ep and @chunk->skb of the association INIT packet to + * the security module. + * @ep pointer to sctp endpoint structure. + * @skb pointer to skbuff of association packet. + * Return 0 on success, error on failure. + * @sctp_bind_connect: + * Validate permissions required for each address associated with sock + * @sk. Depending on @optname, the addresses will be treated as either + * for a connect or bind service. The @addrlen is calculated on each + * ipv4 and ipv6 address using sizeof(struct sockaddr_in) or + * sizeof(struct sockaddr_in6). + * @sk pointer to sock structure. + * @optname name of the option to validate. + * @address list containing one or more ipv4/ipv6 addresses. + * @addrlen total length of address(es). + * Return 0 on success, error on failure. + * @sctp_sk_clone: + * Called whenever a new socket is created by accept(2) (i.e. a TCP + * style socket) or when a socket is 'peeled off' e.g. userspace + * calls sctp_peeloff(3). + * @ep pointer to current sctp endpoint structure. + * @sk pointer to current sock structure. + * @newsk pointer to new sock structure.
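An LSM wires the three SCTP hooks documented above into the hook table with the existing LSM_HOOK_INIT() helper; a sketch with permissive placeholder implementations (the my_lsm_* names are hypothetical):

static int my_lsm_sctp_assoc_request(struct sctp_endpoint *ep,
				     struct sk_buff *skb)
{
	return 0;			/* allow the association */
}

static int my_lsm_sctp_bind_connect(struct sock *sk, int optname,
				    struct sockaddr *address, int addrlen)
{
	return 0;			/* allow every address in the list */
}

static void my_lsm_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
				 struct sock *newsk)
{
	/* placeholder: copy security state from sk (or ep) to newsk */
}

static struct security_hook_list my_lsm_hooks[] = {
	LSM_HOOK_INIT(sctp_assoc_request, my_lsm_sctp_assoc_request),
	LSM_HOOK_INIT(sctp_bind_connect, my_lsm_sctp_bind_connect),
	LSM_HOOK_INIT(sctp_sk_clone, my_lsm_sctp_sk_clone),
};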
+ * * Security hooks for Infiniband * * @ib_pkey_access: @@ -1541,6 +1573,7 @@ union security_list_options { int (*cred_prepare)(struct cred *new, const struct cred *old, gfp_t gfp); void (*cred_transfer)(struct cred *new, const struct cred *old); + void (*cred_getsecid)(const struct cred *c, u32 *secid); int (*kernel_act_as)(struct cred *new, u32 secid); int (*kernel_create_files_as)(struct cred *new, struct inode *inode); int (*kernel_module_request)(char *kmod_name); @@ -1564,7 +1597,7 @@ union security_list_options { int (*task_getscheduler)(struct task_struct *p); int (*task_movememory)(struct task_struct *p); int (*task_kill)(struct task_struct *p, struct siginfo *info, - int sig, u32 secid); + int sig, const struct cred *cred); int (*task_prctl)(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); void (*task_to_inode)(struct task_struct *p, struct inode *inode); @@ -1665,6 +1698,12 @@ union security_list_options { int (*tun_dev_attach_queue)(void *security); int (*tun_dev_attach)(struct sock *sk, void *security); int (*tun_dev_open)(void *security); + int (*sctp_assoc_request)(struct sctp_endpoint *ep, + struct sk_buff *skb); + int (*sctp_bind_connect)(struct sock *sk, int optname, + struct sockaddr *address, int addrlen); + void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk, + struct sock *newsk); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND @@ -1730,230 +1769,234 @@ union security_list_options { }; struct security_hook_heads { - struct list_head binder_set_context_mgr; - struct list_head binder_transaction; - struct list_head binder_transfer_binder; - struct list_head binder_transfer_file; - struct list_head ptrace_access_check; - struct list_head ptrace_traceme; - struct list_head capget; - struct list_head capset; - struct list_head capable; - struct list_head quotactl; - struct list_head quota_on; - struct list_head syslog; - struct list_head settime; - struct list_head vm_enough_memory; - struct list_head bprm_set_creds; - struct list_head bprm_check_security; - struct list_head bprm_committing_creds; - struct list_head bprm_committed_creds; - struct list_head sb_alloc_security; - struct list_head sb_free_security; - struct list_head sb_copy_data; - struct list_head sb_remount; - struct list_head sb_kern_mount; - struct list_head sb_show_options; - struct list_head sb_statfs; - struct list_head sb_mount; - struct list_head sb_umount; - struct list_head sb_pivotroot; - struct list_head sb_set_mnt_opts; - struct list_head sb_clone_mnt_opts; - struct list_head sb_parse_opts_str; - struct list_head dentry_init_security; - struct list_head dentry_create_files_as; + struct hlist_head binder_set_context_mgr; + struct hlist_head binder_transaction; + struct hlist_head binder_transfer_binder; + struct hlist_head binder_transfer_file; + struct hlist_head ptrace_access_check; + struct hlist_head ptrace_traceme; + struct hlist_head capget; + struct hlist_head capset; + struct hlist_head capable; + struct hlist_head quotactl; + struct hlist_head quota_on; + struct hlist_head syslog; + struct hlist_head settime; + struct hlist_head vm_enough_memory; + struct hlist_head bprm_set_creds; + struct hlist_head bprm_check_security; + struct hlist_head bprm_committing_creds; + struct hlist_head bprm_committed_creds; + struct hlist_head sb_alloc_security; + struct hlist_head sb_free_security; + struct hlist_head sb_copy_data; + struct hlist_head sb_remount; + struct hlist_head sb_kern_mount; + struct hlist_head 
sb_show_options; + struct hlist_head sb_statfs; + struct hlist_head sb_mount; + struct hlist_head sb_umount; + struct hlist_head sb_pivotroot; + struct hlist_head sb_set_mnt_opts; + struct hlist_head sb_clone_mnt_opts; + struct hlist_head sb_parse_opts_str; + struct hlist_head dentry_init_security; + struct hlist_head dentry_create_files_as; #ifdef CONFIG_SECURITY_PATH - struct list_head path_unlink; - struct list_head path_mkdir; - struct list_head path_rmdir; - struct list_head path_mknod; - struct list_head path_truncate; - struct list_head path_symlink; - struct list_head path_link; - struct list_head path_rename; - struct list_head path_chmod; - struct list_head path_chown; - struct list_head path_chroot; + struct hlist_head path_unlink; + struct hlist_head path_mkdir; + struct hlist_head path_rmdir; + struct hlist_head path_mknod; + struct hlist_head path_truncate; + struct hlist_head path_symlink; + struct hlist_head path_link; + struct hlist_head path_rename; + struct hlist_head path_chmod; + struct hlist_head path_chown; + struct hlist_head path_chroot; #endif - struct list_head inode_alloc_security; - struct list_head inode_free_security; - struct list_head inode_init_security; - struct list_head inode_create; - struct list_head inode_link; - struct list_head inode_unlink; - struct list_head inode_symlink; - struct list_head inode_mkdir; - struct list_head inode_rmdir; - struct list_head inode_mknod; - struct list_head inode_rename; - struct list_head inode_readlink; - struct list_head inode_follow_link; - struct list_head inode_permission; - struct list_head inode_setattr; - struct list_head inode_getattr; - struct list_head inode_setxattr; - struct list_head inode_post_setxattr; - struct list_head inode_getxattr; - struct list_head inode_listxattr; - struct list_head inode_removexattr; - struct list_head inode_need_killpriv; - struct list_head inode_killpriv; - struct list_head inode_getsecurity; - struct list_head inode_setsecurity; - struct list_head inode_listsecurity; - struct list_head inode_getsecid; - struct list_head inode_copy_up; - struct list_head inode_copy_up_xattr; - struct list_head file_permission; - struct list_head file_alloc_security; - struct list_head file_free_security; - struct list_head file_ioctl; - struct list_head mmap_addr; - struct list_head mmap_file; - struct list_head file_mprotect; - struct list_head file_lock; - struct list_head file_fcntl; - struct list_head file_set_fowner; - struct list_head file_send_sigiotask; - struct list_head file_receive; - struct list_head file_open; - struct list_head task_alloc; - struct list_head task_free; - struct list_head cred_alloc_blank; - struct list_head cred_free; - struct list_head cred_prepare; - struct list_head cred_transfer; - struct list_head kernel_act_as; - struct list_head kernel_create_files_as; - struct list_head kernel_read_file; - struct list_head kernel_post_read_file; - struct list_head kernel_module_request; - struct list_head task_fix_setuid; - struct list_head task_setpgid; - struct list_head task_getpgid; - struct list_head task_getsid; - struct list_head task_getsecid; - struct list_head task_setnice; - struct list_head task_setioprio; - struct list_head task_getioprio; - struct list_head task_prlimit; - struct list_head task_setrlimit; - struct list_head task_setscheduler; - struct list_head task_getscheduler; - struct list_head task_movememory; - struct list_head task_kill; - struct list_head task_prctl; - struct list_head task_to_inode; - struct list_head ipc_permission; - struct 
list_head ipc_getsecid; - struct list_head msg_msg_alloc_security; - struct list_head msg_msg_free_security; - struct list_head msg_queue_alloc_security; - struct list_head msg_queue_free_security; - struct list_head msg_queue_associate; - struct list_head msg_queue_msgctl; - struct list_head msg_queue_msgsnd; - struct list_head msg_queue_msgrcv; - struct list_head shm_alloc_security; - struct list_head shm_free_security; - struct list_head shm_associate; - struct list_head shm_shmctl; - struct list_head shm_shmat; - struct list_head sem_alloc_security; - struct list_head sem_free_security; - struct list_head sem_associate; - struct list_head sem_semctl; - struct list_head sem_semop; - struct list_head netlink_send; - struct list_head d_instantiate; - struct list_head getprocattr; - struct list_head setprocattr; - struct list_head ismaclabel; - struct list_head secid_to_secctx; - struct list_head secctx_to_secid; - struct list_head release_secctx; - struct list_head inode_invalidate_secctx; - struct list_head inode_notifysecctx; - struct list_head inode_setsecctx; - struct list_head inode_getsecctx; + struct hlist_head inode_alloc_security; + struct hlist_head inode_free_security; + struct hlist_head inode_init_security; + struct hlist_head inode_create; + struct hlist_head inode_link; + struct hlist_head inode_unlink; + struct hlist_head inode_symlink; + struct hlist_head inode_mkdir; + struct hlist_head inode_rmdir; + struct hlist_head inode_mknod; + struct hlist_head inode_rename; + struct hlist_head inode_readlink; + struct hlist_head inode_follow_link; + struct hlist_head inode_permission; + struct hlist_head inode_setattr; + struct hlist_head inode_getattr; + struct hlist_head inode_setxattr; + struct hlist_head inode_post_setxattr; + struct hlist_head inode_getxattr; + struct hlist_head inode_listxattr; + struct hlist_head inode_removexattr; + struct hlist_head inode_need_killpriv; + struct hlist_head inode_killpriv; + struct hlist_head inode_getsecurity; + struct hlist_head inode_setsecurity; + struct hlist_head inode_listsecurity; + struct hlist_head inode_getsecid; + struct hlist_head inode_copy_up; + struct hlist_head inode_copy_up_xattr; + struct hlist_head file_permission; + struct hlist_head file_alloc_security; + struct hlist_head file_free_security; + struct hlist_head file_ioctl; + struct hlist_head mmap_addr; + struct hlist_head mmap_file; + struct hlist_head file_mprotect; + struct hlist_head file_lock; + struct hlist_head file_fcntl; + struct hlist_head file_set_fowner; + struct hlist_head file_send_sigiotask; + struct hlist_head file_receive; + struct hlist_head file_open; + struct hlist_head task_alloc; + struct hlist_head task_free; + struct hlist_head cred_alloc_blank; + struct hlist_head cred_free; + struct hlist_head cred_prepare; + struct hlist_head cred_transfer; + struct hlist_head cred_getsecid; + struct hlist_head kernel_act_as; + struct hlist_head kernel_create_files_as; + struct hlist_head kernel_read_file; + struct hlist_head kernel_post_read_file; + struct hlist_head kernel_module_request; + struct hlist_head task_fix_setuid; + struct hlist_head task_setpgid; + struct hlist_head task_getpgid; + struct hlist_head task_getsid; + struct hlist_head task_getsecid; + struct hlist_head task_setnice; + struct hlist_head task_setioprio; + struct hlist_head task_getioprio; + struct hlist_head task_prlimit; + struct hlist_head task_setrlimit; + struct hlist_head task_setscheduler; + struct hlist_head task_getscheduler; + struct hlist_head task_movememory; + struct 
hlist_head task_kill; + struct hlist_head task_prctl; + struct hlist_head task_to_inode; + struct hlist_head ipc_permission; + struct hlist_head ipc_getsecid; + struct hlist_head msg_msg_alloc_security; + struct hlist_head msg_msg_free_security; + struct hlist_head msg_queue_alloc_security; + struct hlist_head msg_queue_free_security; + struct hlist_head msg_queue_associate; + struct hlist_head msg_queue_msgctl; + struct hlist_head msg_queue_msgsnd; + struct hlist_head msg_queue_msgrcv; + struct hlist_head shm_alloc_security; + struct hlist_head shm_free_security; + struct hlist_head shm_associate; + struct hlist_head shm_shmctl; + struct hlist_head shm_shmat; + struct hlist_head sem_alloc_security; + struct hlist_head sem_free_security; + struct hlist_head sem_associate; + struct hlist_head sem_semctl; + struct hlist_head sem_semop; + struct hlist_head netlink_send; + struct hlist_head d_instantiate; + struct hlist_head getprocattr; + struct hlist_head setprocattr; + struct hlist_head ismaclabel; + struct hlist_head secid_to_secctx; + struct hlist_head secctx_to_secid; + struct hlist_head release_secctx; + struct hlist_head inode_invalidate_secctx; + struct hlist_head inode_notifysecctx; + struct hlist_head inode_setsecctx; + struct hlist_head inode_getsecctx; #ifdef CONFIG_SECURITY_NETWORK - struct list_head unix_stream_connect; - struct list_head unix_may_send; - struct list_head socket_create; - struct list_head socket_post_create; - struct list_head socket_bind; - struct list_head socket_connect; - struct list_head socket_listen; - struct list_head socket_accept; - struct list_head socket_sendmsg; - struct list_head socket_recvmsg; - struct list_head socket_getsockname; - struct list_head socket_getpeername; - struct list_head socket_getsockopt; - struct list_head socket_setsockopt; - struct list_head socket_shutdown; - struct list_head socket_sock_rcv_skb; - struct list_head socket_getpeersec_stream; - struct list_head socket_getpeersec_dgram; - struct list_head sk_alloc_security; - struct list_head sk_free_security; - struct list_head sk_clone_security; - struct list_head sk_getsecid; - struct list_head sock_graft; - struct list_head inet_conn_request; - struct list_head inet_csk_clone; - struct list_head inet_conn_established; - struct list_head secmark_relabel_packet; - struct list_head secmark_refcount_inc; - struct list_head secmark_refcount_dec; - struct list_head req_classify_flow; - struct list_head tun_dev_alloc_security; - struct list_head tun_dev_free_security; - struct list_head tun_dev_create; - struct list_head tun_dev_attach_queue; - struct list_head tun_dev_attach; - struct list_head tun_dev_open; + struct hlist_head unix_stream_connect; + struct hlist_head unix_may_send; + struct hlist_head socket_create; + struct hlist_head socket_post_create; + struct hlist_head socket_bind; + struct hlist_head socket_connect; + struct hlist_head socket_listen; + struct hlist_head socket_accept; + struct hlist_head socket_sendmsg; + struct hlist_head socket_recvmsg; + struct hlist_head socket_getsockname; + struct hlist_head socket_getpeername; + struct hlist_head socket_getsockopt; + struct hlist_head socket_setsockopt; + struct hlist_head socket_shutdown; + struct hlist_head socket_sock_rcv_skb; + struct hlist_head socket_getpeersec_stream; + struct hlist_head socket_getpeersec_dgram; + struct hlist_head sk_alloc_security; + struct hlist_head sk_free_security; + struct hlist_head sk_clone_security; + struct hlist_head sk_getsecid; + struct hlist_head sock_graft; + struct 
hlist_head inet_conn_request; + struct hlist_head inet_csk_clone; + struct hlist_head inet_conn_established; + struct hlist_head secmark_relabel_packet; + struct hlist_head secmark_refcount_inc; + struct hlist_head secmark_refcount_dec; + struct hlist_head req_classify_flow; + struct hlist_head tun_dev_alloc_security; + struct hlist_head tun_dev_free_security; + struct hlist_head tun_dev_create; + struct hlist_head tun_dev_attach_queue; + struct hlist_head tun_dev_attach; + struct hlist_head tun_dev_open; + struct hlist_head sctp_assoc_request; + struct hlist_head sctp_bind_connect; + struct hlist_head sctp_sk_clone; #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND - struct list_head ib_pkey_access; - struct list_head ib_endport_manage_subnet; - struct list_head ib_alloc_security; - struct list_head ib_free_security; + struct hlist_head ib_pkey_access; + struct hlist_head ib_endport_manage_subnet; + struct hlist_head ib_alloc_security; + struct hlist_head ib_free_security; #endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM - struct list_head xfrm_policy_alloc_security; - struct list_head xfrm_policy_clone_security; - struct list_head xfrm_policy_free_security; - struct list_head xfrm_policy_delete_security; - struct list_head xfrm_state_alloc; - struct list_head xfrm_state_alloc_acquire; - struct list_head xfrm_state_free_security; - struct list_head xfrm_state_delete_security; - struct list_head xfrm_policy_lookup; - struct list_head xfrm_state_pol_flow_match; - struct list_head xfrm_decode_session; + struct hlist_head xfrm_policy_alloc_security; + struct hlist_head xfrm_policy_clone_security; + struct hlist_head xfrm_policy_free_security; + struct hlist_head xfrm_policy_delete_security; + struct hlist_head xfrm_state_alloc; + struct hlist_head xfrm_state_alloc_acquire; + struct hlist_head xfrm_state_free_security; + struct hlist_head xfrm_state_delete_security; + struct hlist_head xfrm_policy_lookup; + struct hlist_head xfrm_state_pol_flow_match; + struct hlist_head xfrm_decode_session; #endif /* CONFIG_SECURITY_NETWORK_XFRM */ #ifdef CONFIG_KEYS - struct list_head key_alloc; - struct list_head key_free; - struct list_head key_permission; - struct list_head key_getsecurity; + struct hlist_head key_alloc; + struct hlist_head key_free; + struct hlist_head key_permission; + struct hlist_head key_getsecurity; #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT - struct list_head audit_rule_init; - struct list_head audit_rule_known; - struct list_head audit_rule_match; - struct list_head audit_rule_free; + struct hlist_head audit_rule_init; + struct hlist_head audit_rule_known; + struct hlist_head audit_rule_match; + struct hlist_head audit_rule_free; #endif /* CONFIG_AUDIT */ #ifdef CONFIG_BPF_SYSCALL - struct list_head bpf; - struct list_head bpf_map; - struct list_head bpf_prog; - struct list_head bpf_map_alloc_security; - struct list_head bpf_map_free_security; - struct list_head bpf_prog_alloc_security; - struct list_head bpf_prog_free_security; + struct hlist_head bpf; + struct hlist_head bpf_map; + struct hlist_head bpf_prog; + struct hlist_head bpf_map_alloc_security; + struct hlist_head bpf_map_free_security; + struct hlist_head bpf_prog_alloc_security; + struct hlist_head bpf_prog_free_security; #endif /* CONFIG_BPF_SYSCALL */ } __randomize_layout; @@ -1962,8 +2005,8 @@ struct security_hook_heads { * For use with generic list macros for common operations. 
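Each hlist_head above is half the size of the list_head it replaces (one pointer instead of two), and call sites only swap iterators. Roughly, modeled on the call_int_hook() pattern in security/security.c and shown here for a single hook:

static int my_call_task_getpgid(struct task_struct *p)
{
	struct security_hook_list *hp;
	int rc = 0;

	hlist_for_each_entry(hp, &security_hook_heads.task_getpgid, list) {
		rc = hp->hook.task_getpgid(p);
		if (rc != 0)
			break;		/* first denial wins */
	}
	return rc;
}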
*/ struct security_hook_list { - struct list_head list; - struct list_head *head; + struct hlist_node list; + struct hlist_head *head; union security_list_options hook; char *lsm; } __randomize_layout; @@ -2002,7 +2045,7 @@ static inline void security_delete_hooks(struct security_hook_list *hooks, int i; for (i = 0; i < count; i++) - list_del_rcu(&hooks[i].list); + hlist_del_rcu(&hooks[i].list); } #endif /* CONFIG_SECURITY_SELINUX_DISABLE */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index f92ea7783652..ca59883c8364 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -318,6 +318,9 @@ static inline bool memblock_bottom_up(void) phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, ulong flags); +phys_addr_t memblock_alloc_base_nid(phys_addr_t size, + phys_addr_t align, phys_addr_t max_addr, + int nid, ulong flags); phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr); phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, @@ -416,21 +419,11 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) { } #endif - -extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, - phys_addr_t end_addr); #else static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) { return 0; } - -static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, - phys_addr_t end_addr) -{ - return 0; -} - #endif /* CONFIG_HAVE_MEMBLOCK */ #endif /* __KERNEL__ */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c46016bb25eb..d99b71bc2c66 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -48,13 +48,12 @@ enum memcg_stat_item { MEMCG_NR_STAT, }; -/* Cgroup-specific events, on top of universal VM events */ -enum memcg_event_item { - MEMCG_LOW = NR_VM_EVENT_ITEMS, +enum memcg_memory_event { + MEMCG_LOW, MEMCG_HIGH, MEMCG_MAX, MEMCG_OOM, - MEMCG_NR_EVENTS, + MEMCG_NR_MEMORY_EVENTS, }; struct mem_cgroup_reclaim_cookie { @@ -88,7 +87,7 @@ enum mem_cgroup_events_target { struct mem_cgroup_stat_cpu { long count[MEMCG_NR_STAT]; - unsigned long events[MEMCG_NR_EVENTS]; + unsigned long events[NR_VM_EVENT_ITEMS]; unsigned long nr_page_events; unsigned long targets[MEM_CGROUP_NTARGETS]; }; @@ -120,6 +119,9 @@ struct mem_cgroup_per_node { unsigned long usage_in_excess;/* Set to the value by which */ /* the soft limit is exceeded*/ bool on_tree; + bool congested; /* memcg has many dirty pages */ + /* backed by a congested BDI */ + struct mem_cgroup *memcg; /* Back pointer, we cannot */ /* use container_of */ }; @@ -202,7 +204,8 @@ struct mem_cgroup { /* OOM-Killer disable */ int oom_kill_disable; - /* handle for "memory.events" */ + /* memory.events */ + atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS]; struct cgroup_file events_file; /* protect arrays of thresholds */ @@ -231,9 +234,10 @@ struct mem_cgroup { struct task_struct *move_lock_task; unsigned long move_lock_flags; + /* memory.stat */ struct mem_cgroup_stat_cpu __percpu *stat_cpu; atomic_long_t stat[MEMCG_NR_STAT]; - atomic_long_t events[MEMCG_NR_EVENTS]; + atomic_long_t events[NR_VM_EVENT_ITEMS]; unsigned long socket_pressure; @@ -645,9 +649,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); -/* idx can be of type enum memcg_event_item or vm_event_item */ static inline void __count_memcg_events(struct mem_cgroup *memcg, - 
int idx, unsigned long count) + enum vm_event_item idx, + unsigned long count) { unsigned long x; @@ -663,7 +667,8 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg, } static inline void count_memcg_events(struct mem_cgroup *memcg, - int idx, unsigned long count) + enum vm_event_item idx, + unsigned long count) { unsigned long flags; @@ -672,9 +677,8 @@ static inline void count_memcg_events(struct mem_cgroup *memcg, local_irq_restore(flags); } -/* idx can be of type enum memcg_event_item or vm_event_item */ static inline void count_memcg_page_event(struct page *page, - int idx) + enum vm_event_item idx) { if (page->mem_cgroup) count_memcg_events(page->mem_cgroup, idx, 1); @@ -698,10 +702,10 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, rcu_read_unlock(); } -static inline void mem_cgroup_event(struct mem_cgroup *memcg, - enum memcg_event_item event) +static inline void memcg_memory_event(struct mem_cgroup *memcg, + enum memcg_memory_event event) { - count_memcg_events(memcg, event, 1); + atomic_long_inc(&memcg->memory_events[event]); cgroup_file_notify(&memcg->events_file); } @@ -721,8 +725,8 @@ static inline bool mem_cgroup_disabled(void) return true; } -static inline void mem_cgroup_event(struct mem_cgroup *memcg, - enum memcg_event_item event) +static inline void memcg_memory_event(struct mem_cgroup *memcg, + enum memcg_memory_event event) { } diff --git a/include/linux/memory.h b/include/linux/memory.h index f71e732c77b2..31ca3e28b0eb 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -33,6 +33,7 @@ struct memory_block { void *hw; /* optional pointer to fw/hw data */ int (*phys_callback)(struct memory_block *); struct device dev; + int nid; /* NID for this memory block */ }; int arch_get_memory_phys_device(unsigned long start_pfn); @@ -109,7 +110,7 @@ extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); extern int register_memory_isolate_notifier(struct notifier_block *nb); extern void unregister_memory_isolate_notifier(struct notifier_block *nb); -extern int register_new_memory(int, struct mem_section *); +int hotplug_memory_register(int nid, struct mem_section *section); #ifdef CONFIG_MEMORY_HOTREMOVE extern int unregister_memory_section(struct mem_section *); #endif diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index aba5f86eb038..e0e49b5b1ee1 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -52,24 +52,6 @@ enum { }; /* - * pgdat resizing functions - */ -static inline -void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) -{ - spin_lock_irqsave(&pgdat->node_size_lock, *flags); -} -static inline -void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) -{ - spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); -} -static inline -void pgdat_resize_init(struct pglist_data *pgdat) -{ - spin_lock_init(&pgdat->node_size_lock); -} -/* * Zone resizing functions * * Note: any attempt to resize a zone should have pgdat_resize_lock() @@ -234,9 +216,6 @@ void put_online_mems(void); void mem_hotplug_begin(void); void mem_hotplug_done(void); -extern void set_zone_contiguous(struct zone *zone); -extern void clear_zone_contiguous(struct zone *zone); - #else /* !
CONFIG_MEMORY_HOTPLUG */ #define pfn_to_online_page(pfn) \ ({ \ @@ -246,13 +225,6 @@ extern void clear_zone_contiguous(struct zone *zone); ___page; \ }) -/* - * Stub functions for when hotplug is off - */ -static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} -static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} -static inline void pgdat_resize_init(struct pglist_data *pgdat) {} - static inline unsigned zone_span_seqbegin(struct zone *zone) { return 0; @@ -293,6 +265,34 @@ static inline bool movable_node_is_enabled(void) } #endif /* ! CONFIG_MEMORY_HOTPLUG */ +#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) +/* + * pgdat resizing functions + */ +static inline +void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_lock_irqsave(&pgdat->node_size_lock, *flags); +} +static inline +void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) +{ + spin_unlock_irqrestore(&pgdat->node_size_lock, *flags); +} +static inline +void pgdat_resize_init(struct pglist_data *pgdat) +{ + spin_lock_init(&pgdat->node_size_lock); +} +#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ +/* + * Stub functions for when hotplug is off + */ +static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} +static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} +static inline void pgdat_resize_init(struct pglist_data *pgdat) {} +#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */ + #ifdef CONFIG_MEMORY_HOTREMOVE extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h index 48c3c5be7eb1..9ed2871ea335 100644 --- a/include/linux/mfd/samsung/rtc.h +++ b/include/linux/mfd/samsung/rtc.h @@ -141,15 +141,4 @@ enum s2mps_rtc_reg { #define WTSR_ENABLE_SHIFT 6 #define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT) -enum { - RTC_SEC = 0, - RTC_MIN, - RTC_HOUR, - RTC_WEEKDAY, - RTC_DATE, - RTC_MONTH, - RTC_YEAR1, - RTC_YEAR2, -}; - #endif /* __LINUX_MFD_SEC_RTC_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index a2246cf670ba..f2b4abbca55e 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -7,8 +7,7 @@ #include <linux/migrate_mode.h> #include <linux/hugetlb.h> -typedef struct page *new_page_t(struct page *page, unsigned long private, - int **reason); +typedef struct page *new_page_t(struct page *page, unsigned long private); typedef void free_page_t(struct page *page, unsigned long private); /* @@ -25,7 +24,7 @@ enum migrate_reason { MR_SYSCALL, /* also applies to cpusets */ MR_MEMPOLICY_MBIND, MR_NUMA_MISPLACED, - MR_CMA, + MR_CONTIG_RANGE, MR_TYPES }; @@ -43,9 +42,9 @@ static inline struct page *new_page_nodemask(struct page *page, return alloc_huge_page_nodemask(page_hstate(compound_head(page)), preferred_nid, nodemask); - if (thp_migration_supported() && PageTransHuge(page)) { - order = HPAGE_PMD_ORDER; + if (PageTransHuge(page)) { gfp_mask |= GFP_TRANSHUGE; + order = HPAGE_PMD_ORDER; } if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index a9b5fed8f7c6..81d0799b6091 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -257,10 +257,6 @@ enum { }; enum { - MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0 -}; - -enum { MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, 
MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1, MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2 diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 12758595459b..2bc27f8c5b87 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1017,6 +1017,8 @@ enum mlx5_cap_type { MLX5_CAP_VECTOR_CALC, MLX5_CAP_QOS, MLX5_CAP_DEBUG, + MLX5_CAP_RESERVED_14, + MLX5_CAP_DEV_MEM, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1168,6 +1170,12 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP64_FPGA(mdev, cap) \ MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) +#define MLX5_CAP_DEV_MEM(mdev, cap)\ + MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + +#define MLX5_CAP64_DEV_MEM(mdev, cap)\ + MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, @@ -1211,8 +1219,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } -#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8 -#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8 +#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16 +#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 #define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\ MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index cded85ab6fe4..767d193c269a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -591,8 +591,14 @@ struct mlx5_eswitch; struct mlx5_lag; struct mlx5_pagefault; +struct mlx5_rate_limit { + u32 rate; + u32 max_burst_sz; + u16 typical_pkt_sz; +}; + struct mlx5_rl_entry { - u32 rate; + struct mlx5_rate_limit rl; u16 index; u16 refcount; }; @@ -1107,9 +1113,12 @@ int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, int mlx5_init_rl_table(struct mlx5_core_dev *dev); void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); -int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index); -void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate); +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, + struct mlx5_rate_limit *rl); +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl); bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); +bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, + struct mlx5_rate_limit *rl_1); int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, bool map_wc, bool fast_path); void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h index 7b476bbae731..9db21cd0e92c 100644 --- a/include/linux/mlx5/fs_helpers.h +++ b/include/linux/mlx5/fs_helpers.h @@ -38,6 +38,14 @@ #define MLX5_FS_IPV4_VERSION 4 #define MLX5_FS_IPV6_VERSION 6 +static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c) +{ + void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, + misc_parameters); + + return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); +} + static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c, const u32 *match_v, u8 match) { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index d25011f84815..1aad455538f4 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -92,6 +92,8 @@ enum { MLX5_CMD_OP_DESTROY_MKEY = 0x202, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, + MLX5_CMD_OP_ALLOC_MEMIC = 0x205, + 
MLX5_CMD_OP_DEALLOC_MEMIC = 0x206, MLX5_CMD_OP_CREATE_EQ = 0x301, MLX5_CMD_OP_DESTROY_EQ = 0x302, MLX5_CMD_OP_QUERY_EQ = 0x303, @@ -575,7 +577,10 @@ struct mlx5_ifc_qos_cap_bits { u8 esw_scheduling[0x1]; u8 esw_bw_share[0x1]; u8 esw_rate_limit[0x1]; - u8 reserved_at_4[0x1c]; + u8 reserved_at_4[0x1]; + u8 packet_pacing_burst_bound[0x1]; + u8 packet_pacing_typical_size[0x1]; + u8 reserved_at_7[0x19]; u8 reserved_at_20[0x20]; @@ -669,6 +674,24 @@ struct mlx5_ifc_roce_cap_bits { u8 reserved_at_100[0x700]; }; +struct mlx5_ifc_device_mem_cap_bits { + u8 memic[0x1]; + u8 reserved_at_1[0x1f]; + + u8 reserved_at_20[0xb]; + u8 log_min_memic_alloc_size[0x5]; + u8 reserved_at_30[0x8]; + u8 log_max_memic_addr_alignment[0x8]; + + u8 memic_bar_start_addr[0x40]; + + u8 memic_bar_size[0x20]; + + u8 max_memic_size[0x20]; + + u8 reserved_at_c0[0x740]; +}; + enum { MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, @@ -883,7 +906,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 ets[0x1]; u8 nic_flow_table[0x1]; u8 eswitch_flow_table[0x1]; - u8 early_vf_enable[0x1]; + u8 device_memory[0x1]; u8 mcam_reg[0x1]; u8 pcam_reg[0x1]; u8 local_ca_ack_delay[0x5]; @@ -927,7 +950,11 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_202[0x1]; u8 ipoib_enhanced_offloads[0x1]; u8 ipoib_basic_offloads[0x1]; - u8 reserved_at_205[0x5]; + u8 reserved_at_205[0x1]; + u8 repeated_block_disabled[0x1]; + u8 umr_modify_entity_size_disabled[0x1]; + u8 umr_modify_atomic_disabled[0x1]; + u8 umr_indirect_mkey_disabled[0x1]; u8 umr_fence[0x2]; u8 reserved_at_20c[0x3]; u8 drain_sigerr[0x1]; @@ -2748,12 +2775,17 @@ enum { MLX5_MKC_ACCESS_MODE_MTT = 0x1, MLX5_MKC_ACCESS_MODE_KLMS = 0x2, MLX5_MKC_ACCESS_MODE_KSM = 0x3, + MLX5_MKC_ACCESS_MODE_MEMIC = 0x5, }; struct mlx5_ifc_mkc_bits { u8 reserved_at_0[0x1]; u8 free[0x1]; - u8 reserved_at_2[0xd]; + u8 reserved_at_2[0x1]; + u8 access_mode_4_2[0x3]; + u8 reserved_at_6[0x7]; + u8 relaxed_ordering_write[0x1]; + u8 reserved_at_e[0x1]; u8 small_fence_on_rdma_read_response[0x1]; u8 umr_en[0x1]; u8 a[0x1]; @@ -2761,7 +2793,7 @@ struct mlx5_ifc_mkc_bits { u8 rr[0x1]; u8 lw[0x1]; u8 lr[0x1]; - u8 access_mode[0x2]; + u8 access_mode_1_0[0x2]; u8 reserved_at_18[0x8]; u8 qpn[0x18]; @@ -7397,7 +7429,12 @@ struct mlx5_ifc_set_pp_rate_limit_in_bits { u8 rate_limit[0x20]; - u8 reserved_at_a0[0x160]; + u8 burst_upper_bound[0x20]; + + u8 reserved_at_c0[0x10]; + u8 typical_packet_size[0x10]; + + u8 reserved_at_e0[0x120]; }; struct mlx5_ifc_access_register_out_bits { @@ -8951,4 +8988,57 @@ struct mlx5_ifc_destroy_vport_lag_in_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_alloc_memic_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_30[0x20]; + + u8 reserved_at_40[0x18]; + u8 log_memic_addr_alignment[0x8]; + + u8 range_start_addr[0x40]; + + u8 range_size[0x20]; + + u8 memic_size[0x20]; +}; + +struct mlx5_ifc_alloc_memic_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 memic_start_addr[0x40]; +}; + +struct mlx5_ifc_dealloc_memic_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; + + u8 memic_start_addr[0x40]; + + u8 memic_size[0x20]; + + u8 reserved_at_e0[0x20]; +}; + +struct mlx5_ifc_dealloc_memic_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h 
index f945dff34925..1ac1f06a4be6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -386,17 +386,19 @@ struct vm_operations_struct { void (*close)(struct vm_area_struct * area); int (*split)(struct vm_area_struct * area, unsigned long addr); int (*mremap)(struct vm_area_struct * area); - int (*fault)(struct vm_fault *vmf); - int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size); + vm_fault_t (*fault)(struct vm_fault *vmf); + vm_fault_t (*huge_fault)(struct vm_fault *vmf, + enum page_entry_size pe_size); void (*map_pages)(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); + unsigned long (*pagesize)(struct vm_area_struct * area); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ - int (*page_mkwrite)(struct vm_fault *vmf); + vm_fault_t (*page_mkwrite)(struct vm_fault *vmf); /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */ - int (*pfn_mkwrite)(struct vm_fault *vmf); + vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf); /* called by access_process_vm when get_user_pages() fails, typically * for use by special VMAs that can switch between memory and hardware @@ -745,7 +747,7 @@ int finish_mkwrite_fault(struct vm_fault *vmf); * refcount. Each user mapping also has a reference to the page. * * The pagecache pages are stored in a per-mapping radix tree, which is - * rooted at mapping->page_tree, and indexed by offset. + * rooted at mapping->i_pages, and indexed by offset. * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space * lists, we instead now tag pages as dirty/writeback in the radix tree. * @@ -903,7 +905,9 @@ extern int page_to_nid(const struct page *page); #else static inline int page_to_nid(const struct page *page) { - return (page->flags >> NODES_PGSHIFT) & NODES_MASK; + struct page *p = (struct page *)page; + + return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK; } #endif @@ -1152,6 +1156,7 @@ static inline pgoff_t page_index(struct page *page) bool page_mapped(struct page *page); struct address_space *page_mapping(struct page *page); +struct address_space *page_mapping_file(struct page *page); /* * Return true only if the page has been allocated with @@ -1461,6 +1466,7 @@ extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned int offset, unsigned int length); +void __set_page_dirty(struct page *, struct address_space *, int warn); int __set_page_dirty_nobuffers(struct page *page); int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, @@ -2103,6 +2109,7 @@ extern void setup_per_cpu_pageset(void); extern void zone_pcp_update(struct zone *zone); extern void zone_pcp_reset(struct zone *zone); +extern void setup_zone_pageset(struct zone *zone); /* page_alloc.c */ extern int min_free_kbytes; @@ -2420,6 +2427,44 @@ int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn); int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); +static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, + unsigned long addr, struct page *page) +{ + int err = vm_insert_page(vma, addr, page); + + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, + unsigned long addr, pfn_t pfn) +{ + int err =
vm_insert_mixed(vma, addr, pfn); + + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn) +{ + int err = vm_insert_pfn(vma, addr, pfn); + + if (err == -ENOMEM) + return VM_FAULT_OOM; + if (err < 0 && err != -EBUSY) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags, @@ -2589,7 +2634,7 @@ extern int get_hwpoison_page(struct page *page); extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p, int access); -extern atomic_long_t num_poisoned_pages; +extern atomic_long_t num_poisoned_pages __read_mostly; extern int soft_offline_page(struct page *page, int flags); @@ -2611,6 +2656,7 @@ enum mf_action_page_type { MF_MSG_POISONED_HUGE, MF_MSG_HUGE, MF_MSG_FREE_HUGE, + MF_MSG_NON_PMD_HUGE, MF_MSG_UNMAP_FAILED, MF_MSG_DIRTY_SWAPCACHE, MF_MSG_CLEAN_SWAPCACHE, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fd1af6b9591d..21612347d311 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -22,6 +22,8 @@ #endif #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) +typedef int vm_fault_t; + struct address_space; struct mem_cgroup; struct hmm; diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 57b0030d3800..2ad72d2c8cc5 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -37,10 +37,10 @@ void dump_mm(const struct mm_struct *mm); BUG(); \ } \ } while (0) -#define VM_WARN_ON(cond) WARN_ON(cond) -#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) -#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) -#define VM_WARN(cond, format...) WARN(cond, format) +#define VM_WARN_ON(cond) (void)WARN_ON(cond) +#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) +#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format) +#define VM_WARN(cond, format...) (void)WARN(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index a2db4576e499..32699b2dc52a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -180,6 +180,7 @@ enum node_stat_item { NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ NR_DIRTIED, /* page dirtyings since bootup */ NR_WRITTEN, /* page writings since bootup */ + NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */ NR_VM_NODE_STAT_ITEMS }; @@ -633,14 +634,15 @@ typedef struct pglist_data { #ifndef CONFIG_NO_BOOTMEM struct bootmem_data *bdata; #endif -#ifdef CONFIG_MEMORY_HOTPLUG +#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) /* * Must be held any time you expect node_start_pfn, node_present_pages * or node_spanned_pages stay constant. Holding this will also * guarantee that any pfn_valid() stays that way. * * pgdat_resize_lock() and pgdat_resize_unlock() are provided to - * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG. + * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG + * or CONFIG_DEFERRED_STRUCT_PAGE_INIT. 
* * Nests above zone->lock and zone->span_seqlock */ @@ -775,7 +777,8 @@ static inline bool is_dev_zone(const struct zone *zone) #include <linux/memory_hotplug.h> void build_all_zonelists(pg_data_t *pgdat); -void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); +void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, + enum zone_type classzone_idx); bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx, unsigned int alloc_flags, long free_pages); @@ -882,7 +885,7 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); -extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1]; +extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index 3bf8f954b642..3102bd754d18 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h @@ -1,6 +1,4 @@ /* - * linux/include/linux/mtd/bbm.h - * * NAND family Bad Block Management (BBM) header file * - Bad Block Table (BBT) implementation * diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 205ededccc60..a86c4fa93115 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -30,32 +30,19 @@ #include <asm/div64.h> -#define MTD_ERASE_PENDING 0x01 -#define MTD_ERASING 0x02 -#define MTD_ERASE_SUSPEND 0x04 -#define MTD_ERASE_DONE 0x08 -#define MTD_ERASE_FAILED 0x10 - #define MTD_FAIL_ADDR_UNKNOWN -1LL +struct mtd_info; + /* * If the erase fails, fail_addr might indicate exactly which block failed. If * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level * or was not specific to any particular block. 
*/ struct erase_info { - struct mtd_info *mtd; uint64_t addr; uint64_t len; uint64_t fail_addr; - u_long time; - u_long retries; - unsigned dev; - unsigned cell; - void (*callback) (struct erase_info *self); - u_long priv; - u_char state; - struct erase_info *next; }; struct mtd_erase_region_info { @@ -595,8 +582,6 @@ extern void register_mtd_user (struct mtd_notifier *new); extern int unregister_mtd_user (struct mtd_notifier *old); void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size); -void mtd_erase_callback(struct erase_info *instr); - static inline int mtd_is_bitflip(int err) { return err == -EUCLEAN; } diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h new file mode 100644 index 000000000000..792ea5c26329 --- /dev/null +++ b/include/linux/mtd/nand.h @@ -0,0 +1,731 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2017 - Free Electrons + * + * Authors: + * Boris Brezillon <boris.brezillon@free-electrons.com> + * Peter Pan <peterpandong@micron.com> + */ + +#ifndef __LINUX_MTD_NAND_H +#define __LINUX_MTD_NAND_H + +#include <linux/mtd/mtd.h> + +/** + * struct nand_memory_organization - Memory organization structure + * @bits_per_cell: number of bits per NAND cell + * @pagesize: page size + * @oobsize: OOB area size + * @pages_per_eraseblock: number of pages per eraseblock + * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number) + * @planes_per_lun: number of planes per LUN + * @luns_per_target: number of LUNs per target (target is a synonym for die) + * @ntargets: total number of targets exposed by the NAND device + */ +struct nand_memory_organization { + unsigned int bits_per_cell; + unsigned int pagesize; + unsigned int oobsize; + unsigned int pages_per_eraseblock; + unsigned int eraseblocks_per_lun; + unsigned int planes_per_lun; + unsigned int luns_per_target; + unsigned int ntargets; +}; + +#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \ + { \ + .bits_per_cell = (bpc), \ + .pagesize = (ps), \ + .oobsize = (os), \ + .pages_per_eraseblock = (ppe), \ + .eraseblocks_per_lun = (epl), \ + .planes_per_lun = (ppl), \ + .luns_per_target = (lpt), \ + .ntargets = (nt), \ + } + +/** + * struct nand_row_converter - Information needed to convert an absolute offset + * into a row address + * @lun_addr_shift: position of the LUN identifier in the row address + * @eraseblock_addr_shift: position of the eraseblock identifier in the row + * address + */ +struct nand_row_converter { + unsigned int lun_addr_shift; + unsigned int eraseblock_addr_shift; +}; + +/** + * struct nand_pos - NAND position object + * @target: the NAND target/die + * @lun: the LUN identifier + * @plane: the plane within the LUN + * @eraseblock: the eraseblock within the LUN + * @page: the page within the LUN + * + * This information is usually used by specific sub-layers to select the + * appropriate target/die and generate a row address to pass to the device. 
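+ * + * Editor's illustrative sketch (not part of the original patch): a position on a hypothetical two-die device could be built by hand as + * + * struct nand_pos pos = { + * .target = 1, + * .lun = 0, + * .plane = 0, + * .eraseblock = 42, + * .page = 3, + * }; + * + * although in practice positions are usually derived from an absolute offset with nanddev_offs_to_pos() rather than filled in manually.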
+ */ +struct nand_pos { + unsigned int target; + unsigned int lun; + unsigned int plane; + unsigned int eraseblock; + unsigned int page; +}; + +/** + * struct nand_page_io_req - NAND I/O request object + * @pos: the position this I/O request is targeting + * @dataoffs: the offset within the page + * @datalen: number of data bytes to read from/write to this page + * @databuf: buffer to store data in or get data from + * @ooboffs: the OOB offset within the page + * @ooblen: the number of OOB bytes to read from/write to this page + * @oobbuf: buffer to store OOB data in or get OOB data from + * + * This object is used to pass per-page I/O requests to NAND sub-layers. This + * way, all the useful information is already formatted in a convenient way, + * and specific NAND layers can focus on translating it into + * specific commands/operations. + */ +struct nand_page_io_req { + struct nand_pos pos; + unsigned int dataoffs; + unsigned int datalen; + union { + const void *out; + void *in; + } databuf; + unsigned int ooboffs; + unsigned int ooblen; + union { + const void *out; + void *in; + } oobbuf; +}; + +/** + * struct nand_ecc_req - NAND ECC requirements + * @strength: ECC strength + * @step_size: ECC step/block size + */ +struct nand_ecc_req { + unsigned int strength; + unsigned int step_size; +}; + +#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) } + +/** + * struct nand_bbt - bad block table object + * @cache: in-memory BBT cache + */ +struct nand_bbt { + unsigned long *cache; +}; + +struct nand_device; + +/** + * struct nand_ops - NAND operations + * @erase: erase a specific block. No need to check if the block is bad before + * erasing, this has been taken care of by the generic NAND layer + * @markbad: mark a specific block bad. No need to check if the block is + * already marked bad, this has been taken care of by the generic + * NAND layer. This method should just write the BBM (Bad Block + * Marker) so that future calls to struct_nand_ops->isbad() return + * true + * @isbad: check whether a block is bad or not. This method should just read + * the BBM and return whether the block is bad or not based on what it + * reads + * + * These are all low-level operations that should be implemented by specialized + * NAND layers (SPI NAND, raw NAND, ...). + */ +struct nand_ops { + int (*erase)(struct nand_device *nand, const struct nand_pos *pos); + int (*markbad)(struct nand_device *nand, const struct nand_pos *pos); + bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos); +}; + +/** + * struct nand_device - NAND device + * @mtd: MTD instance attached to the NAND device + * @memorg: memory layout + * @eccreq: ECC requirements + * @rowconv: position to row address converter + * @bbt: bad block table info + * @ops: NAND operations attached to the NAND device + * + * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND) + * should declare their own NAND object embedding a nand_device struct (that's + * how inheritance is done). + * struct_nand_device->memorg and struct_nand_device->eccreq should be filled + * at device detection time to reflect the NAND device + * capabilities/requirements. Once this is done nanddev_init() can be called. + * It will take care of converting NAND information into MTD ones, which means + * the specialized NAND layers should never manually tweak + * struct_nand_device->mtd except for the ->_read/write() hooks. 
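+ * + * A minimal editorial sketch of this inheritance pattern (the my_nand type and its regs field are hypothetical, shown for illustration only): + * + * struct my_nand { + * struct nand_device base; + * void __iomem *regs; + * }; + * + * static inline struct my_nand *to_my_nand(struct nand_device *nand) + * { + * return container_of(nand, struct my_nand, base); + * }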
+ */ +struct nand_device { + struct mtd_info mtd; + struct nand_memory_organization memorg; + struct nand_ecc_req eccreq; + struct nand_row_converter rowconv; + struct nand_bbt bbt; + const struct nand_ops *ops; +}; + +/** + * struct nand_io_iter - NAND I/O iterator + * @req: current I/O request + * @oobbytes_per_page: maximum number of OOB bytes per page + * @dataleft: remaining number of data bytes to read/write + * @oobleft: remaining number of OOB bytes to read/write + * + * Can be used by specialized NAND layers to iterate over all pages covered + * by an MTD I/O request, which should greatly simplify the boilerplate + * code needed to read/write data from/to a NAND device. + */ +struct nand_io_iter { + struct nand_page_io_req req; + unsigned int oobbytes_per_page; + unsigned int dataleft; + unsigned int oobleft; +}; + +/** + * mtd_to_nanddev() - Get the NAND device attached to the MTD instance + * @mtd: MTD instance + * + * Return: the NAND device embedding @mtd. + */ +static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd) +{ + return container_of(mtd, struct nand_device, mtd); +} + +/** + * nanddev_to_mtd() - Get the MTD device attached to a NAND device + * @nand: NAND device + * + * Return: the MTD device embedded in @nand. + */ +static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand) +{ + return &nand->mtd; +} + +/** + * nanddev_bits_per_cell() - Get the number of bits per cell + * @nand: NAND device + * + * Return: the number of bits per cell. + */ +static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand) +{ + return nand->memorg.bits_per_cell; +} + +/** + * nanddev_page_size() - Get NAND page size + * @nand: NAND device + * + * Return: the page size. + */ +static inline size_t nanddev_page_size(const struct nand_device *nand) +{ + return nand->memorg.pagesize; +} + +/** + * nanddev_per_page_oobsize() - Get NAND OOB size + * @nand: NAND device + * + * Return: the OOB size. + */ +static inline unsigned int +nanddev_per_page_oobsize(const struct nand_device *nand) +{ + return nand->memorg.oobsize; +} + +/** + * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock + * @nand: NAND device + * + * Return: the number of pages per eraseblock. + */ +static inline unsigned int +nanddev_pages_per_eraseblock(const struct nand_device *nand) +{ + return nand->memorg.pages_per_eraseblock; +} + +/** + * nanddev_eraseblock_size() - Get the NAND eraseblock size + * @nand: NAND device + * + * Return: the eraseblock size. + */ +static inline size_t nanddev_eraseblock_size(const struct nand_device *nand) +{ + return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock; +} + +/** + * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN + * @nand: NAND device + * + * Return: the number of eraseblocks per LUN. + */ +static inline unsigned int +nanddev_eraseblocks_per_lun(const struct nand_device *nand) +{ + return nand->memorg.eraseblocks_per_lun; +} + +/** + * nanddev_target_size() - Get the total size provided by a single target/die + * @nand: NAND device + * + * Return: the total size exposed by a single target/die in bytes. + */ +static inline u64 nanddev_target_size(const struct nand_device *nand) +{ + return (u64)nand->memorg.luns_per_target * + nand->memorg.eraseblocks_per_lun * + nand->memorg.pages_per_eraseblock * + nand->memorg.pagesize; +} + +/** + * nanddev_ntargets() - Get the total number of targets + * @nand: NAND device + * + * Return: the number of targets/dies exposed by @nand. 
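+ * + * For example (hypothetical geometry, editorial illustration only): a device declared with NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 2) exposes two targets, so this helper returns 2 for it.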
+ */ +static inline unsigned int nanddev_ntargets(const struct nand_device *nand) +{ + return nand->memorg.ntargets; +} + +/** + * nanddev_neraseblocks() - Get the total number of eraseblocks + * @nand: NAND device + * + * Return: the total number of eraseblocks exposed by @nand. + */ +static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) +{ + return (u64)nand->memorg.ntargets * + nand->memorg.luns_per_target * + nand->memorg.eraseblocks_per_lun; +} + +/** + * nanddev_size() - Get NAND size + * @nand: NAND device + * + * Return: the total size (in bytes) exposed by @nand. + */ +static inline u64 nanddev_size(const struct nand_device *nand) +{ + return nanddev_target_size(nand) * nanddev_ntargets(nand); +} + +/** + * nanddev_get_memorg() - Extract memory organization info from a NAND device + * @nand: NAND device + * + * This can be used by the upper layer to fill the memorg info before calling + * nanddev_init(). + * + * Return: the memorg object embedded in the NAND device. + */ +static inline struct nand_memory_organization * +nanddev_get_memorg(struct nand_device *nand) +{ + return &nand->memorg; +} + +int nanddev_init(struct nand_device *nand, const struct nand_ops *ops, + struct module *owner); +void nanddev_cleanup(struct nand_device *nand); + +/** + * nanddev_register() - Register a NAND device + * @nand: NAND device + * + * Register a NAND device. + * This function is just a wrapper around mtd_device_register() + * registering the MTD device embedded in @nand. + * + * Return: 0 in case of success, a negative error code otherwise. + */ +static inline int nanddev_register(struct nand_device *nand) +{ + return mtd_device_register(&nand->mtd, NULL, 0); +} + +/** + * nanddev_unregister() - Unregister a NAND device + * @nand: NAND device + * + * Unregister a NAND device. + * This function is just a wrapper around mtd_device_unregister() + * unregistering the MTD device embedded in @nand. + * + * Return: 0 in case of success, a negative error code otherwise. + */ +static inline int nanddev_unregister(struct nand_device *nand) +{ + return mtd_device_unregister(&nand->mtd); +} + +/** + * nanddev_set_of_node() - Attach a DT node to a NAND device + * @nand: NAND device + * @np: DT node + * + * Attach a DT node to a NAND device. + */ +static inline void nanddev_set_of_node(struct nand_device *nand, + struct device_node *np) +{ + mtd_set_of_node(&nand->mtd, np); +} + +/** + * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device + * @nand: NAND device + * + * Return: the DT node attached to @nand. + */ +static inline struct device_node *nanddev_get_of_node(struct nand_device *nand) +{ + return mtd_get_of_node(&nand->mtd); +} + +/** + * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position + * @nand: NAND device + * @offs: absolute NAND offset (usually passed by the MTD layer) + * @pos: a NAND position object to fill in + * + * Converts @offs into a nand_pos representation. + * + * Return: the offset within the NAND page pointed to by @pos. 
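+ * + * Editorial worked example (hypothetical geometry, illustration only): with 2048-byte pages, 64 pages per eraseblock, 1024 eraseblocks per LUN and one LUN per target, offs = 135172 (0x21004) yields pos->target = 0, pos->lun = 0, pos->eraseblock = 1, pos->page = 2 and a return value of 4, since 135172 = ((1 * 64 + 2) * 2048) + 4.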
+ */ +static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand, + loff_t offs, + struct nand_pos *pos) +{ + unsigned int pageoffs; + u64 tmp = offs; + + pageoffs = do_div(tmp, nand->memorg.pagesize); + pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock); + pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun); + pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; + pos->lun = do_div(tmp, nand->memorg.luns_per_target); + pos->target = tmp; + + return pageoffs; +} + +/** + * nanddev_pos_cmp() - Compare two NAND positions + * @a: First NAND position + * @b: Second NAND position + * + * Compares two NAND positions. + * + * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b. + */ +static inline int nanddev_pos_cmp(const struct nand_pos *a, + const struct nand_pos *b) +{ + if (a->target != b->target) + return a->target < b->target ? -1 : 1; + + if (a->lun != b->lun) + return a->lun < b->lun ? -1 : 1; + + if (a->eraseblock != b->eraseblock) + return a->eraseblock < b->eraseblock ? -1 : 1; + + if (a->page != b->page) + return a->page < b->page ? -1 : 1; + + return 0; +} + +/** + * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset + * @nand: NAND device + * @pos: the NAND position to convert + * + * Converts the @pos NAND position into an absolute offset. + * + * Return: the absolute offset. Note that @pos points to the beginning of a + * page; if one wants to point to a specific offset within this page, + * the returned offset has to be adjusted manually. + */ +static inline loff_t nanddev_pos_to_offs(struct nand_device *nand, + const struct nand_pos *pos) +{ + unsigned int npages; + + npages = pos->page + + ((pos->eraseblock + + (pos->lun + + (pos->target * nand->memorg.luns_per_target)) * + nand->memorg.eraseblocks_per_lun) * + nand->memorg.pages_per_eraseblock); + + return (loff_t)npages * nand->memorg.pagesize; +} + +/** + * nanddev_pos_to_row() - Extract a row address from a NAND position + * @nand: NAND device + * @pos: the position to convert + * + * Converts a NAND position into a row address that can then be passed to the + * device. + * + * Return: the row address extracted from @pos. + */ +static inline unsigned int nanddev_pos_to_row(struct nand_device *nand, + const struct nand_pos *pos) +{ + return (pos->lun << nand->rowconv.lun_addr_shift) | + (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) | + pos->page; +} + +/** + * nanddev_pos_next_target() - Move a position to the next target/die + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next target/die. Useful when you + * want to iterate over all targets/dies of a NAND device. + */ +static inline void nanddev_pos_next_target(struct nand_device *nand, + struct nand_pos *pos) +{ + pos->page = 0; + pos->plane = 0; + pos->eraseblock = 0; + pos->lun = 0; + pos->target++; +} + +/** + * nanddev_pos_next_lun() - Move a position to the next LUN + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next LUN. Useful when you want to + * iterate over all LUNs of a NAND device. 
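+ * + * Editorial sketch (not part of the original patch): all LUNs of a device can be visited starting from a zeroed position, where process_lun() is a hypothetical per-LUN callback: + * + * struct nand_pos pos = { }; + * + * do { + * process_lun(nand, &pos); + * nanddev_pos_next_lun(nand, &pos); + * } while (pos.target < nanddev_ntargets(nand));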
+ */ +static inline void nanddev_pos_next_lun(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->lun >= nand->memorg.luns_per_target - 1) + return nanddev_pos_next_target(nand, pos); + + pos->lun++; + pos->page = 0; + pos->plane = 0; + pos->eraseblock = 0; +} + +/** + * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next eraseblock. Useful when you + * want to iterate over all eraseblocks of a NAND device. + */ +static inline void nanddev_pos_next_eraseblock(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1) + return nanddev_pos_next_lun(nand, pos); + + pos->eraseblock++; + pos->page = 0; + pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; +} + +/** + * nanddev_pos_next_page() - Move a position to the next page + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next page. Useful when you want to + * iterate over all pages of a NAND device. + */ +static inline void nanddev_pos_next_page(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->page >= nand->memorg.pages_per_eraseblock - 1) + return nanddev_pos_next_eraseblock(nand, pos); + + pos->page++; +} + +/** + * nanddev_io_iter_init() - Initialize a NAND I/O iterator + * @nand: NAND device + * @offs: absolute offset + * @req: MTD request + * @iter: NAND I/O iterator + * + * Initializes a NAND iterator based on the information passed by the MTD + * layer. + */ +static inline void nanddev_io_iter_init(struct nand_device *nand, + loff_t offs, struct mtd_oob_ops *req, + struct nand_io_iter *iter) +{ + struct mtd_info *mtd = nanddev_to_mtd(nand); + + iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos); + iter->req.ooboffs = req->ooboffs; + iter->oobbytes_per_page = mtd_oobavail(mtd, req); + iter->dataleft = req->len; + iter->oobleft = req->ooblen; + iter->req.databuf.in = req->datbuf; + iter->req.datalen = min_t(unsigned int, + nand->memorg.pagesize - iter->req.dataoffs, + iter->dataleft); + iter->req.oobbuf.in = req->oobbuf; + iter->req.ooblen = min_t(unsigned int, + iter->oobbytes_per_page - iter->req.ooboffs, + iter->oobleft); +} + +/** + * nanddev_io_iter_next_page() - Move to the next page + * @nand: NAND device + * @iter: NAND I/O iterator + * + * Updates the @iter to point to the next page. + */ +static inline void nanddev_io_iter_next_page(struct nand_device *nand, + struct nand_io_iter *iter) +{ + nanddev_pos_next_page(nand, &iter->req.pos); + iter->dataleft -= iter->req.datalen; + iter->req.databuf.in += iter->req.datalen; + iter->oobleft -= iter->req.ooblen; + iter->req.oobbuf.in += iter->req.ooblen; + iter->req.dataoffs = 0; + iter->req.ooboffs = 0; + iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize, + iter->dataleft); + iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page, + iter->oobleft); +} + +/** + * nanddev_io_iter_end() - Check whether the end of the I/O request has been + * reached + * @nand: NAND device + * @iter: NAND I/O iterator + * + * Checks whether @iter has reached the end of the NAND portion it was asked to + * iterate over. + * + * Return: true if @iter has reached the end of the iteration request, false + * otherwise. 
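+ * + * Editorial sketch (hypothetical read path, illustration only): together with nanddev_io_iter_init() and nanddev_io_iter_next_page(), this helper is normally driven through the nanddev_io_for_each_page() macro defined below, where read_one_page() is a hypothetical per-page helper: + * + * struct nand_io_iter iter; + * int ret = 0; + * + * nanddev_io_for_each_page(nand, from, req, &iter) { + * ret = read_one_page(nand, &iter.req); + * if (ret) + * break; + * }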
+ */ +static inline bool nanddev_io_iter_end(struct nand_device *nand, + const struct nand_io_iter *iter) +{ + if (iter->dataleft || iter->oobleft) + return false; + + return true; +} + +/** + * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD + * I/O request + * @nand: NAND device + * @start: start address to read/write from + * @req: MTD I/O request + * @iter: NAND I/O iterator + * + * Should be used to iterate over all pages contained in an MTD request. + */ +#define nanddev_io_for_each_page(nand, start, req, iter) \ + for (nanddev_io_iter_init(nand, start, req, iter); \ + !nanddev_io_iter_end(nand, iter); \ + nanddev_io_iter_next_page(nand, iter)) + +bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos); +bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos); +int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos); +int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos); + +/* BBT related functions */ +enum nand_bbt_block_status { + NAND_BBT_BLOCK_STATUS_UNKNOWN, + NAND_BBT_BLOCK_GOOD, + NAND_BBT_BLOCK_WORN, + NAND_BBT_BLOCK_RESERVED, + NAND_BBT_BLOCK_FACTORY_BAD, + NAND_BBT_BLOCK_NUM_STATUS, +}; + +int nanddev_bbt_init(struct nand_device *nand); +void nanddev_bbt_cleanup(struct nand_device *nand); +int nanddev_bbt_update(struct nand_device *nand); +int nanddev_bbt_get_block_status(const struct nand_device *nand, + unsigned int entry); +int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, + enum nand_bbt_block_status status); +int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block); + +/** + * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry + * @nand: NAND device + * @pos: the NAND position we want to get the BBT entry for + * + * Return the BBT entry used to store information about the eraseblock pointed + * to by @pos. + * + * Return: the BBT entry storing information about the eraseblock pointed to + * by @pos. + */ +static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand, + const struct nand_pos *pos) +{ + return pos->eraseblock + + ((pos->lun + (pos->target * nand->memorg.luns_per_target)) * + nand->memorg.eraseblocks_per_lun); +} + +/** + * nanddev_bbt_is_initialized() - Check if the BBT has been initialized + * @nand: NAND device + * + * Return: true if the BBT has been initialized, false otherwise. + */ +static inline bool nanddev_bbt_is_initialized(struct nand_device *nand) +{ + return !!nand->bbt.cache; +} + +/* MTD -> NAND helper functions. */ +int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo); + +#endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 4d8406c81652..8a2decf7462c 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h @@ -1,6 +1,4 @@ /* - * drivers/mtd/nand_ecc.h - * * Copyright (C) 2000-2010 Steven J. 
Hill <sjhill@realitydiluted.com> * David Woodhouse <dwmw2@infradead.org> * Thomas Gleixner <tglx@linutronix.de> diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h index d0558a982628..357e88b3263a 100644 --- a/include/linux/mtd/ndfc.h +++ b/include/linux/mtd/ndfc.h @@ -1,6 +1,4 @@ /* - * linux/include/linux/mtd/ndfc.h - * * Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de> * * This program is free software; you can redistribute it and/or modify diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index c4beb70dacbd..11cb0c50cd84 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -77,6 +77,7 @@ struct mtd_part_parser { struct list_head list; struct module *owner; const char *name; + const struct of_device_id *of_match_table; int (*parse_fn)(struct mtd_info *, const struct mtd_partition **, struct mtd_part_parser_data *); void (*cleanup)(const struct mtd_partition *pparts, int nr_parts); diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 56c5570aadbe..5dad59b31244 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -21,6 +21,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/flashchip.h> #include <linux/mtd/bbm.h> +#include <linux/types.h> struct mtd_info; struct nand_flash_dev; @@ -235,7 +236,8 @@ struct nand_chip; #define ONFI_TIMING_MODE_5 (1 << 5) #define ONFI_TIMING_MODE_UNKNOWN (1 << 6) -/* ONFI feature address */ +/* ONFI feature number/address */ +#define ONFI_FEATURE_NUMBER 256 #define ONFI_FEATURE_ADDR_TIMING_MODE 0x1 /* Vendor-specific feature address (Micron) */ @@ -429,6 +431,47 @@ struct nand_jedec_params { __le16 crc; } __packed; +/** + * struct onfi_params - ONFI specific parameters that will be reused + * @version: ONFI version (BCD encoded), 0 if ONFI is not supported + * @tPROG: Page program time + * @tBERS: Block erase time + * @tR: Page read time + * @tCCS: Change column setup time + * @async_timing_mode: Supported asynchronous timing mode + * @vendor_revision: Vendor specific revision number + * @vendor: Vendor specific data + */ +struct onfi_params { + int version; + u16 tPROG; + u16 tBERS; + u16 tR; + u16 tCCS; + u16 async_timing_mode; + u16 vendor_revision; + u8 vendor[88]; +}; + +/** + * struct nand_parameters - NAND generic parameters from the parameter page + * @model: Model name + * @supports_set_get_features: The NAND chip supports setting/getting features + * @set_feature_list: Bitmap of features that can be set + * @get_feature_list: Bitmap of features that can be retrieved + * @onfi: ONFI specific parameters + */ +struct nand_parameters { + /* Generic parameters */ + char model[100]; + bool supports_set_get_features; + DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER); + DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER); + + /* ONFI parameters */ + struct onfi_params onfi; +}; + /* The maximum expected count of bytes in the NAND ID sequence */ #define NAND_MAX_ID_LEN 8 @@ -1157,21 +1200,15 @@ int nand_op_parser_exec_op(struct nand_chip *chip, * currently in data_buf. * @subpagesize: [INTERN] holds the subpagesize * @id: [INTERN] holds NAND ID - * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded), - * non 0 if ONFI supported. - * @jedec_version: [INTERN] holds the chip JEDEC version (BCD encoded), - * non 0 if JEDEC supported. - * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is - * supported, 0 otherwise. 
- * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is - * supported, 0 otherwise. + * @parameters: [INTERN] holds generic parameters in an easily + * readable form. * @max_bb_per_die: [INTERN] the max number of bad blocks each die of a * this nand device will encounter their life times. * @blocks_per_die: [INTERN] The number of PEBs in a die * @data_interface: [INTERN] NAND interface timing information * @read_retries: [INTERN] the number of read retry modes supported - * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand - * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand + * @set_features: [REPLACEABLE] set the NAND chip features + * @get_features: [REPLACEABLE] get the NAND chip features * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this * means the configuration should not be applied but @@ -1212,10 +1249,10 @@ struct nand_chip { bool check_only); int (*erase)(struct mtd_info *mtd, int page); int (*scan_bbt)(struct mtd_info *mtd); - int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, - int feature_addr, uint8_t *subfeature_para); - int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip, - int feature_addr, uint8_t *subfeature_para); + int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); + int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode); int (*setup_data_interface)(struct mtd_info *mtd, int chipnr, const struct nand_data_interface *conf); @@ -1243,12 +1280,7 @@ struct nand_chip { int badblockbits; struct nand_id id; - int onfi_version; - int jedec_version; - union { - struct nand_onfi_params onfi_params; - struct nand_jedec_params jedec_params; - }; + struct nand_parameters parameters; u16 max_bb_per_die; u32 blocks_per_die; @@ -1535,26 +1567,13 @@ struct platform_nand_data { struct platform_nand_ctrl ctrl; }; -/* return the supported features. */ -static inline int onfi_feature(struct nand_chip *chip) -{ - return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0; -} - /* return the supported asynchronous timing mode. */ static inline int onfi_get_async_timing_mode(struct nand_chip *chip) { - if (!chip->onfi_version) + if (!chip->parameters.onfi.version) return ONFI_TIMING_MODE_UNKNOWN; - return le16_to_cpu(chip->onfi_params.async_timing_mode); -} -/* return the supported synchronous timing mode. */ -static inline int onfi_get_sync_timing_mode(struct nand_chip *chip) -{ - if (!chip->onfi_version) - return ONFI_TIMING_MODE_UNKNOWN; - return le16_to_cpu(chip->onfi_params.src_sync_timing_mode); + return chip->parameters.onfi.async_timing_mode; } int onfi_fill_data_interface(struct nand_chip *chip, @@ -1591,13 +1610,6 @@ static inline int nand_opcode_8bits(unsigned int command) return 0; } -/* return the supported JEDEC features. */ -static inline int jedec_feature(struct nand_chip *chip) -{ - return chip->jedec_version ? le16_to_cpu(chip->jedec_params.features) - : 0; -} - /* get timing characteristics from ONFI timing mode. 
*/ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); @@ -1629,10 +1641,12 @@ int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page); int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, int page); +/* Wrappers for controllers/vendors to use the GET/SET FEATURES operations */ +int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); +int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param); /* Stub used by drivers that do not support GET/SET FEATURES operations */ -int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd, - struct nand_chip *chip, int addr, - u8 *subfeature_param); +int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip, + int addr, u8 *subfeature_param); /* Default read_page_raw implementation */ int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, diff --git a/include/linux/nd.h b/include/linux/nd.h index 5dc6b695437d..43c181a6add5 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -180,6 +180,12 @@ struct nd_region; void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event); int __must_check __nd_driver_register(struct nd_device_driver *nd_drv, struct module *module, const char *mod_name); +static inline void nd_driver_unregister(struct nd_device_driver *drv) +{ + driver_unregister(&drv->drv); +} #define nd_driver_register(driver) \ __nd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +#define module_nd_driver(driver) \ + module_driver(driver, nd_driver_register, nd_driver_unregister) #endif /* __LINUX_ND_H__ */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 38187c68063d..2f129bbfaae8 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -198,14 +198,24 @@ struct nfs_inode { /* * Cache validity bit flags */ -#define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */ -#define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */ -#define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */ -#define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */ -#define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */ -#define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */ -#define NFS_INO_REVAL_FORCED 0x0040 /* force revalidation ignoring a delegation */ -#define NFS_INO_INVALID_LABEL 0x0080 /* cached label is invalid */ +#define NFS_INO_INVALID_DATA BIT(1) /* cached data is invalid */ +#define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */ +#define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */ +#define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */ +#define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */ +#define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */ +#define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */ +#define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */ +#define NFS_INO_INVALID_CTIME BIT(9) /* cached ctime is invalid */ +#define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */ +#define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid */ +#define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ + +#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ + | NFS_INO_INVALID_CTIME \ + | NFS_INO_INVALID_MTIME \ + | NFS_INO_INVALID_SIZE \ + | NFS_INO_INVALID_OTHER) /* inode metadata is invalid */ /* * Bit offsets in flags field @@ -292,10 +302,11 @@ static inline void 
nfs_mark_for_revalidate(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); - nfsi->cache_validity |= NFS_INO_INVALID_ATTR | - NFS_INO_REVAL_PAGECACHE | - NFS_INO_INVALID_ACCESS | - NFS_INO_INVALID_ACL; + nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE + | NFS_INO_INVALID_ACCESS + | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_CHANGE + | NFS_INO_INVALID_CTIME; if (S_ISDIR(inode->i_mode)) nfsi->cache_validity |= NFS_INO_INVALID_DATA; spin_unlock(&inode->i_lock); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 6959968dc36a..34d28564ecf3 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1590,11 +1590,13 @@ struct nfs_rpc_ops { unsigned int); int (*create) (struct inode *, struct dentry *, struct iattr *, int); - int (*remove) (struct inode *, const struct qstr *); - void (*unlink_setup) (struct rpc_message *, struct inode *dir); + int (*remove) (struct inode *, struct dentry *); + void (*unlink_setup) (struct rpc_message *, struct dentry *); void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *); int (*unlink_done) (struct rpc_task *, struct inode *); - void (*rename_setup) (struct rpc_message *msg, struct inode *dir); + void (*rename_setup) (struct rpc_message *msg, + struct dentry *old_dentry, + struct dentry *new_dentry); void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); int (*link) (struct inode *, struct inode *, const struct qstr *); @@ -1633,7 +1635,6 @@ struct nfs_rpc_ops { struct iattr *iattr, int *); int (*have_delegation)(struct inode *, fmode_t); - int (*return_delegation)(struct inode *); struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *); struct nfs_client *(*init_client) (struct nfs_client *, const struct nfs_client_initdata *); diff --git a/include/linux/node.h b/include/linux/node.h index 4ece0fee0ffc..41f171861dcc 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -67,7 +67,7 @@ extern void unregister_one_node(int nid); extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); extern int register_mem_sect_under_node(struct memory_block *mem_blk, - int nid); + int nid, bool check_nid); extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, unsigned long phys_index); @@ -97,7 +97,7 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) return 0; } static inline int register_mem_sect_under_node(struct memory_block *mem_blk, - int nid) + int nid, bool check_nid) { return 0; } diff --git a/include/linux/of.h b/include/linux/of.h index ebf22dd0860c..4d25e4f952d9 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -363,6 +363,9 @@ extern struct device_node *of_parse_phandle(const struct device_node *np, extern int of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int index, struct of_phandle_args *out_args); +extern int of_parse_phandle_with_args_map(const struct device_node *np, + const char *list_name, const char *stem_name, int index, + struct of_phandle_args *out_args); extern int of_parse_phandle_with_fixed_args(const struct device_node *np, const char *list_name, int cells_count, int index, struct of_phandle_args *out_args); @@ -815,6 +818,15 @@ static inline int of_parse_phandle_with_args(const struct device_node *np, return -ENOSYS; } +static 
inline int of_parse_phandle_with_args_map(const struct device_node *np, + const char *list_name, + const char *stem_name, + int index, + struct of_phandle_args *out_args) +{ + return -ENOSYS; +} + static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, const char *list_name, int cells_count, int index, struct of_phandle_args *out_args) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 50c2b8786831..e34a27727b9a 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -156,9 +156,18 @@ static __always_inline int PageCompound(struct page *page) return test_bit(PG_head, &page->flags) || PageTail(page); } +#define PAGE_POISON_PATTERN -1l +static inline int PagePoisoned(const struct page *page) +{ + return page->flags == PAGE_POISON_PATTERN; +} + /* * Page flags policies wrt compound pages * + * PF_POISONED_CHECK + * check if this struct page is poisoned/uninitialized + * * PF_ANY: * the page flag is relevant for small, head and tail pages. * @@ -176,17 +185,20 @@ static __always_inline int PageCompound(struct page *page) * PF_NO_COMPOUND: * the page flag is not relevant for compound pages. */ -#define PF_ANY(page, enforce) page -#define PF_HEAD(page, enforce) compound_head(page) +#define PF_POISONED_CHECK(page) ({ \ + VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \ + page; }) +#define PF_ANY(page, enforce) PF_POISONED_CHECK(page) +#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page)) #define PF_ONLY_HEAD(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(PageTail(page), page); \ - page;}) + PF_POISONED_CHECK(page); }) #define PF_NO_TAIL(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ - compound_head(page);}) + PF_POISONED_CHECK(compound_head(page)); }) #define PF_NO_COMPOUND(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \ - page;}) + PF_POISONED_CHECK(page); }) /* * Macros to create function definitions for page flags diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index cdad58bbfd8b..4ae347cbc36d 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -63,7 +63,6 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, bool skip_hwpoisoned_pages); -struct page *alloc_migrate_target(struct page *page, unsigned long private, - int **resultp); +struct page *alloc_migrate_target(struct page *page, unsigned long private); #endif diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h index 760d74a0e9a9..14d14beb1f7f 100644 --- a/include/linux/page_ref.h +++ b/include/linux/page_ref.h @@ -175,8 +175,7 @@ static inline void page_ref_unfreeze(struct page *page, int count) VM_BUG_ON_PAGE(page_count(page) != 0, page); VM_BUG_ON(count == 0); - smp_mb(); - atomic_set(&page->_refcount, count); + atomic_set_release(&page->_refcount, count); if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze)) __page_ref_unfreeze(page, count); } diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 34ce3ebf97d5..b1bd2186e6d2 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -144,7 +144,7 @@ void release_pages(struct page **pages, int nr); * 3. check the page is still in pagecache (if no, goto 1) * * Remove-side that cares about stability of _refcount (eg. reclaim) has the - * following (with tree_lock held for write): + * following (with the i_pages lock held): * A. 
atomically check refcount is correct and set it to 0 (atomic_cmpxchg) * B. remove page from pagecache * C. free the page @@ -157,7 +157,7 @@ void release_pages(struct page **pages, int nr); * * It is possible that between 1 and 2, the page is removed then the exact same * page is inserted into the same position in pagecache. That's OK: the - * old find_get_page using tree_lock could equally have run before or after + * old find_get_page using a lock could equally have run before or after * such a re-insertion, depending on order that locks are granted. * * Lookups racing against pagecache insertion isn't a big problem: either 1 diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index a1a5e5df0f66..af657ca58b70 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -39,10 +39,9 @@ struct pci_epc_ops { int (*write_header)(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr); int (*set_bar)(struct pci_epc *epc, u8 func_no, - enum pci_barno bar, - dma_addr_t bar_phys, size_t size, int flags); + struct pci_epf_bar *epf_bar); void (*clear_bar)(struct pci_epc *epc, u8 func_no, - enum pci_barno bar); + struct pci_epf_bar *epf_bar); int (*map_addr)(struct pci_epc *epc, u8 func_no, phys_addr_t addr, u64 pci_addr, size_t size); void (*unmap_addr)(struct pci_epc *epc, u8 func_no, @@ -127,9 +126,9 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); int pci_epc_write_header(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr); int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, - enum pci_barno bar, - dma_addr_t bar_phys, size_t size, int flags); -void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar); + struct pci_epf_bar *epf_bar); +void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, + struct pci_epf_bar *epf_bar); int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, phys_addr_t phys_addr, u64 pci_addr, size_t size); diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index e897bf076701..f7d6f4883f8b 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -97,6 +97,8 @@ struct pci_epf_driver { struct pci_epf_bar { dma_addr_t phys_addr; size_t size; + enum pci_barno barno; + int flags; }; /** diff --git a/include/linux/pci.h b/include/linux/pci.h index ae42289662df..73178a2fcee0 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -256,6 +256,7 @@ enum pci_bus_speed { PCIE_SPEED_2_5GT = 0x14, PCIE_SPEED_5_0GT = 0x15, PCIE_SPEED_8_0GT = 0x16, + PCIE_SPEED_16_0GT = 0x17, PCI_SPEED_UNKNOWN = 0xff, }; @@ -469,6 +470,9 @@ struct pci_host_bridge { struct msi_controller *msi; unsigned int ignore_reset_delay:1; /* For entire hierarchy */ unsigned int no_ext_tags:1; /* No Extended Tags */ + unsigned int native_aer:1; /* OS may use PCIe AER */ + unsigned int native_hotplug:1; /* OS may use PCIe hotplug */ + unsigned int native_pme:1; /* OS may use PCIe PME */ /* Resource alignment requirements */ resource_size_t (*align_resource)(struct pci_dev *dev, const struct resource *res, @@ -949,11 +953,6 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn); -static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, - unsigned int devfn) -{ - return pci_get_domain_bus_and_slot(0, bus, devfn); -} struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); int pci_dev_present(const struct pci_device_id 
*ids); @@ -1082,7 +1081,11 @@ int pcie_get_mps(struct pci_dev *dev); int pcie_set_mps(struct pci_dev *dev, int mps); int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, enum pcie_link_width *width); -void pcie_flr(struct pci_dev *dev); +u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width); +void pcie_print_link_status(struct pci_dev *dev); +int pcie_flr(struct pci_dev *dev); int __pci_reset_function_locked(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); int pci_reset_function_locked(struct pci_dev *dev); @@ -1095,7 +1098,7 @@ int pci_reset_bus(struct pci_bus *bus); int pci_try_reset_bus(struct pci_bus *bus); void pci_reset_secondary_bus(struct pci_dev *dev); void pcibios_reset_secondary_bus(struct pci_dev *dev); -void pci_reset_bridge_secondary_bus(struct pci_dev *dev); +int pci_reset_bridge_secondary_bus(struct pci_dev *dev); void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); @@ -1228,7 +1231,8 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, void *alignf_data); -int pci_register_io_range(phys_addr_t addr, resource_size_t size); +int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, + resource_size_t size); unsigned long pci_address_to_pio(phys_addr_t addr); phys_addr_t pci_pio_to_address(unsigned long pio); int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr); @@ -1297,7 +1301,6 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus); void pci_setup_bridge(struct pci_bus *bus); resource_size_t pcibios_window_alignment(struct pci_bus *bus, unsigned long type); -resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) @@ -1448,10 +1451,8 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d, #ifdef CONFIG_PCIEPORTBUS extern bool pcie_ports_disabled; -extern bool pcie_ports_auto; #else #define pcie_ports_disabled true -#define pcie_ports_auto false #endif #ifdef CONFIG_PCIEASPM @@ -1663,9 +1664,6 @@ static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn) { return NULL; } -static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, - unsigned int devfn) -{ return NULL; } static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn) { return NULL; } @@ -1925,6 +1923,7 @@ void pcibios_release_device(struct pci_dev *dev); void pcibios_penalize_isa_irq(int irq, int active); int pcibios_alloc_irq(struct pci_dev *dev); void pcibios_free_irq(struct pci_dev *dev); +resource_size_t pcibios_default_alignment(void); #ifdef CONFIG_HIBERNATE_CALLBACKS extern struct dev_pm_ops pcibios_pm_ops; @@ -1957,6 +1956,11 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); int pci_sriov_get_totalvfs(struct pci_dev *dev); resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); + +/* Arch may override these (weak) */ +int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs); +int pcibios_sriov_disable(struct pci_dev *pdev); +resource_size_t pcibios_iov_resource_alignment(struct 
pci_dev *dev, int resno); #else static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) { @@ -2184,24 +2188,11 @@ int pci_parse_request_of_pci_ranges(struct device *dev, /* Arch may override this (weak) */ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); -static inline struct device_node * -pci_device_to_OF_node(const struct pci_dev *pdev) -{ - return pdev ? pdev->dev.of_node : NULL; -} - -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - return bus ? bus->dev.of_node : NULL; -} - #else /* CONFIG_OF */ static inline void pci_set_of_node(struct pci_dev *dev) { } static inline void pci_release_of_node(struct pci_dev *dev) { } static inline void pci_set_bus_of_node(struct pci_bus *bus) { } static inline void pci_release_bus_of_node(struct pci_bus *bus) { } -static inline struct device_node * -pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } static inline int pci_parse_request_of_pci_ranges(struct device *dev, @@ -2212,6 +2203,17 @@ static inline int pci_parse_request_of_pci_ranges(struct device *dev, } #endif /* CONFIG_OF */ +static inline struct device_node * +pci_device_to_OF_node(const struct pci_dev *pdev) +{ + return pdev ? pdev->dev.of_node : NULL; +} + +static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) +{ + return bus ? bus->dev.of_node : NULL; +} + #ifdef CONFIG_ACPI struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); @@ -2282,41 +2284,9 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) return false; } -/** - * pci_uevent_ers - emit a uevent during recovery path of pci device - * @pdev: pci device to check - * @err_type: type of error event - * - */ -static inline void pci_uevent_ers(struct pci_dev *pdev, - enum pci_ers_result err_type) -{ - int idx = 0; - char *envp[3]; - - switch (err_type) { - case PCI_ERS_RESULT_NONE: - case PCI_ERS_RESULT_CAN_RECOVER: - envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY"; - envp[idx++] = "DEVICE_ONLINE=0"; - break; - case PCI_ERS_RESULT_RECOVERED: - envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY"; - envp[idx++] = "DEVICE_ONLINE=1"; - break; - case PCI_ERS_RESULT_DISCONNECT: - envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY"; - envp[idx++] = "DEVICE_ONLINE=0"; - break; - default: - break; - } - - if (idx > 0) { - envp[idx++] = NULL; - kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp); - } -} +#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH) +void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); +#endif /* Provide the legacy pci_dma_* API */ #include <linux/pci-dma-compat.h> diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2d61d9bde83d..cc608fc55334 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1334,6 +1334,7 @@ #define PCI_DEVICE_ID_IMS_TT3D 0x9135 #define PCI_VENDOR_ID_AMCC 0x10e8 +#define PCI_VENDOR_ID_AMPERE 0x1def #define PCI_VENDOR_ID_INTERG 0x10ea #define PCI_DEVICE_ID_INTERG_1682 0x1682 diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h deleted file mode 100644 index b69769dbf659..000000000000 --- a/include/linux/pcieport_if.h +++ /dev/null @@ -1,71 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * File: pcieport_if.h - * Purpose: PCI Express Port Bus Driver's IF Data Structure - * - * Copyright (C) 2004 Intel - * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) - */ - -#ifndef _PCIEPORT_IF_H_ -#define 
_PCIEPORT_IF_H_ - -/* Port Type */ -#define PCIE_ANY_PORT (~0) - -/* Service Type */ -#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */ -#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT) -#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */ -#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT) -#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */ -#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) -#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ -#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) -#define PCIE_PORT_SERVICE_DPC_SHIFT 4 /* Downstream Port Containment */ -#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) - -struct pcie_device { - int irq; /* Service IRQ/MSI/MSI-X Vector */ - struct pci_dev *port; /* Root/Upstream/Downstream Port */ - u32 service; /* Port service this device represents */ - void *priv_data; /* Service Private Data */ - struct device device; /* Generic Device Interface */ -}; -#define to_pcie_device(d) container_of(d, struct pcie_device, device) - -static inline void set_service_data(struct pcie_device *dev, void *data) -{ - dev->priv_data = data; -} - -static inline void *get_service_data(struct pcie_device *dev) -{ - return dev->priv_data; -} - -struct pcie_port_service_driver { - const char *name; - int (*probe) (struct pcie_device *dev); - void (*remove) (struct pcie_device *dev); - int (*suspend) (struct pcie_device *dev); - int (*resume) (struct pcie_device *dev); - - /* Device driver may resume normal operations */ - void (*error_resume)(struct pci_dev *dev); - - /* Link Reset Capability - AER service driver specific */ - pci_ers_result_t (*reset_link) (struct pci_dev *dev); - - int port_type; /* Type of the port this driver can handle */ - u32 service; /* Port service this device represents */ - - struct device_driver driver; -}; -#define to_service_driver(d) \ - container_of(d, struct pcie_port_service_driver, driver) - -int pcie_port_service_register(struct pcie_port_service_driver *new); -void pcie_port_service_unregister(struct pcie_port_service_driver *new); - -#endif /* _PCIEPORT_IF_H_ */ diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h index e684543254f3..e319d0a2ec82 100644 --- a/include/linux/platform_data/asoc-ti-mcbsp.h +++ b/include/linux/platform_data/asoc-ti-mcbsp.h @@ -25,10 +25,6 @@ #include <linux/spinlock.h> #include <linux/clk.h> -#define MCBSP_CONFIG_TYPE2 0x2 -#define MCBSP_CONFIG_TYPE3 0x3 -#define MCBSP_CONFIG_TYPE4 0x4 - /* Platform specific configuration */ struct omap_mcbsp_ops { void (*request)(unsigned int); @@ -47,14 +43,6 @@ struct omap_mcbsp_platform_data { int (*force_ick_on)(struct clk *clk, bool force_on); }; -/** - * omap_mcbsp_dev_attr - OMAP McBSP device attributes for omap_hwmod - * @sidetone: name of the sidetone device - */ -struct omap_mcbsp_dev_attr { - const char *sidetone; -}; - void omap3_mcbsp_init_pdata_callback(struct omap_mcbsp_platform_data *pdata); #endif diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h index a19b78d826e9..757a0f9e26f9 100644 --- a/include/linux/platform_data/dmtimer-omap.h +++ b/include/linux/platform_data/dmtimer-omap.h @@ -20,12 +20,50 @@ #ifndef __PLATFORM_DATA_DMTIMER_OMAP_H__ #define __PLATFORM_DATA_DMTIMER_OMAP_H__ +struct omap_dm_timer_ops { + struct omap_dm_timer *(*request_by_node)(struct device_node *np); + struct omap_dm_timer *(*request_specific)(int 
timer_id); + struct omap_dm_timer *(*request)(void); + + int (*free)(struct omap_dm_timer *timer); + + void (*enable)(struct omap_dm_timer *timer); + void (*disable)(struct omap_dm_timer *timer); + + int (*get_irq)(struct omap_dm_timer *timer); + int (*set_int_enable)(struct omap_dm_timer *timer, + unsigned int value); + int (*set_int_disable)(struct omap_dm_timer *timer, u32 mask); + + struct clk *(*get_fclk)(struct omap_dm_timer *timer); + + int (*start)(struct omap_dm_timer *timer); + int (*stop)(struct omap_dm_timer *timer); + int (*set_source)(struct omap_dm_timer *timer, int source); + + int (*set_load)(struct omap_dm_timer *timer, int autoreload, + unsigned int value); + int (*set_match)(struct omap_dm_timer *timer, int enable, + unsigned int match); + int (*set_pwm)(struct omap_dm_timer *timer, int def_on, + int toggle, int trigger); + int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler); + + unsigned int (*read_counter)(struct omap_dm_timer *timer); + int (*write_counter)(struct omap_dm_timer *timer, + unsigned int value); + unsigned int (*read_status)(struct omap_dm_timer *timer); + int (*write_status)(struct omap_dm_timer *timer, + unsigned int value); +}; + struct dmtimer_platform_data { /* set_timer_src - Only used for OMAP1 devices */ int (*set_timer_src)(struct platform_device *pdev, int source); u32 timer_capability; u32 timer_errata; int (*get_context_loss_count)(struct device *); + const struct omap_dm_timer_ops *timer_ops; }; #endif /* __PLATFORM_DATA_DMTIMER_OMAP_H__ */ diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h index cb2618147c34..8612855691b2 100644 --- a/include/linux/platform_data/gpio-omap.h +++ b/include/linux/platform_data/gpio-omap.h @@ -157,11 +157,6 @@ #define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr)) #define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES) -struct omap_gpio_dev_attr { - int bank_width; /* GPIO bank width */ - bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ -}; - struct omap_gpio_reg_offs { u16 revision; u16 direction; diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index fcdc707eab99..2744cff1b297 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -129,6 +129,8 @@ struct mlxreg_core_platform_data { * @mask: top aggregation interrupt common mask; * @cell_low: location of low aggregation interrupt register; * @mask_low: low aggregation interrupt common mask; + * @deferred_nr: I2C adapter number which must exist prior to probe execution; + * @shift_nr: I2C adapter numbers must be incremented by this value; */ struct mlxreg_core_hotplug_platform_data { struct mlxreg_core_item *items; @@ -139,6 +141,8 @@ struct mlxreg_core_hotplug_platform_data { u32 mask; u32 cell_low; u32 mask_low; + int deferred_nr; + int shift_nr; }; #endif /* __LINUX_PLATFORM_DATA_MLXREG_H */ diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h index b42ad83cbc20..4fd0f592a2d2 100644 --- a/include/linux/platform_data/mtd-nand-pxa3xx.h +++ b/include/linux/platform_data/mtd-nand-pxa3xx.h @@ -6,41 +6,22 @@ #include <linux/mtd/partitions.h> /* - * Current pxa3xx_nand controller has two chip select which - * both be workable. - * - * Notice should be taken that: - * When you want to use this feature, you should not enable the - * keep configuration feature, for two chip select could be - * attached with different nand chip.
The different page size - and timing requirement make the keep configuration impossible. + * The pxa3xx_nand controller has two chip selects, both of which are workable, + * but historically all platforms remaining on platform data have used only one. + * Switch to device tree if you need more. */ - -/* The max num of chip select current support */ -#define NUM_CHIP_SELECT (2) struct pxa3xx_nand_platform_data { - - /* the data flash bus is shared between the Static Memory - * Controller and the Data Flash Controller, the arbiter - * controls the ownership of the bus - */ - int enable_arbiter; - - /* allow platform code to keep OBM/bootloader defined NFC config */ - int keep_config; - - /* indicate how many chip selects will be used */ - int num_cs; - - /* use an flash-based bad block table */ - bool flash_bbt; - - /* requested ECC strength and ECC step size */ + /* Keep OBM/bootloader NFC timing configuration */ + bool keep_config; + /* Use a flash-based bad block table */ + bool flash_bbt; + /* Requested ECC strength and ECC step size */ int ecc_strength, ecc_step_size; - - const struct mtd_partition *parts[NUM_CHIP_SELECT]; - unsigned int nr_parts[NUM_CHIP_SELECT]; + /* Partitions */ + const struct mtd_partition *parts; + unsigned int nr_parts; }; extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info); + #endif /* __ASM_ARCH_PXA3XX_NAND_H */ diff --git a/include/linux/platform_data/phy-da8xx-usb.h b/include/linux/platform_data/phy-da8xx-usb.h new file mode 100644 index 000000000000..85c2b99381b2 --- /dev/null +++ b/include/linux/platform_data/phy-da8xx-usb.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver + * + * Copyright (C) 2018 David Lechner <david@lechnology.com> + */ + +#ifndef __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ +#define __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ + +#include <linux/regmap.h> + +/** + * da8xx_usb_phy_platform_data + * @cfgchip: CFGCHIP syscon regmap + */ +struct da8xx_usb_phy_platform_data { + struct regmap *cfgchip; +}; + +#endif /* __LINUX_PLATFORM_DATA_PHY_DA8XX_USB_H__ */ diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h new file mode 100644 index 000000000000..f9bed2a0af9d --- /dev/null +++ b/include/linux/platform_data/pm33xx.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * TI pm33xx platform data + * + * Copyright (C) 2016-2018 Texas Instruments, Inc.
+ * Dave Gerlach <d-gerlach@ti.com> + */ + +#ifndef _LINUX_PLATFORM_DATA_PM33XX_H +#define _LINUX_PLATFORM_DATA_PM33XX_H + +#include <linux/kbuild.h> +#include <linux/types.h> + +#ifndef __ASSEMBLER__ +struct am33xx_pm_sram_addr { + void (*do_wfi)(void); + unsigned long *do_wfi_sz; + unsigned long *resume_offset; + unsigned long *emif_sram_table; + unsigned long *ro_sram_data; +}; + +struct am33xx_pm_platform_data { + int (*init)(void); + int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long)); + struct am33xx_pm_sram_addr *(*get_sram_addrs)(void); +}; + +struct am33xx_pm_sram_data { + u32 wfi_flags; + u32 l2_aux_ctrl_val; + u32 l2_prefetch_ctrl_val; +} __packed __aligned(8); + +struct am33xx_pm_ro_sram_data { + u32 amx3_pm_sram_data_virt; + u32 amx3_pm_sram_data_phys; +} __packed __aligned(8); + +#endif /* __ASSEMBLER__ */ +#endif /* _LINUX_PLATFORM_DATA_PM33XX_H */ diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h index 13c83a25958a..0bf9fddb8306 100644 --- a/include/linux/platform_data/spi-omap2-mcspi.h +++ b/include/linux/platform_data/spi-omap2-mcspi.h @@ -2,10 +2,6 @@ #ifndef _OMAP2_MCSPI_H #define _OMAP2_MCSPI_H -#define OMAP2_MCSPI_REV 0 -#define OMAP3_MCSPI_REV 1 -#define OMAP4_MCSPI_REV 2 - #define OMAP4_MCSPI_REG_OFFSET 0x100 #define MCSPI_PINDIR_D0_IN_D1_OUT 0 @@ -17,10 +13,6 @@ struct omap2_mcspi_platform_config { unsigned int pin_dir:1; }; -struct omap2_mcspi_dev_attr { - unsigned short num_chipselect; -}; - struct omap2_mcspi_device_config { unsigned turbo_mode:1; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 1be356330b96..80ce28d40832 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -16,6 +16,10 @@ enum ti_sysc_module_type { TI_SYSC_OMAP4_USB_HOST_FS, }; +struct ti_sysc_cookie { + void *data; +}; + /** * struct sysc_regbits - TI OCP_SYSCONFIG register field offsets * @midle_shift: Offset of the midle bit @@ -41,6 +45,7 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_QUIRK_LEGACY_IDLE BIT(8) #define SYSC_QUIRK_RESET_STATUS BIT(7) #define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6) #define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5) @@ -83,4 +88,49 @@ struct sysc_config { u32 quirks; }; +enum sysc_registers { + SYSC_REVISION, + SYSC_SYSCONFIG, + SYSC_SYSSTATUS, + SYSC_MAX_REGS, +}; + +/** + * struct ti_sysc_module_data - ti-sysc to hwmod translation data for a module + * @name: legacy "ti,hwmods" module name + * @module_pa: physical address of the interconnect target module + * @module_size: size of the interconnect target module + * @offsets: array of register offsets as listed in enum sysc_registers + * @nr_offsets: number of registers + * @cap: interconnect target module capabilities + * @cfg: interconnect target module configuration + * + * This data is enough to allocate a new struct omap_hwmod_class_sysconfig + * based on device tree data parsed by ti-sysc driver. 
+ */ +struct ti_sysc_module_data { + const char *name; + u64 module_pa; + u32 module_size; + int *offsets; + int nr_offsets; + const struct sysc_capabilities *cap; + struct sysc_config *cfg; +}; + +struct device; + +struct ti_sysc_platform_data { + struct of_dev_auxdata *auxdata; + int (*init_module)(struct device *dev, + const struct ti_sysc_module_data *data, + struct ti_sysc_cookie *cookie); + int (*enable_module)(struct device *dev, + const struct ti_sysc_cookie *cookie); + int (*idle_module)(struct device *dev, + const struct ti_sysc_cookie *cookie); + int (*shutdown_module)(struct device *dev, + const struct ti_sysc_cookie *cookie); +}; + #endif /* __TI_SYSC_DATA_H__ */ diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h index d8b187c3925d..7b81dad712de 100644 --- a/include/linux/power/smartreflex.h +++ b/include/linux/power/smartreflex.h @@ -143,6 +143,13 @@ #define OMAP3430_SR_ERRWEIGHT 0x04 #define OMAP3430_SR_ERRMAXLIMIT 0x02 +enum sr_instance { + OMAP_SR_MPU, /* shared with iva on omap3 */ + OMAP_SR_CORE, + OMAP_SR_IVA, + OMAP_SR_NR, +}; + struct omap_sr { char *name; struct list_head node; @@ -207,7 +214,6 @@ struct omap_smartreflex_dev_attr { const char *sensor_voltdm_name; }; -#ifdef CONFIG_POWER_AVS_OMAP /* * The smart reflex driver supports CLASS1 CLASS2 and CLASS3 SR. * The smartreflex class driver should pass the class type. @@ -290,6 +296,8 @@ struct omap_sr_data { struct voltagedomain *voltdm; }; +#ifdef CONFIG_POWER_AVS_OMAP + /* Smartreflex module enable/disable interface */ void omap_sr_enable(struct voltagedomain *voltdm); void omap_sr_disable(struct voltagedomain *voltdm); diff --git a/include/linux/printk.h b/include/linux/printk.h index e9b603ee9953..6d7e800affd8 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -201,6 +201,7 @@ void __init setup_log_buf(int early); __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); +extern asmlinkage void dump_stack(void) __cold; extern void printk_safe_init(void); extern void printk_safe_flush(void); extern void printk_safe_flush_on_panic(void); @@ -264,6 +265,10 @@ static inline void show_regs_print_info(const char *log_lvl) { } +static inline asmlinkage void dump_stack(void) +{ +} + static inline void printk_safe_init(void) { } @@ -279,8 +284,6 @@ static inline void printk_safe_flush_on_panic(void) extern int kptr_restrict; -extern asmlinkage void dump_stack(void) __cold; - #ifndef pr_fmt #define pr_fmt(fmt) fmt #endif diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9395f06e8372..e6d226464838 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -39,6 +39,7 @@ struct persistent_ram_ecc_info { int ecc_size; int symsize; int poly; + uint16_t *par; }; struct persistent_ram_zone { diff --git a/include/linux/quota.h b/include/linux/quota.h index 5ac9de4fcd6f..ca9772c8e48b 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -267,7 +267,6 @@ struct dqstats { struct percpu_counter counter[_DQST_DQSTAT_LAST]; }; -extern struct dqstats *dqstats_pcpu; extern struct dqstats dqstats; static inline void dqstats_inc(unsigned int type) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index fc55ff31eca7..34149e8b5f73 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -104,25 +104,29 @@ struct radix_tree_node { unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS]; }; 
-/* The top bits of gfp_mask are used to store the root tags and the IDR flag */ -#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT)) -#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1) +/* The IDR tag is stored in the low bits of the GFP flags */ +#define ROOT_IS_IDR ((__force gfp_t)4) +/* The top bits of gfp_mask are used to store the root tags */ +#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT) struct radix_tree_root { + spinlock_t xa_lock; gfp_t gfp_mask; struct radix_tree_node __rcu *rnode; }; -#define RADIX_TREE_INIT(mask) { \ +#define RADIX_TREE_INIT(name, mask) { \ + .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \ .gfp_mask = (mask), \ .rnode = NULL, \ } #define RADIX_TREE(name, mask) \ - struct radix_tree_root name = RADIX_TREE_INIT(mask) + struct radix_tree_root name = RADIX_TREE_INIT(name, mask) #define INIT_RADIX_TREE(root, mask) \ do { \ + spin_lock_init(&(root)->xa_lock); \ (root)->gfp_mask = (mask); \ (root)->rnode = NULL; \ } while (0) diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index a366cc314479..ea8505204fdf 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -106,6 +106,10 @@ extern const struct raid6_calls raid6_avx512x1; extern const struct raid6_calls raid6_avx512x2; extern const struct raid6_calls raid6_avx512x4; extern const struct raid6_calls raid6_s390vx8; +extern const struct raid6_calls raid6_vpermxor1; +extern const struct raid6_calls raid6_vpermxor2; +extern const struct raid6_calls raid6_vpermxor4; +extern const struct raid6_calls raid6_vpermxor8; struct raid6_recov_calls { void (*data2)(int, size_t, int, int, void **); diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 728d421fffe9..d09a9c7af109 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -344,7 +344,7 @@ struct rproc_ops { int (*stop)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); void * (*da_to_va)(struct rproc *rproc, u64 da, int len); - int (*load_rsc_table)(struct rproc *rproc, const struct firmware *fw); + int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); struct resource_table *(*find_loaded_rsc_table)( struct rproc *rproc, const struct firmware *fw); int (*load)(struct rproc *rproc, const struct firmware *fw); @@ -395,6 +395,21 @@ enum rproc_crash_type { }; /** + * struct rproc_dump_segment - segment info from ELF header + * @node: list node related to the rproc segment list + * @da: device address of the segment + * @size: size of the segment + */ +struct rproc_dump_segment { + struct list_head node; + + dma_addr_t da; + size_t size; + + loff_t offset; +}; + +/** * struct rproc - represents a physical remote processor device * @node: list node of this rproc object * @domain: iommu domain @@ -424,6 +439,7 @@ enum rproc_crash_type { * @cached_table: copy of the resource table * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU + * @dump_segments: list of segments in the firmware */ struct rproc { struct list_head node; @@ -455,19 +471,21 @@ struct rproc { size_t table_sz; bool has_iommu; bool auto_boot; + struct list_head dump_segments; }; /** * struct rproc_subdev - subdevice tied to a remoteproc * @node: list node related to the rproc subdevs list * @probe: probe function, called as the rproc is started - * @remove: remove function, called as the rproc is stopped + * @remove: remove function, called as the rproc is being stopped, the @crashed + * parameter indicates if this originates from a recovery */ struct 
rproc_subdev { struct list_head node; int (*probe)(struct rproc_subdev *subdev); - void (*remove)(struct rproc_subdev *subdev); + void (*remove)(struct rproc_subdev *subdev, bool crashed); }; /* we currently support only two vrings per rvdev */ @@ -534,6 +552,7 @@ void rproc_free(struct rproc *rproc); int rproc_boot(struct rproc *rproc); void rproc_shutdown(struct rproc *rproc); void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type); +int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size); static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) { @@ -550,7 +569,7 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev, int (*probe)(struct rproc_subdev *subdev), - void (*remove)(struct rproc_subdev *subdev)); + void (*remove)(struct rproc_subdev *subdev, bool graceful)); void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h index adb88f8cefbc..9326d671b6e6 100644 --- a/include/linux/reset-controller.h +++ b/include/linux/reset-controller.h @@ -27,12 +27,38 @@ struct device_node; struct of_phandle_args; /** + * struct reset_control_lookup - represents a single lookup entry + * + * @list: internal list of all reset lookup entries + * @provider: name of the reset controller device controlling this reset line + * @index: ID of the reset controller in the reset controller device + * @dev_id: name of the device associated with this reset line + * @con_id: name of the reset line (can be NULL) + */ +struct reset_control_lookup { + struct list_head list; + const char *provider; + unsigned int index; + const char *dev_id; + const char *con_id; +}; + +#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \ + { \ + .provider = _provider, \ + .index = _index, \ + .dev_id = _dev_id, \ + .con_id = _con_id, \ + } + +/** * struct reset_controller_dev - reset controller entity that might * provide multiple reset controls * @ops: a pointer to device specific struct reset_control_ops * @owner: kernel module of the reset controller driver * @list: internal list of reset controller devices * @reset_control_head: head of internal list of requested reset controls + * @dev: corresponding driver model device struct * @of_node: corresponding device tree node as phandle target * @of_reset_n_cells: number of cells in reset line specifiers * @of_xlate: translation function to translate from specifier as found in the @@ -44,6 +70,7 @@ struct reset_controller_dev { struct module *owner; struct list_head list; struct list_head reset_control_head; + struct device *dev; struct device_node *of_node; int of_reset_n_cells; int (*of_xlate)(struct reset_controller_dev *rcdev, @@ -58,4 +85,7 @@ struct device; int devm_reset_controller_register(struct device *dev, struct reset_controller_dev *rcdev); +void reset_controller_add_lookup(struct reset_control_lookup *lookup, + unsigned int num_entries); + #endif diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 7d9eb39fa76a..a0233edc0718 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -34,10 +34,12 @@ struct ring_buffer_event { * array[0] = time delta (28 .. 
59) * size = 8 bytes * - * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock - * array[0] = tv_nsec - * array[1..2] = tv_sec - * size = 16 bytes + * @RINGBUF_TYPE_TIME_STAMP: Absolute timestamp + * Same format as TIME_EXTEND except that the + * value is an absolute timestamp, not a delta + * event.time_delta contains bottom 27 bits + * array[0] = top (28 .. 59) bits + * size = 8 bytes * * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX: * Data record @@ -54,12 +56,12 @@ enum ring_buffer_type { RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, RINGBUF_TYPE_PADDING, RINGBUF_TYPE_TIME_EXTEND, - /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ RINGBUF_TYPE_TIME_STAMP, }; unsigned ring_buffer_event_length(struct ring_buffer_event *event); void *ring_buffer_event_data(struct ring_buffer_event *event); +u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event); /* * ring_buffer_discard_commit will remove an event that has not @@ -115,6 +117,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, int ring_buffer_write(struct ring_buffer *buffer, unsigned long length, void *data); +void ring_buffer_nest_start(struct ring_buffer *buffer); +void ring_buffer_nest_end(struct ring_buffer *buffer); + struct ring_buffer_event * ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, unsigned long *lost_events); @@ -178,6 +183,8 @@ void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, int cpu, u64 *ts); void ring_buffer_set_clock(struct ring_buffer *buffer, u64 (*clock)(void)); +void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs); +bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer); size_t ring_buffer_page_len(void *page); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index fc6c90b57be0..4c007f69082f 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -145,12 +145,17 @@ struct rtc_device { bool registered; - struct nvmem_config *nvmem_config; struct nvmem_device *nvmem; /* Old ABI support */ bool nvram_old_abi; struct bin_attribute *nvram; + time64_t range_min; + timeu64_t range_max; + time64_t start_secs; + time64_t offset_secs; + bool set_start_time; + #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL struct work_struct uie_task; struct timer_list uie_timer; @@ -164,6 +169,11 @@ struct rtc_device { }; #define to_rtc_device(d) container_of(d, struct rtc_device, dev) +/* useful timestamps */ +#define RTC_TIMESTAMP_BEGIN_1900 -2208989361LL /* 1900-01-01 00:00:00 */ +#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */ +#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */ + extern struct rtc_device *rtc_device_register(const char *name, struct device *dev, const struct rtc_class_ops *ops, @@ -212,10 +222,6 @@ void rtc_aie_update_irq(void *private); void rtc_uie_update_irq(void *private); enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); -int rtc_register(rtc_task_t *task); -int rtc_unregister(rtc_task_t *task); -int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); - void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data); int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer, ktime_t expires, ktime_t period); @@ -271,4 +277,17 @@ extern int rtc_hctosys_ret; #define rtc_hctosys_ret -ENODEV #endif +#ifdef CONFIG_RTC_NVMEM +int rtc_nvmem_register(struct rtc_device *rtc, + struct nvmem_config *nvmem_config); +void rtc_nvmem_unregister(struct rtc_device *rtc); +#else +static inline int rtc_nvmem_register(struct rtc_device *rtc, + struct nvmem_config 
*nvmem_config) +{ + return -ENODEV; +} +static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {} +#endif + #endif /* _LINUX_RTC_H_ */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 9806184bb3d5..2c570cd934af 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -104,7 +104,8 @@ static inline void mm_update_next_owner(struct mm_struct *mm) #endif /* CONFIG_MEMCG */ #ifdef CONFIG_MMU -extern void arch_pick_mmap_layout(struct mm_struct *mm); +extern void arch_pick_mmap_layout(struct mm_struct *mm, + struct rlimit *rlim_stack); extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); @@ -113,7 +114,8 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); #else -static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} +static inline void arch_pick_mmap_layout(struct mm_struct *mm, + struct rlimit *rlim_stack) {} #endif static inline bool in_vfork(struct task_struct *tsk) diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 23b4f9cb82db..a7ce74c74e49 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -319,7 +319,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *); extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, - const struct cred *, u32); + const struct cred *); extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern __must_check bool do_notify_parent(struct task_struct *, int); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h new file mode 100644 index 000000000000..b458c87b866c --- /dev/null +++ b/include/linux/scmi_protocol.h @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SCMI Message Protocol driver header + * + * Copyright (C) 2018 ARM Ltd. + */ +#include <linux/device.h> +#include <linux/types.h> + +#define SCMI_MAX_STR_SIZE 16 +#define SCMI_MAX_NUM_RATES 16 + +/** + * struct scmi_revision_info - version information structure + * + * @major_ver: Major ABI version. Change here implies risk of backward + * compatibility break. + * @minor_ver: Minor ABI version. Change here implies new feature addition, + * or compatible change in ABI. + * @num_protocols: Number of protocols that are implemented, excluding the + * base protocol. + * @num_agents: Number of agents in the system. + * @impl_ver: A vendor-specific implementation version. 
+ * @vendor_id: A vendor identifier (null-terminated ASCII string) + * @sub_vendor_id: A sub-vendor identifier (null-terminated ASCII string) + */ +struct scmi_revision_info { + u16 major_ver; + u16 minor_ver; + u8 num_protocols; + u8 num_agents; + u32 impl_ver; + char vendor_id[SCMI_MAX_STR_SIZE]; + char sub_vendor_id[SCMI_MAX_STR_SIZE]; +}; + +struct scmi_clock_info { + char name[SCMI_MAX_STR_SIZE]; + bool rate_discrete; + union { + struct { + int num_rates; + u64 rates[SCMI_MAX_NUM_RATES]; + } list; + struct { + u64 min_rate; + u64 max_rate; + u64 step_size; + } range; + }; +}; + +struct scmi_handle; + +/** + * struct scmi_clk_ops - represents the various operations provided + * by SCMI Clock Protocol + * + * @count_get: get the count of clocks provided by SCMI + * @info_get: get the information of the specified clock + * @rate_get: request the current clock rate of a clock + * @rate_set: set the clock rate of a clock + * @enable: enables the specified clock + * @disable: disables the specified clock + */ +struct scmi_clk_ops { + int (*count_get)(const struct scmi_handle *handle); + + const struct scmi_clock_info *(*info_get) + (const struct scmi_handle *handle, u32 clk_id); + int (*rate_get)(const struct scmi_handle *handle, u32 clk_id, + u64 *rate); + int (*rate_set)(const struct scmi_handle *handle, u32 clk_id, + u32 config, u64 rate); + int (*enable)(const struct scmi_handle *handle, u32 clk_id); + int (*disable)(const struct scmi_handle *handle, u32 clk_id); +}; + +/** + * struct scmi_perf_ops - represents the various operations provided + * by SCMI Performance Protocol + * + * @limits_set: sets limits on the performance level of a domain + * @limits_get: gets limits on the performance level of a domain + * @level_set: sets the performance level of a domain + * @level_get: gets the performance level of a domain + * @device_domain_id: gets the scmi domain id for a given device + * @get_transition_latency: gets the DVFS transition latency for a given device + * @add_opps_to_device: adds all the OPPs for a given device + * @freq_set: sets the frequency for a given device using sustained frequency + * to sustained performance level mapping + * @freq_get: gets the frequency for a given device using sustained frequency + * to sustained performance level mapping + */ +struct scmi_perf_ops { + int (*limits_set)(const struct scmi_handle *handle, u32 domain, + u32 max_perf, u32 min_perf); + int (*limits_get)(const struct scmi_handle *handle, u32 domain, + u32 *max_perf, u32 *min_perf); + int (*level_set)(const struct scmi_handle *handle, u32 domain, + u32 level, bool poll); + int (*level_get)(const struct scmi_handle *handle, u32 domain, + u32 *level, bool poll); + int (*device_domain_id)(struct device *dev); + int (*get_transition_latency)(const struct scmi_handle *handle, + struct device *dev); + int (*add_opps_to_device)(const struct scmi_handle *handle, + struct device *dev); + int (*freq_set)(const struct scmi_handle *handle, u32 domain, + unsigned long rate, bool poll); + int (*freq_get)(const struct scmi_handle *handle, u32 domain, + unsigned long *rate, bool poll); +}; + +/** + * struct scmi_power_ops - represents the various operations provided + * by SCMI Power Protocol + * + * @num_domains_get: get the count of power domains provided by SCMI + * @name_get: gets the name of a power domain + * @state_set: sets the power state of a power domain + * @state_get: gets the power state of a power domain + */ +struct scmi_power_ops { + int (*num_domains_get)(const struct scmi_handle 
*handle); + char *(*name_get)(const struct scmi_handle *handle, u32 domain); +#define SCMI_POWER_STATE_TYPE_SHIFT 30 +#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1) +#define SCMI_POWER_STATE_PARAM(type, id) \ + ((((type) & BIT(0)) << SCMI_POWER_STATE_TYPE_SHIFT) | \ + ((id) & SCMI_POWER_STATE_ID_MASK)) +#define SCMI_POWER_STATE_GENERIC_ON SCMI_POWER_STATE_PARAM(0, 0) +#define SCMI_POWER_STATE_GENERIC_OFF SCMI_POWER_STATE_PARAM(1, 0) + int (*state_set)(const struct scmi_handle *handle, u32 domain, + u32 state); + int (*state_get)(const struct scmi_handle *handle, u32 domain, + u32 *state); +}; + +struct scmi_sensor_info { + u32 id; + u8 type; + char name[SCMI_MAX_STR_SIZE]; +}; + +/* + * Partial list from Distributed Management Task Force (DMTF) specification: + * DSP0249 (Platform Level Data Model specification) + */ +enum scmi_sensor_class { + NONE = 0x0, + TEMPERATURE_C = 0x2, + VOLTAGE = 0x5, + CURRENT = 0x6, + POWER = 0x7, + ENERGY = 0x8, +}; + +/** + * struct scmi_sensor_ops - represents the various operations provided + * by SCMI Sensor Protocol + * + * @count_get: get the count of sensors provided by SCMI + * @info_get: get the information of the specified sensor + * @configuration_set: control notifications on cross-over events for + * the trip-points + * @trip_point_set: selects and configures a trip-point of interest + * @reading_get: gets the current value of the sensor + */ +struct scmi_sensor_ops { + int (*count_get)(const struct scmi_handle *handle); + + const struct scmi_sensor_info *(*info_get) + (const struct scmi_handle *handle, u32 sensor_id); + int (*configuration_set)(const struct scmi_handle *handle, + u32 sensor_id); + int (*trip_point_set)(const struct scmi_handle *handle, u32 sensor_id, + u8 trip_id, u64 trip_value); + int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, + bool async, u64 *value); +}; + +/** + * struct scmi_handle - Handle returned to ARM SCMI clients for usage. 
+ * + * @dev: pointer to the SCMI device + * @version: pointer to the structure containing SCMI version information + * @power_ops: pointer to set of power protocol operations + * @perf_ops: pointer to set of performance protocol operations + * @clk_ops: pointer to set of clock protocol operations + * @sensor_ops: pointer to set of sensor protocol operations + */ +struct scmi_handle { + struct device *dev; + struct scmi_revision_info *version; + struct scmi_perf_ops *perf_ops; + struct scmi_clk_ops *clk_ops; + struct scmi_power_ops *power_ops; + struct scmi_sensor_ops *sensor_ops; + /* for protocol internal use */ + void *perf_priv; + void *clk_priv; + void *power_priv; + void *sensor_priv; +}; + +enum scmi_std_protocol { + SCMI_PROTOCOL_BASE = 0x10, + SCMI_PROTOCOL_POWER = 0x11, + SCMI_PROTOCOL_SYSTEM = 0x12, + SCMI_PROTOCOL_PERF = 0x13, + SCMI_PROTOCOL_CLOCK = 0x14, + SCMI_PROTOCOL_SENSOR = 0x15, +}; + +struct scmi_device { + u32 id; + u8 protocol_id; + struct device dev; + struct scmi_handle *handle; +}; + +#define to_scmi_dev(d) container_of(d, struct scmi_device, dev) + +struct scmi_device * +scmi_device_create(struct device_node *np, struct device *parent, int protocol); +void scmi_device_destroy(struct scmi_device *scmi_dev); + +struct scmi_device_id { + u8 protocol_id; +}; + +struct scmi_driver { + const char *name; + int (*probe)(struct scmi_device *sdev); + void (*remove)(struct scmi_device *sdev); + const struct scmi_device_id *id_table; + + struct device_driver driver; +}; + +#define to_scmi_driver(d) container_of(d, struct scmi_driver, driver) + +#ifdef CONFIG_ARM_SCMI_PROTOCOL +int scmi_driver_register(struct scmi_driver *driver, + struct module *owner, const char *mod_name); +void scmi_driver_unregister(struct scmi_driver *driver); +#else +static inline int +scmi_driver_register(struct scmi_driver *driver, struct module *owner, + const char *mod_name) +{ + return -EINVAL; +} + +static inline void scmi_driver_unregister(struct scmi_driver *driver) {} +#endif /* CONFIG_ARM_SCMI_PROTOCOL */ + +#define scmi_register(driver) \ + scmi_driver_register(driver, THIS_MODULE, KBUILD_MODNAME) +#define scmi_unregister(driver) \ + scmi_driver_unregister(driver) + +/** + * module_scmi_driver() - Helper macro for registering a scmi driver + * @__scmi_driver: scmi_driver structure + * + * Helper macro for scmi drivers to set up proper module init / exit + * functions. Replaces module_init() and module_exit() and keeps people from + * printing pointless things to the kernel log when their driver is loaded. 
+ */ +#define module_scmi_driver(__scmi_driver) \ + module_driver(__scmi_driver, scmi_register, scmi_unregister) + +typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *); +int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn); +void scmi_protocol_unregister(int protocol_id); diff --git a/include/linux/security.h b/include/linux/security.h index 128e1e4a5346..200920f521a1 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -112,6 +112,7 @@ struct xfrm_policy; struct xfrm_state; struct xfrm_user_sec_ctx; struct seq_file; +struct sctp_endpoint; #ifdef CONFIG_MMU extern unsigned long mmap_min_addr; @@ -321,6 +322,7 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp); void security_cred_free(struct cred *cred); int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); void security_transfer_creds(struct cred *new, const struct cred *old); +void security_cred_getsecid(const struct cred *c, u32 *secid); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); int security_kernel_module_request(char *kmod_name); @@ -344,7 +346,7 @@ int security_task_setscheduler(struct task_struct *p); int security_task_getscheduler(struct task_struct *p); int security_task_movememory(struct task_struct *p); int security_task_kill(struct task_struct *p, struct siginfo *info, - int sig, u32 secid); + int sig, const struct cred *cred); int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); void security_task_to_inode(struct task_struct *p, struct inode *inode); @@ -1007,7 +1009,7 @@ static inline int security_task_movememory(struct task_struct *p) static inline int security_task_kill(struct task_struct *p, struct siginfo *info, int sig, - u32 secid) + const struct cred *cred) { return 0; } @@ -1226,6 +1228,11 @@ int security_tun_dev_create(void); int security_tun_dev_attach_queue(void *security); int security_tun_dev_attach(struct sock *sk, void *security); int security_tun_dev_open(void *security); +int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb); +int security_sctp_bind_connect(struct sock *sk, int optname, + struct sockaddr *address, int addrlen); +void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk, + struct sock *newsk); #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, @@ -1418,6 +1425,25 @@ static inline int security_tun_dev_open(void *security) { return 0; } + +static inline int security_sctp_assoc_request(struct sctp_endpoint *ep, + struct sk_buff *skb) +{ + return 0; +} + +static inline int security_sctp_bind_connect(struct sock *sk, int optname, + struct sockaddr *address, + int addrlen) +{ + return 0; +} + +static inline void security_sctp_sk_clone(struct sctp_endpoint *ep, + struct sock *sk, + struct sock *newsk) +{ +} #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index ab437dd2e3b9..a121982af0f5 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -118,9 +118,14 @@ __printf(2, 3) void seq_printf(struct seq_file *m, const char *fmt, ...); void seq_putc(struct seq_file *m, char c); void seq_puts(struct seq_file *m, const char *s); +void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter, + unsigned long long num, unsigned int width); void seq_put_decimal_ull(struct seq_file 
*m, const char *delimiter, unsigned long long num); void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num); +void seq_put_hex_ll(struct seq_file *m, const char *delimiter, + unsigned long long v, unsigned int width); + void seq_escape(struct seq_file *m, const char *s, const char *esc); void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, @@ -235,4 +240,5 @@ extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *hea extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos); +void seq_file_init(void); #endif diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h index a7f004a3c177..463ed28d2b27 100644 --- a/include/linux/serial_s3c.h +++ b/include/linux/serial_s3c.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Internal header file for Samsung S3C2410 serial ports (UART0-2) * @@ -10,21 +11,7 @@ * Internal header file for MX1ADS serial ports (UART1 & 2) * * Copyright (C) 2002 Shane Nay (shane@minirl.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -*/ + */ #ifndef __ASM_ARM_REGS_SERIAL_H #define __ASM_ARM_REGS_SERIAL_H diff --git a/include/linux/slab.h b/include/linux/slab.h index 231abc8976c5..81ebd71f8c03 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -125,7 +125,6 @@ #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ (unsigned long)ZERO_SIZE_PTR) -#include <linux/kmemleak.h> #include <linux/kasan.h> struct mem_cgroup; @@ -137,12 +136,13 @@ bool slab_is_available(void); extern bool usercopy_fallback; -struct kmem_cache *kmem_cache_create(const char *name, size_t size, - size_t align, slab_flags_t flags, +struct kmem_cache *kmem_cache_create(const char *name, unsigned int size, + unsigned int align, slab_flags_t flags, void (*ctor)(void *)); struct kmem_cache *kmem_cache_create_usercopy(const char *name, - size_t size, size_t align, slab_flags_t flags, - size_t useroffset, size_t usersize, + unsigned int size, unsigned int align, + slab_flags_t flags, + unsigned int useroffset, unsigned int usersize, void (*ctor)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); @@ -308,7 +308,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; * 2 = 129 .. 192 bytes * n = 2^(n-1)+1 .. 
2^n */ -static __always_inline int kmalloc_index(size_t size) +static __always_inline unsigned int kmalloc_index(size_t size) { if (!size) return 0; @@ -504,7 +504,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) return kmalloc_large(size, flags); #ifndef CONFIG_SLOB if (!(flags & GFP_DMA)) { - int index = kmalloc_index(size); + unsigned int index = kmalloc_index(size); if (!index) return ZERO_SIZE_PTR; @@ -522,11 +522,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) * return size or 0 if a kmalloc cache for that * size does not exist */ -static __always_inline int kmalloc_size(int n) +static __always_inline unsigned int kmalloc_size(unsigned int n) { #ifndef CONFIG_SLOB if (n > 2) - return 1 << n; + return 1U << n; if (n == 1 && KMALLOC_MIN_SIZE <= 32) return 96; @@ -542,7 +542,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) #ifndef CONFIG_SLOB if (__builtin_constant_p(size) && size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) { - int i = kmalloc_index(size); + unsigned int i = kmalloc_index(size); if (!i) return ZERO_SIZE_PTR; diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 7385547c04b1..d9228e4d0320 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -85,8 +85,8 @@ struct kmem_cache { unsigned int *random_seq; #endif - size_t useroffset; /* Usercopy region offset */ - size_t usersize; /* Usercopy region size */ + unsigned int useroffset; /* Usercopy region offset */ + unsigned int usersize; /* Usercopy region size */ struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 8ad99c47b19c..3773e26c08c1 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -73,7 +73,7 @@ struct kmem_cache_cpu { * given order would contain. */ struct kmem_cache_order_objects { - unsigned long x; + unsigned int x; }; /* @@ -84,11 +84,12 @@ struct kmem_cache { /* Used for retriving partial slabs etc */ slab_flags_t flags; unsigned long min_partial; - int size; /* The size of an object including meta data */ - int object_size; /* The size of an object without meta data */ - int offset; /* Free pointer offset. */ + unsigned int size; /* The size of an object including meta data */ + unsigned int object_size;/* The size of an object without meta data */ + unsigned int offset; /* Free pointer offset. */ #ifdef CONFIG_SLUB_CPU_PARTIAL - int cpu_partial; /* Number of per cpu partial objects to keep around */ + /* Number of per cpu partial objects to keep around */ + unsigned int cpu_partial; #endif struct kmem_cache_order_objects oo; @@ -98,10 +99,10 @@ struct kmem_cache { gfp_t allocflags; /* gfp flags to use on each alloc */ int refcount; /* Refcount for slab cache destroy */ void (*ctor)(void *); - int inuse; /* Offset to metadata */ - int align; /* Alignment */ - int reserved; /* Reserved bytes at the end of slabs */ - int red_left_pad; /* Left redzone padding size */ + unsigned int inuse; /* Offset to metadata */ + unsigned int align; /* Alignment */ + unsigned int reserved; /* Reserved bytes at the end of slabs */ + unsigned int red_left_pad; /* Left redzone padding size */ const char *name; /* Name (only for display!) 
*/ struct list_head list; /* List of slab caches */ #ifdef CONFIG_SYSFS @@ -110,7 +111,8 @@ struct kmem_cache { #endif #ifdef CONFIG_MEMCG struct memcg_cache_params memcg_params; - int max_attr_size; /* for propagation, maximum size of a stored attr */ + /* for propagation, maximum size of a stored attr */ + unsigned int max_attr_size; #ifdef CONFIG_SYSFS struct kset *memcg_kset; #endif @@ -124,7 +126,7 @@ struct kmem_cache { /* * Defragmentation by allocating from a remote node. */ - int remote_node_defrag_ratio; + unsigned int remote_node_defrag_ratio; #endif #ifdef CONFIG_SLAB_FREELIST_RANDOM @@ -135,8 +137,8 @@ struct kmem_cache { struct kasan_cache kasan_info; #endif - size_t useroffset; /* Usercopy region offset */ - size_t usersize; /* Usercopy region size */ + unsigned int useroffset; /* Usercopy region offset */ + unsigned int usersize; /* Usercopy region size */ struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index b0a507d356ef..fd25f0148566 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -21,6 +21,10 @@ #define MT8173_TOP_AXI_PROT_EN_MFG_M1 BIT(22) #define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT BIT(23) +#define MT2701_TOP_AXI_PROT_EN_MM_M0 BIT(1) +#define MT2701_TOP_AXI_PROT_EN_CONN_M BIT(2) +#define MT2701_TOP_AXI_PROT_EN_CONN_S BIT(8) + #define MT7622_TOP_AXI_PROT_EN_ETHSYS (BIT(3) | BIT(17)) #define MT7622_TOP_AXI_PROT_EN_HIF0 (BIT(24) | BIT(25)) #define MT7622_TOP_AXI_PROT_EN_HIF1 (BIT(26) | BIT(27) | \ diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h index bd8e0864b059..5b98bbdabc25 100644 --- a/include/linux/soc/qcom/mdt_loader.h +++ b/include/linux/soc/qcom/mdt_loader.h @@ -14,6 +14,7 @@ struct firmware; ssize_t qcom_mdt_get_size(const struct firmware *fw); int qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, - phys_addr_t mem_phys, size_t mem_size); + phys_addr_t mem_phys, size_t mem_size, + phys_addr_t *reloc_base); #endif diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h index e57eb4b6cc5a..fc0b445bb36b 100644 --- a/include/linux/soc/samsung/exynos-pmu.h +++ b/include/linux/soc/samsung/exynos-pmu.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2014 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Header for EXYNOS PMU Driver support - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __LINUX_SOC_EXYNOS_PMU_H diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index bebdde5dccd6..66dcb9ec273a 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -1,14 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2010-2015 Samsung Electronics Co., Ltd. * http://www.samsung.com * * EXYNOS - Power management unit definition * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * * Notice: * This is not a list of all Exynos Power Management Unit SFRs. 
* There are too many of them, not mentioning subtle differences diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index ed761f751ecb..9b11b6a0978c 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -217,5 +217,12 @@ void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, const struct sockaddr *sap); void rpc_cleanup_clids(void); + +static inline int rpc_reply_expected(struct rpc_task *task) +{ + return (task->tk_msg.rpc_proc != NULL) && + (task->tk_msg.rpc_proc->p_decode != NULL); +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 786ae2255f05..574368e8a16f 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -272,6 +272,7 @@ struct svc_rqst { #define RQ_BUSY (6) /* request is busy */ #define RQ_DATA (7) /* request has data */ unsigned long rq_flags; /* flags field */ + ktime_t rq_qtime; /* enqueue time */ void * rq_argp; /* decoded arguments */ void * rq_resp; /* xdr'd results */ @@ -283,6 +284,7 @@ struct svc_rqst { int rq_reserved; /* space on socket outq * reserved for this request */ + ktime_t rq_stime; /* start time */ struct cache_req rq_chandle; /* handle passed to caches for * request delaying @@ -493,6 +495,10 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); +unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, + struct kvec *first, size_t total); +char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, + struct kvec *first, size_t total); #define RPC_MAX_ADDRBUFLEN (63U) diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 4b731b046bcd..7337e1221590 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -132,9 +132,6 @@ struct svcxprt_rdma { #define RDMAXPRT_CONN_PENDING 3 #define RPCRDMA_LISTEN_BACKLOG 10 -/* The default ORD value is based on two outstanding full-size writes with a - * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ -#define RPCRDMA_ORD (64/4) #define RPCRDMA_MAX_REQUESTS 32 /* Typical ULP usage of BC requests is NFSv4.1 backchannel. 
Our diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 1caf7bc83306..c3d72066d4b1 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -25,7 +25,7 @@ struct svc_xprt_ops { void (*xpo_release_rqst)(struct svc_rqst *); void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); - int (*xpo_secure_port)(struct svc_rqst *); + void (*xpo_secure_port)(struct svc_rqst *rqstp); void (*xpo_kill_temp_xprt)(struct svc_xprt *); }; @@ -83,6 +83,7 @@ struct svc_xprt { size_t xpt_locallen; /* length of address */ struct sockaddr_storage xpt_remote; /* remote peer's address */ size_t xpt_remotelen; /* length of address */ + char xpt_remotebuf[INET6_ADDRSTRLEN + 10]; struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ struct list_head xpt_users; /* callbacks on free */ @@ -152,7 +153,10 @@ static inline void svc_xprt_set_remote(struct svc_xprt *xprt, { memcpy(&xprt->xpt_remote, sa, salen); xprt->xpt_remotelen = salen; + snprintf(xprt->xpt_remotebuf, sizeof(xprt->xpt_remotebuf) - 1, + "%pISpc", sa); } + static inline unsigned short svc_addr_port(const struct sockaddr *sa) { const struct sockaddr_in *sin = (const struct sockaddr_in *)sa; diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index d950223c64b1..2bd68177a442 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -253,6 +253,12 @@ xdr_stream_remaining(const struct xdr_stream *xdr) return xdr->nwords << 2; } +ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, + size_t size); +ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr, + size_t maxlen, gfp_t gfp_flags); +ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, + size_t size); ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, size_t maxlen, gfp_t gfp_flags); /** @@ -313,6 +319,31 @@ xdr_stream_encode_u64(struct xdr_stream *xdr, __u64 n) } /** + * xdr_stream_encode_opaque_inline - Encode opaque xdr data + * @xdr: pointer to xdr_stream + * @ptr: pointer to void pointer + * @len: size of object + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t len) +{ + size_t count = sizeof(__u32) + xdr_align_size(len); + __be32 *p = xdr_reserve_space(xdr, count); + + if (unlikely(!p)) { + *ptr = NULL; + return -EMSGSIZE; + } + xdr_encode_opaque(p, NULL, len); + *ptr = ++p; + return count; +} + +/** * xdr_stream_encode_opaque_fixed - Encode fixed length opaque xdr data * @xdr: pointer to xdr_stream * @ptr: pointer to opaque data object @@ -356,6 +387,31 @@ xdr_stream_encode_opaque(struct xdr_stream *xdr, const void *ptr, size_t len) } /** + * xdr_stream_encode_uint32_array - Encode variable length array of integers + * @xdr: pointer to xdr_stream + * @array: array of integers + * @array_size: number of elements in @array + * + * Return values: + * On success, returns length in bytes of XDR buffer consumed + * %-EMSGSIZE on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_encode_uint32_array(struct xdr_stream *xdr, + const __u32 *array, size_t array_size) +{ + ssize_t ret = (array_size+1) * sizeof(__u32); + __be32 *p = xdr_reserve_space(xdr, ret); + + if (unlikely(!p)) + return -EMSGSIZE; + *p++ = cpu_to_be32(array_size); + for (; array_size > 0; p++, array++, array_size--) + *p = cpu_to_be32p(array); + return ret; +} + 
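A quick usage sketch for the xdr_stream_encode_uint32_array() helper added above. The helper and its return convention come from this patch; encode_flags_example() and its payload are invented for illustration:

static int encode_flags_example(struct xdr_stream *xdr)
{
	__u32 flags[3] = { 1, 2, 4 };	/* hypothetical payload */
	ssize_t ret;

	/* Reserves (count + 1) * 4 bytes, then emits the element count
	 * followed by each element in big-endian order.
	 */
	ret = xdr_stream_encode_uint32_array(xdr, flags, ARRAY_SIZE(flags));
	if (ret < 0)
		return ret;	/* -EMSGSIZE: no space left in the XDR buffer */
	return 0;
}
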
+/** * xdr_stream_decode_u32 - Decode a 32-bit integer * @xdr: pointer to xdr_stream * @ptr: location to store integer @@ -432,6 +488,44 @@ xdr_stream_decode_opaque_inline(struct xdr_stream *xdr, void **ptr, size_t maxle } return len; } + +/** + * xdr_stream_decode_uint32_array - Decode variable length array of integers + * @xdr: pointer to xdr_stream + * @array: location to store the integer array or NULL + * @array_size: number of elements to store + * + * Return values: + * On success, returns number of elements stored in @array + * %-EBADMSG on XDR buffer overflow + * %-EMSGSIZE if the size of the array exceeds @array_size + */ +static inline ssize_t +xdr_stream_decode_uint32_array(struct xdr_stream *xdr, + __u32 *array, size_t array_size) +{ + __be32 *p; + __u32 len; + ssize_t retval; + + if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) + return -EBADMSG; + p = xdr_inline_decode(xdr, len * sizeof(*p)); + if (unlikely(!p)) + return -EBADMSG; + if (array == NULL) + return len; + if (len <= array_size) { + if (len < array_size) + memset(array+len, 0, (array_size-len)*sizeof(*array)); + array_size = len; + retval = len; + } else + retval = -EMSGSIZE; + for (; array_size > 0; p++, array++, array_size--) + *array = be32_to_cpup(p); + return retval; +} #endif /* __KERNEL__ */ #endif /* _SUNRPC_XDR_H_ */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 7fad83881ce1..5fea0fb420df 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -197,7 +197,7 @@ struct rpc_xprt { struct list_head free; /* free slots */ unsigned int max_reqs; /* max number of slots */ unsigned int min_reqs; /* min number of slots */ - atomic_t num_reqs; /* total slots */ + unsigned int num_reqs; /* total slots */ unsigned long state; /* transport state */ unsigned char resvport : 1; /* use a reserved port */ atomic_t swapper; /* we're swapping over this @@ -373,6 +373,7 @@ void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action); void xprt_write_space(struct rpc_xprt *xprt); void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result); struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); +void xprt_update_rtt(struct rpc_task *task); void xprt_complete_rqst(struct rpc_task *task, int copied); void xprt_pin_rqst(struct rpc_rqst *req); void xprt_unpin_rqst(struct rpc_rqst *req); diff --git a/include/linux/swap.h b/include/linux/swap.h index a1a3f4ed94ce..2417d288e016 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -400,7 +400,6 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *, #define SWAP_ADDRESS_SPACE_SHIFT 14 #define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT) extern struct address_space *swapper_spaces[]; -extern bool swap_vma_readahead; #define swap_address_space(entry) \ (&swapper_spaces[swp_type(entry)][swp_offset(entry) \ >> SWAP_ADDRESS_SPACE_SHIFT]) @@ -422,14 +421,10 @@ extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t, struct vm_area_struct *vma, unsigned long addr, bool *new_page_allocated); -extern struct page *swapin_readahead(swp_entry_t, gfp_t, - struct vm_area_struct *vma, unsigned long addr); - -extern struct page *swap_readahead_detect(struct vm_fault *vmf, - struct vma_swap_readahead *swap_ra); -extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask, - struct vm_fault *vmf, - struct vma_swap_readahead *swap_ra); +extern struct page 
*swap_cluster_readahead(swp_entry_t entry, gfp_t flag, + struct vm_fault *vmf); +extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag, + struct vm_fault *vmf); /* linux/mm/swapfile.c */ extern atomic_long_t nr_swap_pages; @@ -437,11 +432,6 @@ extern long total_swap_pages; extern atomic_t nr_rotate_swap; extern bool has_usable_swap(void); -static inline bool swap_use_vma_readahead(void) -{ - return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap); -} - /* Swap 50% full? Release swapcache more aggressively.. */ static inline bool vm_swap_full(void) { @@ -537,26 +527,14 @@ static inline void put_swap_page(struct page *page, swp_entry_t swp) { } -static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, - struct vm_area_struct *vma, unsigned long addr) +static inline struct page *swap_cluster_readahead(swp_entry_t entry, + gfp_t gfp_mask, struct vm_fault *vmf) { return NULL; } -static inline bool swap_use_vma_readahead(void) -{ - return false; -} - -static inline struct page *swap_readahead_detect( - struct vm_fault *vmf, struct vma_swap_readahead *swap_ra) -{ - return NULL; -} - -static inline struct page *do_swap_page_readahead( - swp_entry_t fentry, gfp_t gfp_mask, - struct vm_fault *vmf, struct vma_swap_readahead *swap_ra) +static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, + struct vm_fault *vmf) { return NULL; } diff --git a/include/linux/tick.h b/include/linux/tick.h index 7f8c9a127f5a..55388ab45fd4 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -115,27 +115,46 @@ enum tick_dep_bits { extern bool tick_nohz_enabled; extern bool tick_nohz_tick_stopped(void); extern bool tick_nohz_tick_stopped_cpu(int cpu); +extern void tick_nohz_idle_stop_tick(void); +extern void tick_nohz_idle_retain_tick(void); +extern void tick_nohz_idle_restart_tick(void); extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); -extern ktime_t tick_nohz_get_sleep_length(void); +extern bool tick_nohz_idle_got_tick(void); +extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next); extern unsigned long tick_nohz_get_idle_calls(void); extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); + +static inline void tick_nohz_idle_stop_tick_protected(void) +{ + local_irq_disable(); + tick_nohz_idle_stop_tick(); + local_irq_enable(); +} + #else /* !CONFIG_NO_HZ_COMMON */ #define tick_nohz_enabled (0) static inline int tick_nohz_tick_stopped(void) { return 0; } static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; } +static inline void tick_nohz_idle_stop_tick(void) { } +static inline void tick_nohz_idle_retain_tick(void) { } +static inline void tick_nohz_idle_restart_tick(void) { } static inline void tick_nohz_idle_enter(void) { } static inline void tick_nohz_idle_exit(void) { } +static inline bool tick_nohz_idle_got_tick(void) { return false; } -static inline ktime_t tick_nohz_get_sleep_length(void) +static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) { - return NSEC_PER_SEC / HZ; + *delta_next = TICK_NSEC; + return *delta_next; } static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } + +static inline void tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ #ifdef CONFIG_NO_HZ_FULL diff 
--git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 82c219dfd3bb..9737fbec7019 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -31,6 +31,7 @@ struct timespec64 get_monotonic_coarse64(void); extern void getrawmonotonic64(struct timespec64 *ts); extern void ktime_get_ts64(struct timespec64 *ts); extern time64_t ktime_get_seconds(void); +extern time64_t __ktime_get_real_seconds(void); extern time64_t ktime_get_real_seconds(void); extern void ktime_get_active_ts64(struct timespec64 *ts); diff --git a/include/linux/tpm.h b/include/linux/tpm.h index bcdd3790e94d..06639fb6ab85 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -44,7 +44,7 @@ struct tpm_class_ops { bool (*update_timeouts)(struct tpm_chip *chip, unsigned long *timeout_cap); int (*request_locality)(struct tpm_chip *chip, int loc); - void (*relinquish_locality)(struct tpm_chip *chip, int loc); + int (*relinquish_locality)(struct tpm_chip *chip, int loc); void (*clk_enable)(struct tpm_chip *chip, bool value); }; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index e0e98000b665..2bde3eff564c 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -430,11 +430,13 @@ enum event_trigger_type { extern int filter_match_preds(struct event_filter *filter, void *rec); -extern enum event_trigger_type event_triggers_call(struct trace_event_file *file, - void *rec); -extern void event_triggers_post_call(struct trace_event_file *file, - enum event_trigger_type tt, - void *rec); +extern enum event_trigger_type +event_triggers_call(struct trace_event_file *file, void *rec, + struct ring_buffer_event *event); +extern void +event_triggers_post_call(struct trace_event_file *file, + enum event_trigger_type tt, + void *rec, struct ring_buffer_event *event); bool trace_event_ignore_this_pid(struct trace_event_file *trace_file); @@ -454,7 +456,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) { if (eflags & EVENT_FILE_FL_TRIGGER_MODE) - event_triggers_call(file, NULL); + event_triggers_call(file, NULL, NULL); if (eflags & EVENT_FILE_FL_SOFT_DISABLED) return true; if (eflags & EVENT_FILE_FL_PID_FILTER) diff --git a/include/linux/utsname.h b/include/linux/utsname.h index c8060c2ecd04..44429d9142ca 100644 --- a/include/linux/utsname.h +++ b/include/linux/utsname.h @@ -44,6 +44,8 @@ static inline void put_uts_ns(struct uts_namespace *ns) { kref_put(&ns->kref, free_uts_ns); } + +void uts_ns_init(void); #else static inline void get_uts_ns(struct uts_namespace *ns) { @@ -61,6 +63,10 @@ static inline struct uts_namespace *copy_utsname(unsigned long flags, return old_ns; } + +static inline void uts_ns_init(void) +{ +} #endif #ifdef CONFIG_PROC_SYSCTL diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index a4c2317d8b9f..f25cef84b41d 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -20,6 +20,17 @@ extern int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); #endif +struct reclaim_stat { + unsigned nr_dirty; + unsigned nr_unqueued_dirty; + unsigned nr_congested; + unsigned nr_writeback; + unsigned nr_immediate; + unsigned nr_activate; + unsigned nr_ref_keep; + unsigned nr_unmap_fail; +}; + #ifdef CONFIG_VM_EVENT_COUNTERS /* * Light weight per cpu counter implementation. 
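The new struct reclaim_stat above is a plain tally of per-pass reclaim outcomes. A sketch of how a reclaim loop might fill it — only the field names come from the hunk; the accounting helper itself is hypothetical:

	static void hypothetical_account_page(struct reclaim_stat *stat,
					      bool dirty, bool queued,
					      bool congested)
	{
		if (dirty) {
			stat->nr_dirty++;
			if (!queued)
				stat->nr_unqueued_dirty++;
		}
		if (congested)
			stat->nr_congested++;
	}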
diff --git a/include/linux/xarray.h b/include/linux/xarray.h new file mode 100644 index 000000000000..2dfc8006fe64 --- /dev/null +++ b/include/linux/xarray.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _LINUX_XARRAY_H +#define _LINUX_XARRAY_H +/* + * eXtensible Arrays + * Copyright (c) 2017 Microsoft Corporation + * Author: Matthew Wilcox <mawilcox@microsoft.com> + */ + +#include <linux/spinlock.h> + +#define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) +#define xa_lock(xa) spin_lock(&(xa)->xa_lock) +#define xa_unlock(xa) spin_unlock(&(xa)->xa_lock) +#define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock) +#define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock) +#define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock) +#define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock) +#define xa_lock_irqsave(xa, flags) \ + spin_lock_irqsave(&(xa)->xa_lock, flags) +#define xa_unlock_irqrestore(xa, flags) \ + spin_unlock_irqrestore(&(xa)->xa_lock, flags) + +#endif /* _LINUX_XARRAY_H */ diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 57a8e98f2708..2219cce81ca4 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -47,6 +47,8 @@ void zs_destroy_pool(struct zs_pool *pool); unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags); void zs_free(struct zs_pool *pool, unsigned long obj); +size_t zs_huge_class_size(struct zs_pool *pool); + void *zs_map_object(struct zs_pool *pool, unsigned long handle, enum zs_mapmode mm); void zs_unmap_object(struct zs_pool *pool, unsigned long handle); diff --git a/include/media/i2c/saa6588.h b/include/media/i2c/saa6588.h index b5ec1aa60ed5..a0825f532f71 100644 --- a/include/media/i2c/saa6588.h +++ b/include/media/i2c/saa6588.h @@ -32,6 +32,7 @@ struct saa6588_command { unsigned char __user *buffer; struct file *instance; poll_table *event_list; + __poll_t poll_mask; }; /* These ioctls are internal to the kernel */ diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 54b689247937..160bca96d524 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -320,6 +320,7 @@ void v4l_bound_align_image(unsigned int *width, unsigned int wmin, * set of resolutions contained in an array of a driver specific struct. * * @array: a driver specific array of image sizes + * @array_size: the length of the driver specific array of image sizes * @width_field: the name of the width field in the driver specific struct * @height_field: the name of the height field in the driver specific struct * @width: desired width. @@ -332,13 +333,13 @@ void v4l_bound_align_image(unsigned int *width, unsigned int wmin, * * Returns the best match or NULL if the length of the array is zero. 
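The initial xarray.h above ships only locking wrappers, and they merely expand against a member named xa_lock, so any structure embedding such a spinlock can use them. A sketch, where the container type is an assumption:

	struct hypothetical_array {
		spinlock_t xa_lock;	/* the only name the macros rely on */
		void *slots[64];
	};

	static void hypothetical_store(struct hypothetical_array *xa,
				       unsigned int i, void *entry)
	{
		unsigned long flags;

		xa_lock_irqsave(xa, flags);
		xa->slots[i] = entry;
		xa_unlock_irqrestore(xa, flags);
	}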
*/ -#define v4l2_find_nearest_size(array, width_field, height_field, \ +#define v4l2_find_nearest_size(array, array_size, width_field, height_field, \ width, height) \ ({ \ BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \ sizeof((array)->height_field) != sizeof(u32)); \ (typeof(&(*(array))))__v4l2_find_nearest_size( \ - (array), ARRAY_SIZE(array), sizeof(*(array)), \ + (array), array_size, sizeof(*(array)), \ offsetof(typeof(*(array)), width_field), \ offsetof(typeof(*(array)), height_field), \ width, height); \ diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 27634e8d2585..f60cf9cf3b9c 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -33,13 +33,13 @@ */ enum vfl_devnode_type { VFL_TYPE_GRABBER = 0, - VFL_TYPE_VBI = 1, - VFL_TYPE_RADIO = 2, - VFL_TYPE_SUBDEV = 3, - VFL_TYPE_SDR = 4, - VFL_TYPE_TOUCH = 5, + VFL_TYPE_VBI, + VFL_TYPE_RADIO, + VFL_TYPE_SUBDEV, + VFL_TYPE_SDR, + VFL_TYPE_TOUCH, + VFL_TYPE_MAX /* Shall be the last one */ }; -#define VFL_TYPE_MAX VFL_TYPE_TOUCH /** * enum vfl_direction - Identifies if a &struct video_device corresponds diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 95ccc1eef558..b619a190ff12 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -895,7 +895,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u16 conn_timeout); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, - u8 role); + u8 role, bdaddr_t *direct_rpa); struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, u8 sec_level, u8 auth_type); struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, diff --git a/include/net/devlink.h b/include/net/devlink.h index e21d8cadd480..2e4f71e16e95 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -232,14 +232,6 @@ struct devlink_dpipe_headers { }; /** - * struct devlink_resource_ops - resource ops - * @occ_get: get the occupied size - */ -struct devlink_resource_ops { - u64 (*occ_get)(struct devlink *devlink); -}; - -/** * struct devlink_resource_size_params - resource's size parameters * @size_min: minimum size which can be set * @size_max: maximum size which can be set @@ -265,6 +257,8 @@ devlink_resource_size_params_init(struct devlink_resource_size_params *size_para size_params->unit = unit; } +typedef u64 devlink_resource_occ_get_t(void *priv); + /** * struct devlink_resource - devlink resource * @name: name of the resource @@ -277,7 +271,6 @@ devlink_resource_size_params_init(struct devlink_resource_size_params *size_para * @size_params: size parameters * @list: parent list * @resource_list: list of child resources - * @resource_ops: resource ops */ struct devlink_resource { const char *name; @@ -289,7 +282,8 @@ struct devlink_resource { struct devlink_resource_size_params size_params; struct list_head list; struct list_head resource_list; - const struct devlink_resource_ops *resource_ops; + devlink_resource_occ_get_t *occ_get; + void *occ_get_priv; }; #define DEVLINK_RESOURCE_ID_PARENT_TOP 0 @@ -409,8 +403,7 @@ int devlink_resource_register(struct devlink *devlink, u64 resource_size, u64 resource_id, u64 parent_resource_id, - const struct devlink_resource_size_params *size_params, - const struct devlink_resource_ops *resource_ops); + const struct devlink_resource_size_params *size_params); void devlink_resources_unregister(struct devlink *devlink, struct devlink_resource *resource); 
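The devlink refactor above replaces the one-function devlink_resource_ops with a typed occupancy callback plus a priv pointer, registered separately through the helpers declared just below. A sketch of a driver adopting it (all driver names here are hypothetical):

	static u64 hypothetical_occ_get(void *priv)
	{
		struct hypothetical_driver *drv = priv;

		return atomic64_read(&drv->entries_in_use);
	}

	static void hypothetical_resources_init(struct hypothetical_driver *drv)
	{
		/* devlink_resource_register() no longer takes ops */
		devlink_resource_occ_get_register(drv->devlink,
						  HYPOTHETICAL_RESOURCE_ID,
						  hypothetical_occ_get, drv);
	}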
int devlink_resource_size_get(struct devlink *devlink, @@ -419,6 +412,12 @@ int devlink_resource_size_get(struct devlink *devlink, int devlink_dpipe_table_resource_set(struct devlink *devlink, const char *table_name, u64 resource_id, u64 resource_units); +void devlink_resource_occ_get_register(struct devlink *devlink, + u64 resource_id, + devlink_resource_occ_get_t *occ_get, + void *occ_get_priv); +void devlink_resource_occ_get_unregister(struct devlink *devlink, + u64 resource_id); #else @@ -562,8 +561,7 @@ devlink_resource_register(struct devlink *devlink, u64 resource_size, u64 resource_id, u64 parent_resource_id, - const struct devlink_resource_size_params *size_params, - const struct devlink_resource_ops *resource_ops) + const struct devlink_resource_size_params *size_params) { return 0; } @@ -589,6 +587,20 @@ devlink_dpipe_table_resource_set(struct devlink *devlink, return -EOPNOTSUPP; } +static inline void +devlink_resource_occ_get_register(struct devlink *devlink, + u64 resource_id, + devlink_resource_occ_get_t *occ_get, + void *occ_get_priv) +{ +} + +static inline void +devlink_resource_occ_get_unregister(struct devlink *devlink, + u64 resource_id) +{ +} + #endif #endif /* _NET_DEVLINK_H_ */ diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 899495589a7e..c7be1ca8e562 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -43,6 +43,7 @@ struct inet_timewait_sock { #define tw_family __tw_common.skc_family #define tw_state __tw_common.skc_state #define tw_reuse __tw_common.skc_reuse +#define tw_reuseport __tw_common.skc_reuseport #define tw_ipv6only __tw_common.skc_ipv6only #define tw_bound_dev_if __tw_common.skc_bound_dev_if #define tw_node __tw_common.skc_nulls_node diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 36bb794f5cd6..902ff382a6dc 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -7,7 +7,7 @@ static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining) { - return remaining >= sizeof(*rtnh) && + return remaining >= (int)sizeof(*rtnh) && rtnh->rtnh_len >= sizeof(*rtnh) && rtnh->rtnh_len <= remaining; } diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 72c5b8fc3232..28b996d63490 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -432,9 +432,11 @@ static inline int sctp_list_single_entry(struct list_head *head) static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu) { struct sctp_sock *sp = sctp_sk(asoc->base.sk); + struct sctp_af *af = sp->pf->af; int frag = pmtu; - frag -= sp->pf->af->net_header_len; + frag -= af->ip_options_len(asoc->base.sk); + frag -= af->net_header_len; frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream); if (asoc->user_frag) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index c63249ea34c3..a0ec462bc1a9 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -491,6 +491,7 @@ struct sctp_af { void (*ecn_capable)(struct sock *sk); __u16 net_header_len; int sockaddr_len; + int (*ip_options_len)(struct sock *sk); sa_family_t sa_family; struct list_head list; }; @@ -515,6 +516,7 @@ struct sctp_pf { int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr); void (*to_sk_saddr)(union sctp_addr *, struct sock *sk); void (*to_sk_daddr)(union sctp_addr *, struct sock *sk); + void (*copy_ip_options)(struct sock *sk, struct sock *newsk); struct sctp_af *af; }; @@ -1320,6 +1322,16 @@ struct sctp_endpoint { reconf_enable:1; __u8 
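As a worked example of the sctp_frag_point() change above, assuming IPv4 with no IP options and classic DATA chunks: frag = 1500 - 0 (ip_options_len) - 20 (IPv4 header) - 12 (SCTP common header) - 16 (DATA chunk header) = 1452 bytes, the familiar SCTP fragmentation point for a 1500-byte PMTU. The new af->ip_options_len() hook simply shrinks that figure further when IP options are actually present on the socket.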
strreset_enable; + + /* Security identifiers from incoming (INIT). These are set by + * security_sctp_assoc_request(). These will only be used by + * SCTP TCP type sockets and peeled off connections as they + * cause a new socket to be generated. security_sctp_sk_clone() + * will then plug these into the new socket. + */ + + u32 secid; + u32 peer_secid; }; /* Recover the outter endpoint structure. */ diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h index 8716d5942b65..8fcf8908a694 100644 --- a/include/net/slhc_vj.h +++ b/include/net/slhc_vj.h @@ -127,6 +127,7 @@ typedef __u32 int32; */ struct cstate { byte_t cs_this; /* connection id number (xmit) */ + bool initialized; /* true if initialized */ struct cstate *next; /* next in ring (xmit) */ struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */ struct tcphdr cs_tcp; diff --git a/include/net/sock.h b/include/net/sock.h index 49bd2c1796b0..74d725fdbe0f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1114,8 +1114,8 @@ struct proto { struct kmem_cache *slab; unsigned int obj_size; slab_flags_t slab_flags; - size_t useroffset; /* Usercopy region offset */ - size_t usersize; /* Usercopy region size */ + unsigned int useroffset; /* Usercopy region offset */ + unsigned int usersize; /* Usercopy region size */ struct percpu_counter *orphan_count; diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 415e09960017..a08cc7278980 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -119,10 +119,6 @@ int rdma_resolve_ip(struct rdma_addr_client *client, struct rdma_dev_addr *addr, void *context), void *context); -int rdma_resolve_ip_route(struct sockaddr *src_addr, - const struct sockaddr *dst_addr, - struct rdma_dev_addr *addr); - void rdma_addr_cancel(struct rdma_dev_addr *addr); void rdma_copy_addr(struct rdma_dev_addr *dev_addr, @@ -133,11 +129,6 @@ int rdma_addr_size(struct sockaddr *addr); int rdma_addr_size_in6(struct sockaddr_in6 *addr); int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr); -int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, - const union ib_gid *dgid, - u8 *dmac, const struct net_device *ndev, - int *hoplimit); - static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr) { return ((u16)dev_addr->broadcast[8] << 8) | (u16)dev_addr->broadcast[9]; diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index 385ec88ee9e5..eb49cc8d1f95 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -55,20 +55,6 @@ int ib_get_cached_gid(struct ib_device *device, union ib_gid *gid, struct ib_gid_attr *attr); -/** - * ib_find_cached_gid - Returns the port number and GID table index where - * a specified GID value occurs. - * @device: The device to query. - * @gid: The GID value to search for. - * @gid_type: The GID type to search for. - * @ndev: In RoCE, the net device of the device. NULL means ignore. - * @port_num: The port number of the device where the GID value was found. - * @index: The index into the cached GID table where the GID was found. This - * parameter may be NULL. - * - * ib_find_cached_gid() searches for the specified GID value in - * the local software cache. - */ int ib_find_cached_gid(struct ib_device *device, const union ib_gid *gid, enum ib_gid_type gid_type, @@ -76,21 +62,6 @@ int ib_find_cached_gid(struct ib_device *device, u8 *port_num, u16 *index); -/** - * ib_find_cached_gid_by_port - Returns the GID table index where a specified - * GID value occurs - * @device: The device to query. 
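The struct proto change above only narrows the usercopy whitelist fields from size_t to unsigned int; a region is still declared the same way. A sketch, where the socket type and member are assumptions:

	static struct proto hypothetical_proto = {
		.name		= "HYPOTHETICAL",
		.obj_size	= sizeof(struct hypothetical_sock),
		/* whitelist the one member userspace may copy to/from */
		.useroffset	= offsetof(struct hypothetical_sock, user_opts),
		.usersize	= FIELD_SIZEOF(struct hypothetical_sock, user_opts),
	};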
- * @gid: The GID value to search for. - * @gid_type: The GID type to search for. - * @port_num: The port number of the device where the GID value sould be - * searched. - * @ndev: In RoCE, the net device of the device. Null means ignore. - * @index: The index into the cached GID table where the GID was found. This - * parameter may be NULL. - * - * ib_find_cached_gid() searches for the specified GID value in - * the local software cache. - */ int ib_find_cached_gid_by_port(struct ib_device *device, const union ib_gid *gid, enum ib_gid_type gid_type, diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index 811cfcfcbe3d..bacb144f7780 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -163,7 +163,15 @@ struct sa_path_rec_ib { u8 raw_traffic; }; +/** + * struct sa_path_rec_roce - RoCE specific portion of the path record entry + * @route_resolved: When set, it indicates that this route is already + * resolved for this path record entry. + * @dmac: Destination mac address for the given DGID entry + * of the path record entry. + */ struct sa_path_rec_roce { + bool route_resolved; u8 dmac[ETH_ALEN]; /* ignored in IB */ int ifindex; @@ -590,6 +598,11 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec) (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2)); } +static inline bool sa_path_is_opa(struct sa_path_rec *rec) +{ + return (rec->rec_type == SA_PATH_REC_TYPE_OPA); +} + static inline void sa_path_set_slid(struct sa_path_rec *rec, u32 slid) { if (rec->rec_type == SA_PATH_REC_TYPE_IB) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 6eb174753acf..9fc8a825aa28 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -64,6 +64,8 @@ #include <linux/cgroup_rdma.h> #include <uapi/rdma/ib_user_verbs.h> #include <rdma/restrack.h> +#include <uapi/rdma/rdma_user_ioctl.h> +#include <uapi/rdma/ib_user_ioctl_verbs.h> #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN @@ -90,8 +92,11 @@ enum ib_gid_type { #define ROCE_V2_UDP_DPORT 4791 struct ib_gid_attr { - enum ib_gid_type gid_type; struct net_device *ndev; + struct ib_device *device; + enum ib_gid_type gid_type; + u16 index; + u8 port_num; }; enum rdma_node_type { @@ -316,6 +321,18 @@ struct ib_cq_caps { u16 max_cq_moderation_period; }; +struct ib_dm_mr_attr { + u64 length; + u64 offset; + u32 access_flags; +}; + +struct ib_dm_alloc_attr { + u64 length; + u32 alignment; + u32 flags; +}; + struct ib_device_attr { u64 fw_ver; __be64 sys_image_guid; @@ -367,6 +384,7 @@ struct ib_device_attr { u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ struct ib_tm_caps tm_caps; struct ib_cq_caps cq_caps; + u64 max_dm_size; }; enum ib_mtu { @@ -469,6 +487,9 @@ enum ib_port_speed { /** * struct rdma_hw_stats + * @lock - Mutex to protect parallel write access to lifespan and values + * of counters, which are 64bits and not guaranteeed to be written + * atomicaly on 32bits systems. * @timestamp - Used by the core code to track when the last update was * @lifespan - Used by the core code to determine how old the counters * should be before being updated again. 
Stored in jiffies, defaults @@ -484,6 +505,7 @@ enum ib_port_speed { * filled in by the drivers get_stats routine */ struct rdma_hw_stats { + struct mutex lock; /* Protect lifespan and values[] */ unsigned long timestamp; unsigned long lifespan; const char * const *names; @@ -1755,6 +1777,14 @@ struct ib_qp { struct rdma_restrack_entry res; }; +struct ib_dm { + struct ib_device *device; + u32 length; + u32 flags; + struct ib_uobject *uobject; + atomic_t usecnt; +}; + struct ib_mr { struct ib_device *device; struct ib_pd *pd; @@ -1768,6 +1798,13 @@ struct ib_mr { struct ib_uobject *uobject; /* user */ struct list_head qp_entry; /* FR */ }; + + struct ib_dm *dm; + + /* + * Implementation details of the RDMA core, don't use in drivers: + */ + struct rdma_restrack_entry res; }; struct ib_mw { @@ -1810,6 +1847,7 @@ enum ib_flow_spec_type { /* L3 header*/ IB_FLOW_SPEC_IPV4 = 0x30, IB_FLOW_SPEC_IPV6 = 0x31, + IB_FLOW_SPEC_ESP = 0x34, /* L4 headers*/ IB_FLOW_SPEC_TCP = 0x40, IB_FLOW_SPEC_UDP = 0x41, @@ -1818,6 +1856,7 @@ enum ib_flow_spec_type { /* Actions */ IB_FLOW_SPEC_ACTION_TAG = 0x1000, IB_FLOW_SPEC_ACTION_DROP = 0x1001, + IB_FLOW_SPEC_ACTION_HANDLE = 0x1002, }; #define IB_FLOW_SPEC_LAYER_MASK 0xF0 #define IB_FLOW_SPEC_SUPPORT_LAYERS 8 @@ -1835,7 +1874,8 @@ enum ib_flow_domain { enum ib_flow_flags { IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ - IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */ + IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */ + IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */ }; struct ib_flow_eth_filter { @@ -1940,6 +1980,20 @@ struct ib_flow_spec_tunnel { struct ib_flow_tunnel_filter mask; }; +struct ib_flow_esp_filter { + __be32 spi; + __be32 seq; + /* Must be last */ + u8 real_sz[0]; +}; + +struct ib_flow_spec_esp { + u32 type; + u16 size; + struct ib_flow_esp_filter val; + struct ib_flow_esp_filter mask; +}; + struct ib_flow_spec_action_tag { enum ib_flow_spec_type type; u16 size; @@ -1951,6 +2005,12 @@ struct ib_flow_spec_action_drop { u16 size; }; +struct ib_flow_spec_action_handle { + enum ib_flow_spec_type type; + u16 size; + struct ib_flow_action *act; +}; + union ib_flow_spec { struct { u32 type; @@ -1962,8 +2022,10 @@ union ib_flow_spec { struct ib_flow_spec_tcp_udp tcp_udp; struct ib_flow_spec_ipv6 ipv6; struct ib_flow_spec_tunnel tunnel; + struct ib_flow_spec_esp esp; struct ib_flow_spec_action_tag flow_tag; struct ib_flow_spec_action_drop drop; + struct ib_flow_spec_action_handle action; }; struct ib_flow_attr { @@ -1984,6 +2046,64 @@ struct ib_flow { struct ib_uobject *uobject; }; +enum ib_flow_action_type { + IB_FLOW_ACTION_UNSPECIFIED, + IB_FLOW_ACTION_ESP = 1, +}; + +struct ib_flow_action_attrs_esp_keymats { + enum ib_uverbs_flow_action_esp_keymat protocol; + union { + struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm; + } keymat; +}; + +struct ib_flow_action_attrs_esp_replays { + enum ib_uverbs_flow_action_esp_replay protocol; + union { + struct ib_uverbs_flow_action_esp_replay_bmp bmp; + } replay; +}; + +enum ib_flow_action_attrs_esp_flags { + /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags + * This is done in order to share the same flags between user-space and + * kernel and spare an unnecessary translation. 
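Given the new mutex in struct rdma_hw_stats above, a driver-side counter refresh would look roughly like the following. The full struct also carries num_counters and a u64 value[] array that this hunk does not show; the helper itself is hypothetical:

	static void hypothetical_refresh_stats(struct rdma_hw_stats *stats,
					       const u64 *hw_counters)
	{
		int i;

		mutex_lock(&stats->lock);
		for (i = 0; i < stats->num_counters; i++)
			stats->value[i] = hw_counters[i];
		stats->timestamp = jiffies;
		mutex_unlock(&stats->lock);
	}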
+ */ + + /* Kernel flags */ + IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32, + IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33, +}; + +struct ib_flow_spec_list { + struct ib_flow_spec_list *next; + union ib_flow_spec spec; +}; + +struct ib_flow_action_attrs_esp { + struct ib_flow_action_attrs_esp_keymats *keymat; + struct ib_flow_action_attrs_esp_replays *replay; + struct ib_flow_spec_list *encap; + /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled. + * Value of 0 is a valid value. + */ + u32 esn; + u32 spi; + u32 seq; + u32 tfc_pad; + /* Use enum ib_flow_action_attrs_esp_flags */ + u64 flags; + u64 hard_limit_pkts; +}; + +struct ib_flow_action { + struct ib_device *device; + struct ib_uobject *uobject; + enum ib_flow_action_type type; + atomic_t usecnt; +}; + struct ib_mad_hdr; struct ib_grh; @@ -2060,6 +2180,8 @@ struct ib_port_pkey_list { struct list_head pkey_list; }; +struct uverbs_attr_bundle; + struct ib_device { /* Do not access @dma_device directly from ULP nor from HW drivers. */ struct device *dma_device; @@ -2127,37 +2249,36 @@ struct ib_device { */ struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num); + /* query_gid should be return GID value for @device, when @port_num + * link layer is either IB or iWarp. It is no-op if @port_num port + * is RoCE link layer. + */ int (*query_gid)(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); - /* When calling add_gid, the HW vendor's driver should - * add the gid of device @device at gid index @index of - * port @port_num to be @gid. Meta-info of that gid (for example, - * the network device related to this gid is available - * at @attr. @context allows the HW vendor driver to store extra - * information together with a GID entry. The HW vendor may allocate - * memory to contain this information and store it in @context when a - * new GID entry is written to. Params are consistent until the next - * call of add_gid or delete_gid. The function should return 0 on + /* When calling add_gid, the HW vendor's driver should add the gid + * of device of port at gid index available at @attr. Meta-info of + * that gid (for example, the network device related to this gid) is + * available at @attr. @context allows the HW vendor driver to store + * extra information together with a GID entry. The HW vendor driver may + * allocate memory to contain this information and store it in @context + * when a new GID entry is written to. Params are consistent until the + * next call of add_gid or delete_gid. The function should return 0 on * success or error otherwise. The function could be called - * concurrently for different ports. This function is only called - * when roce_gid_table is used. + * concurrently for different ports. This function is only called when + * roce_gid_table is used. */ - int (*add_gid)(struct ib_device *device, - u8 port_num, - unsigned int index, - const union ib_gid *gid, + int (*add_gid)(const union ib_gid *gid, const struct ib_gid_attr *attr, void **context); /* When calling del_gid, the HW vendor's driver should delete the - * gid of device @device at gid index @index of port @port_num. + * gid of device @device at gid index gid_index of port port_num + * available in @attr. * Upon the deletion of a GID entry, the HW vendor must free any * allocated memory. The caller will clear @context afterwards. * This function is only called when roce_gid_table is used. 
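One consequence of the flag layout described above: because the uapi ESP flags occupy the low 32 bits, the kernel-only bits start at bit 32 and both can be tested on the same u64 field with no translation. A trivial sketch:

	static bool hypothetical_esn_armed(const struct ib_flow_action_attrs_esp *esp)
	{
		return esp->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED;
	}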
*/ - int (*del_gid)(struct ib_device *device, - u8 port_num, - unsigned int index, + int (*del_gid)(const struct ib_gid_attr *attr, void **context); int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index, u16 *pkey); @@ -2315,6 +2436,21 @@ struct ib_device { struct ib_rwq_ind_table_init_attr *init_attr, struct ib_udata *udata); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); + struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *device, + const struct ib_flow_action_attrs_esp *attr, + struct uverbs_attr_bundle *attrs); + int (*destroy_flow_action)(struct ib_flow_action *action); + int (*modify_flow_action_esp)(struct ib_flow_action *action, + const struct ib_flow_action_attrs_esp *attr, + struct uverbs_attr_bundle *attrs); + struct ib_dm * (*alloc_dm)(struct ib_device *device, + struct ib_ucontext *context, + struct ib_dm_alloc_attr *attr, + struct uverbs_attr_bundle *attrs); + int (*dealloc_dm)(struct ib_dm *dm); + struct ib_mr * (*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, + struct ib_dm_mr_attr *attr, + struct uverbs_attr_bundle *attrs); /** * rdma netdev operation * @@ -2376,6 +2512,7 @@ struct ib_device { int comp_vector); struct uverbs_root_spec *specs_root; + enum rdma_driver_id driver_id; }; struct ib_client { @@ -2435,11 +2572,9 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; } -static inline bool ib_is_udata_cleared(struct ib_udata *udata, - size_t offset, - size_t len) +static inline bool ib_is_buffer_cleared(const void __user *p, + size_t len) { - const void __user *p = udata->inbuf + offset; bool ret; u8 *buf; @@ -2455,6 +2590,13 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata, return ret; } +static inline bool ib_is_udata_cleared(struct ib_udata *udata, + size_t offset, + size_t len) +{ + return ib_is_buffer_cleared(udata->inbuf + offset, len); +} + /** * ib_modify_qp_is_ok - Check that the supplied attribute mask * contains all required attributes and no attributes not allowed for @@ -2471,9 +2613,9 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata, * transition from cur_state to next_state is allowed by the IB spec, * and that the attribute mask supplied is allowed for the transition. */ -int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, - enum ib_qp_type type, enum ib_qp_attr_mask mask, - enum rdma_link_layer ll); +bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, + enum ib_qp_type type, enum ib_qp_attr_mask mask, + enum rdma_link_layer ll); void ib_register_event_handler(struct ib_event_handler *event_handler); void ib_unregister_event_handler(struct ib_event_handler *event_handler); @@ -2848,7 +2990,7 @@ int ib_modify_port(struct ib_device *device, struct ib_port_modify *port_modify); int ib_find_gid(struct ib_device *device, union ib_gid *gid, - struct net_device *ndev, u8 *port_num, u16 *index); + u8 *port_num, u16 *index); int ib_find_pkey(struct ib_device *device, u8 port_num, u16 pkey, u16 *index); @@ -3217,18 +3359,6 @@ static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, } /** - * ib_peek_cq - Returns the number of unreaped completions currently - * on the specified CQ. - * @cq: The CQ to peek. - * @wc_cnt: A minimum number of unreaped completions to check for. 
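Under the reworked GID table ops above, the per-driver hooks stop taking device/port/index explicitly; everything now travels in the extended struct ib_gid_attr. A sketch of a driver callback pair, where the hardware-programming helpers are hypothetical:

	static int hypothetical_add_gid(const union ib_gid *gid,
					const struct ib_gid_attr *attr,
					void **context)
	{
		return hypothetical_write_gid_entry(attr->device, attr->port_num,
						    attr->index, gid,
						    attr->gid_type);
	}

	static int hypothetical_del_gid(const struct ib_gid_attr *attr,
					void **context)
	{
		return hypothetical_clear_gid_entry(attr->device, attr->port_num,
						    attr->index);
	}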
- * - * If the number of unreaped completions is greater than or equal to wc_cnt, - * this function returns wc_cnt, otherwise, it returns the actual number of - * unreaped completions. - */ -int ib_peek_cq(struct ib_cq *cq, int wc_cnt); - -/** * ib_req_notify_cq - Request completion notification on a CQ. * @cq: The CQ to generate an event for. * @flags: diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 6538a5cc27b6..690934733ba7 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -38,6 +38,7 @@ #include <linux/in6.h> #include <rdma/ib_addr.h> #include <rdma/ib_sa.h> +#include <uapi/rdma/rdma_user_cm.h> /* * Upon receiving a device removal event, users must destroy the associated @@ -64,14 +65,6 @@ enum rdma_cm_event_type { const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event); -enum rdma_port_space { - RDMA_PS_SDP = 0x0001, - RDMA_PS_IPOIB = 0x0002, - RDMA_PS_IB = 0x013F, - RDMA_PS_TCP = 0x0106, - RDMA_PS_UDP = 0x0111, -}; - #define RDMA_IB_IP_PS_MASK 0xFFFFFFFFFFFF0000ULL #define RDMA_IB_IP_PS_TCP 0x0000000001060000ULL #define RDMA_IB_IP_PS_UDP 0x0000000001110000ULL @@ -120,20 +113,6 @@ struct rdma_cm_event { } param; }; -enum rdma_cm_state { - RDMA_CM_IDLE, - RDMA_CM_ADDR_QUERY, - RDMA_CM_ADDR_RESOLVED, - RDMA_CM_ROUTE_QUERY, - RDMA_CM_ROUTE_RESOLVED, - RDMA_CM_CONNECT, - RDMA_CM_DISCONNECT, - RDMA_CM_ADDR_BOUND, - RDMA_CM_LISTEN, - RDMA_CM_DEVICE_REMOVAL, - RDMA_CM_DESTROYING -}; - struct rdma_cm_id; /** @@ -152,11 +131,17 @@ struct rdma_cm_id { struct ib_qp *qp; rdma_cm_event_handler event_handler; struct rdma_route route; - enum rdma_port_space ps; + enum rdma_ucm_port_space ps; enum ib_qp_type qp_type; u8 port_num; }; +struct rdma_cm_id *__rdma_create_id(struct net *net, + rdma_cm_event_handler event_handler, + void *context, enum rdma_ucm_port_space ps, + enum ib_qp_type qp_type, + const char *caller); + /** * rdma_create_id - Create an RDMA identifier. * @@ -169,10 +154,9 @@ struct rdma_cm_id { * * The id holds a reference on the network namespace until it is destroyed. */ -struct rdma_cm_id *rdma_create_id(struct net *net, - rdma_cm_event_handler event_handler, - void *context, enum rdma_port_space ps, - enum ib_qp_type qp_type); +#define rdma_create_id(net, event_handler, context, ps, qp_type) \ + __rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \ + KBUILD_MODNAME) /** * rdma_destroy_id - Destroys an RDMA identifier. @@ -284,6 +268,9 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); */ int rdma_listen(struct rdma_cm_id *id, int backlog); +int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, + const char *caller); + /** * rdma_accept - Called to accept a connection request or response. * @id: Connection identifier associated with the request. @@ -299,7 +286,8 @@ int rdma_listen(struct rdma_cm_id *id, int backlog); * state of the qp associated with the id is modified to error, such that any * previously posted receive buffers would be flushed. 
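The rdma_create_id() conversion above keeps ULP source unchanged: the macro forwards to __rdma_create_id() and records the caller via KBUILD_MODNAME, and the port-space constants keep their names under the uapi enum. A sketch:

	static int hypothetical_event_handler(struct rdma_cm_id *id,
					      struct rdma_cm_event *event)
	{
		return 0;
	}

	static struct rdma_cm_id *hypothetical_open_id(void)
	{
		/* expands to __rdma_create_id(..., KBUILD_MODNAME) */
		return rdma_create_id(&init_net, hypothetical_event_handler,
				      NULL, RDMA_PS_TCP, IB_QPT_RC);
	}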
*/ -int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); +#define rdma_accept(id, conn_param) \ + __rdma_accept((id), (conn_param), KBUILD_MODNAME) /** * rdma_notify - Notifies the RDMA CM of an asynchronous event that has diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 4118324a0310..3f4c187e435d 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h @@ -538,7 +538,7 @@ static inline void rvt_mod_retry_timer(struct rvt_qp *qp) struct rvt_dev_info *rvt_alloc_device(size_t size, int nports); void rvt_dealloc_device(struct rvt_dev_info *rdi); -int rvt_register_device(struct rvt_dev_info *rvd); +int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id); void rvt_unregister_device(struct rvt_dev_info *rvd); int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr); int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port, diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index 2cdf8dcf4bdc..f3b3e3576f6a 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h @@ -11,6 +11,7 @@ #include <linux/sched.h> #include <linux/kref.h> #include <linux/completion.h> +#include <linux/sched/task.h> /** * enum rdma_restrack_type - HW objects to track @@ -29,6 +30,14 @@ enum rdma_restrack_type { */ RDMA_RESTRACK_QP, /** + * @RDMA_RESTRACK_CM_ID: Connection Manager ID (CM_ID) + */ + RDMA_RESTRACK_CM_ID, + /** + * @RDMA_RESTRACK_MR: Memory Region (MR) + */ + RDMA_RESTRACK_MR, + /** * @RDMA_RESTRACK_MAX: Last entry, used for array dclarations */ RDMA_RESTRACK_MAX @@ -146,8 +155,23 @@ static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res) int __must_check rdma_restrack_get(struct rdma_restrack_entry *res); /** - * rdma_restrack_put() - relase resource + * rdma_restrack_put() - release resource * @res: resource entry */ int rdma_restrack_put(struct rdma_restrack_entry *res); + +/** + * rdma_restrack_set_task() - set the task for this resource + * @res: resource entry + * @task: task struct + */ +static inline void rdma_restrack_set_task(struct rdma_restrack_entry *res, + struct task_struct *task) +{ + if (res->task) + put_task_struct(res->task); + get_task_struct(task); + res->task = task; +} + #endif /* _RDMA_RESTRACK_H_ */ diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 38287d9d23a1..4a4201d997a7 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -37,6 +37,7 @@ #include <linux/uaccess.h> #include <rdma/rdma_user_ioctl.h> #include <rdma/ib_user_ioctl_verbs.h> +#include <rdma/ib_user_ioctl_cmds.h> /* * ======================================= @@ -50,6 +51,7 @@ enum uverbs_attr_type { UVERBS_ATTR_TYPE_PTR_OUT, UVERBS_ATTR_TYPE_IDR, UVERBS_ATTR_TYPE_FD, + UVERBS_ATTR_TYPE_ENUM_IN, }; enum uverbs_obj_access { @@ -61,15 +63,32 @@ enum uverbs_obj_access { enum { UVERBS_ATTR_SPEC_F_MANDATORY = 1U << 0, - /* Support extending attributes by length */ - UVERBS_ATTR_SPEC_F_MIN_SZ = 1U << 1, + /* Support extending attributes by length, validate all unknown size == zero */ + UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO = 1U << 1, }; +/* Specification of a single attribute inside the ioctl message */ struct uverbs_attr_spec { - enum uverbs_attr_type type; union { - u16 len; + /* Header shared by all following union members - to reduce space. 
*/ struct { + enum uverbs_attr_type type; + /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */ + u8 flags; + }; + struct { + enum uverbs_attr_type type; + /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */ + u8 flags; + /* Current known size to kernel */ + u16 len; + /* User isn't allowed to provide something < min_len */ + u16 min_len; + } ptr; + struct { + enum uverbs_attr_type type; + /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */ + u8 flags; /* * higher bits mean the namespace and lower bits mean * the type id within the namespace. @@ -77,9 +96,19 @@ struct uverbs_attr_spec { u16 obj_type; u8 access; } obj; + struct { + enum uverbs_attr_type type; + /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */ + u8 flags; + u8 num_elems; + /* + * The enum attribute can select one of the attributes + * contained in the ids array. Currently only PTR_IN + * attributes are supported in the ids array. + */ + const struct uverbs_attr_spec *ids; + } enum_def; }; - /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */ - u8 flags; }; struct uverbs_attr_spec_hash { @@ -164,30 +193,45 @@ struct uverbs_object_tree_def { }; #define UA_FLAGS(_flags) .flags = _flags -#define __UVERBS_ATTR0(_id, _len, _type, ...) \ +#define __UVERBS_ATTR0(_id, _type, _fld, _attr, ...) \ ((const struct uverbs_attr_def) \ - {.id = _id, .attr = {.type = _type, {.len = _len}, .flags = 0, } }) -#define __UVERBS_ATTR1(_id, _len, _type, _flags) \ + {.id = _id, .attr = {{._fld = {.type = _type, _attr, .flags = 0, } }, } }) +#define __UVERBS_ATTR1(_id, _type, _fld, _attr, _extra1, ...) \ ((const struct uverbs_attr_def) \ - {.id = _id, .attr = {.type = _type, {.len = _len}, _flags, } }) -#define __UVERBS_ATTR(_id, _len, _type, _flags, _n, ...) \ - __UVERBS_ATTR##_n(_id, _len, _type, _flags) + {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1 } },} }) +#define __UVERBS_ATTR2(_id, _type, _fld, _attr, _extra1, _extra2) \ + ((const struct uverbs_attr_def) \ + {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1, _extra2 } },} }) +#define __UVERBS_ATTR(_id, _type, _fld, _attr, _extra1, _extra2, _n, ...) \ + __UVERBS_ATTR##_n(_id, _type, _fld, _attr, _extra1, _extra2) + +#define UVERBS_ATTR_TYPE(_type) \ + .min_len = sizeof(_type), .len = sizeof(_type) +#define UVERBS_ATTR_STRUCT(_type, _last) \ + .min_len = ((uintptr_t)(&((_type *)0)->_last + 1)), .len = sizeof(_type) +#define UVERBS_ATTR_SIZE(_min_len, _len) \ + .min_len = _min_len, .len = _len + /* * In new compiler, UVERBS_ATTR could be simplified by declaring it as * [_id] = {.type = _type, .len = _len, ##__VA_ARGS__} * But since we support older compilers too, we need the more complex code. */ -#define UVERBS_ATTR(_id, _len, _type, ...) \ - __UVERBS_ATTR(_id, _len, _type, ##__VA_ARGS__, 1, 0) +#define UVERBS_ATTR(_id, _type, _fld, _attr, ...) \ + __UVERBS_ATTR(_id, _type, _fld, _attr, ##__VA_ARGS__, 2, 1, 0) #define UVERBS_ATTR_PTR_IN_SZ(_id, _len, ...) \ - UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_IN, ##__VA_ARGS__) + UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_IN, ptr, _len, ##__VA_ARGS__) /* If sizeof(_type) <= sizeof(u64), this will be inlined rather than a pointer */ #define UVERBS_ATTR_PTR_IN(_id, _type, ...) \ - UVERBS_ATTR_PTR_IN_SZ(_id, sizeof(_type), ##__VA_ARGS__) + UVERBS_ATTR_PTR_IN_SZ(_id, _type, ##__VA_ARGS__) #define UVERBS_ATTR_PTR_OUT_SZ(_id, _len, ...) 
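The new size helpers above encode forward/backward compatibility: len is the layout the current kernel knows, min_len the oldest layout userspace may still send. A worked expansion against a hypothetical response struct:

	struct hypothetical_resp {
		__u32 handle;		/* original field */
		__u32 comp_mask;	/* added in a later ABI revision */
	};

	/*
	 * UVERBS_ATTR_STRUCT(struct hypothetical_resp, handle) expands to
	 *	.min_len = 4, .len = 8
	 * i.e. old userspace may pass just the 4-byte prefix, newer
	 * userspace the full 8 bytes.
	 */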
\ - UVERBS_ATTR(_id, _len, UVERBS_ATTR_TYPE_PTR_OUT, ##__VA_ARGS__) + UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_OUT, ptr, _len, ##__VA_ARGS__) #define UVERBS_ATTR_PTR_OUT(_id, _type, ...) \ - UVERBS_ATTR_PTR_OUT_SZ(_id, sizeof(_type), ##__VA_ARGS__) + UVERBS_ATTR_PTR_OUT_SZ(_id, _type, ##__VA_ARGS__) +#define UVERBS_ATTR_ENUM_IN(_id, _enum_arr, ...) \ + UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_ENUM_IN, enum_def, \ + .ids = (_enum_arr), \ + .num_elems = ARRAY_SIZE(_enum_arr), ##__VA_ARGS__) /* * In new compiler, UVERBS_ATTR_IDR (and FD) could be simplified by declaring @@ -202,15 +246,13 @@ struct uverbs_object_tree_def { #define ___UVERBS_ATTR_OBJ0(_id, _obj_class, _obj_type, _access, ...)\ ((const struct uverbs_attr_def) \ {.id = _id, \ - .attr = {.type = _obj_class, \ - {.obj = {.obj_type = _obj_type, .access = _access } },\ - .flags = 0} }) + .attr = { {.obj = {.type = _obj_class, .obj_type = _obj_type, \ + .access = _access, .flags = 0 } }, } }) #define ___UVERBS_ATTR_OBJ1(_id, _obj_class, _obj_type, _access, _flags)\ ((const struct uverbs_attr_def) \ {.id = _id, \ - .attr = {.type = _obj_class, \ - {.obj = {.obj_type = _obj_type, .access = _access} }, \ - _flags} }) + .attr = { {.obj = {.type = _obj_class, .obj_type = _obj_type, \ + .access = _access, _flags} }, } }) #define ___UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, _flags, \ _n, ...) \ ___UVERBS_ATTR_OBJ##_n(_id, _obj_class, _obj_type, _access, _flags) @@ -229,6 +271,11 @@ struct uverbs_object_tree_def { #define DECLARE_UVERBS_ATTR_SPEC(_name, ...) \ const struct uverbs_attr_def _name = __VA_ARGS__ +#define DECLARE_UVERBS_ENUM(_name, ...) \ + const struct uverbs_enum_spec _name = { \ + .len = ARRAY_SIZE(((struct uverbs_attr_spec[]){__VA_ARGS__})),\ + .ids = {__VA_ARGS__}, \ + } #define _UVERBS_METHOD_ATTRS_SZ(...) \ (sizeof((const struct uverbs_attr_def * const []){__VA_ARGS__}) /\ sizeof(const struct uverbs_attr_def *)) @@ -280,6 +327,7 @@ struct uverbs_ptr_attr { u16 len; /* Combination of bits from enum UVERBS_ATTR_F_XXXX */ u16 flags; + u8 enum_id; }; struct uverbs_obj_attr { @@ -336,6 +384,8 @@ static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_b idx & ~UVERBS_ID_NS_MASK); } +#define IS_UVERBS_COPY_ERR(_ret) ((_ret) && (_ret) != -ENOENT) + static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr_bundle *attrs_bundle, u16 idx) { @@ -347,6 +397,29 @@ static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr return &attrs_bundle->hash[idx_bucket].attrs[idx & ~UVERBS_ID_NS_MASK]; } +static inline int uverbs_attr_get_enum_id(const struct uverbs_attr_bundle *attrs_bundle, + u16 idx) +{ + const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); + + if (IS_ERR(attr)) + return PTR_ERR(attr); + + return attr->ptr_attr.enum_id; +} + +static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle, + u16 idx) +{ + struct ib_uobject *uobj = + uverbs_attr_get(attrs_bundle, idx)->obj_attr.uobject; + + if (IS_ERR(uobj)) + return uobj; + + return uobj->object; +} + static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, size_t idx, const void *from, size_t size) { @@ -385,8 +458,8 @@ static inline int _uverbs_copy_from(void *to, /* * Validation ensures attr->ptr_attr.len >= size. If the caller is - * using UVERBS_ATTR_SPEC_F_MIN_SZ then it must call copy_from with - * the right size. + * using UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO then it must call + * uverbs_copy_from_or_zero. 
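A sketch of the intended calling pattern for the new helper above: a handler copies a possibly shorter user object and relies on the zero-filled tail for fields old userspace never sent. The command struct and attribute id are hypothetical, and the handler plumbing is elided:

	static int hypothetical_handler(struct uverbs_attr_bundle *attrs)
	{
		struct hypothetical_cmd cmd;
		int ret;

		ret = uverbs_copy_from_or_zero(&cmd, attrs,
					       HYPOTHETICAL_ATTR_CMD);
		if (ret)
			return ret;

		/* cmd.newer_field reads as 0 when old userspace sent less */
		return 0;
	}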
*/ if (unlikely(size < attr->ptr_attr.len)) return -EINVAL; @@ -400,9 +473,37 @@ static inline int _uverbs_copy_from(void *to, return 0; } +static inline int _uverbs_copy_from_or_zero(void *to, + const struct uverbs_attr_bundle *attrs_bundle, + size_t idx, + size_t size) +{ + const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx); + size_t min_size; + + if (IS_ERR(attr)) + return PTR_ERR(attr); + + min_size = min_t(size_t, size, attr->ptr_attr.len); + + if (uverbs_attr_ptr_is_inline(attr)) + memcpy(to, &attr->ptr_attr.data, min_size); + else if (copy_from_user(to, u64_to_user_ptr(attr->ptr_attr.data), + min_size)) + return -EFAULT; + + if (size > min_size) + memset(to + min_size, 0, size - min_size); + + return 0; +} + #define uverbs_copy_from(to, attrs_bundle, idx) \ _uverbs_copy_from(to, attrs_bundle, idx, sizeof(*to)) +#define uverbs_copy_from_or_zero(to, attrs_bundle, idx) \ + _uverbs_copy_from_or_zero(to, attrs_bundle, idx, sizeof(*to)) + /* ================================================= * Definitions -> Specs infrastructure * ================================================= diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h new file mode 100644 index 000000000000..c5bb4ebdb0b0 --- /dev/null +++ b/include/rdma/uverbs_named_ioctl.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _UVERBS_NAMED_IOCTL_ +#define _UVERBS_NAMED_IOCTL_ + +#include <rdma/uverbs_ioctl.h> + +#ifndef UVERBS_MODULE_NAME +#error "Please #define UVERBS_MODULE_NAME before including rdma/uverbs_named_ioctl.h" +#endif + +#define _UVERBS_PASTE(x, y) x ## y +#define _UVERBS_NAME(x, y) _UVERBS_PASTE(x, y) +#define UVERBS_METHOD(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _method_##id) +#define UVERBS_HANDLER(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _handler_##id) + +#define DECLARE_UVERBS_NAMED_METHOD(id, ...) \ + DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, UVERBS_HANDLER(id), ##__VA_ARGS__) + +#define DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(id, handler, ...) \ + DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, handler, ##__VA_ARGS__) + +#define DECLARE_UVERBS_NAMED_METHOD_NO_OVERRIDE(id, handler, ...) 
\ + DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, NULL, ##__VA_ARGS__) + +#define DECLARE_UVERBS_NAMED_OBJECT(id, ...) \ + DECLARE_UVERBS_OBJECT(UVERBS_OBJECT(id), id, ##__VA_ARGS__) + +#define _UVERBS_COMP_NAME(x, y, z) _UVERBS_NAME(_UVERBS_NAME(x, y), z) + +#define UVERBS_NO_OVERRIDE NULL + +/* This declares a parsing tree with one object and one method. This is usually + * used for merging driver attributes to the common attributes. The driver has + * a chance to override the handler and type attrs of the original object. + * The __VA_ARGS__ just contains a list of attributes. + */ +#define ADD_UVERBS_ATTRIBUTES(_name, _object, _method, _type_attrs, _handler, ...) \ +static DECLARE_UVERBS_METHOD(_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \ + _method_, _name), \ + _method, _handler, ##__VA_ARGS__); \ + \ +static DECLARE_UVERBS_OBJECT(_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \ + _object_, _name), \ + _object, _type_attrs, \ + &_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \ + _method_, _name)); \ + \ +static DECLARE_UVERBS_OBJECT_TREE(_name, \ + &_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \ + _object_, _name)) + +/* A very common use case is that the driver doesn't override the handler and + * type_attrs. Therefore, we provide a simplified macro for this common case. + */ +#define ADD_UVERBS_ATTRIBUTES_SIMPLE(_name, _object, _method, ...) \ + ADD_UVERBS_ATTRIBUTES(_name, _object, _method, UVERBS_NO_OVERRIDE, \ + UVERBS_NO_OVERRIDE, ##__VA_ARGS__) + +#endif diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h index 5f8e20bbd67c..9d56cdb84655 100644 --- a/include/rdma/uverbs_std_types.h +++ b/include/rdma/uverbs_std_types.h @@ -37,26 +37,10 @@ #include <rdma/uverbs_ioctl.h> #include <rdma/ib_user_ioctl_verbs.h> +#define UVERBS_OBJECT(id) uverbs_object_##id + #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) -extern const struct uverbs_object_def uverbs_object_comp_channel; -extern const struct uverbs_object_def uverbs_object_cq; -extern const struct uverbs_object_def uverbs_object_qp; -extern const struct uverbs_object_def uverbs_object_rwq_ind_table; -extern const struct uverbs_object_def uverbs_object_wq; -extern const struct uverbs_object_def uverbs_object_srq; -extern const struct uverbs_object_def uverbs_object_ah; -extern const struct uverbs_object_def uverbs_object_flow; -extern const struct uverbs_object_def uverbs_object_mr; -extern const struct uverbs_object_def uverbs_object_mw; -extern const struct uverbs_object_def uverbs_object_pd; -extern const struct uverbs_object_def uverbs_object_xrcd; -extern const struct uverbs_object_def uverbs_object_device; - -extern const struct uverbs_object_tree_def uverbs_default_objects; -static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void) -{ - return &uverbs_default_objects; -} +const struct uverbs_object_tree_def *uverbs_default_get_objects(void); #else static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(void) { @@ -72,22 +56,22 @@ static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type, return rdma_lookup_get_uobject(type, ucontext, id, write); } -#define uobj_get_type(_object) uverbs_object_##_object.type_attrs +#define uobj_get_type(_object) UVERBS_OBJECT(_object).type_attrs #define uobj_get_read(_type, _id, _ucontext) \ - __uobj_get(_type, false, _ucontext, _id) + __uobj_get(uobj_get_type(_type), false, _ucontext, _id) -#define uobj_get_obj_read(_object, _id, _ucontext) \ +#define uobj_get_obj_read(_object, _type, _id, _ucontext) \ ({ \ struct ib_uobject 
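Tying the pieces together, a driver using this header might merge one extra attribute into a common method roughly as below; the object and method ids follow the uapi naming added in this series, while the attribute id and module prefix are hypothetical:

	#define UVERBS_MODULE_NAME hypothetical_ib
	#include <rdma/uverbs_named_ioctl.h>

	ADD_UVERBS_ATTRIBUTES_SIMPLE(hypothetical_flow_action,
				     UVERBS_OBJECT_FLOW_ACTION,
				     UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
				     &UVERBS_ATTR_PTR_IN(HYPOTHETICAL_ATTR_HW_FLAGS,
							 UVERBS_ATTR_TYPE(__u64)));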
*__uobj = \ - __uobj_get(uverbs_object_##_object.type_attrs, \ + __uobj_get(uobj_get_type(_type), \ false, _ucontext, _id); \ \ (struct ib_##_object *)(IS_ERR(__uobj) ? NULL : __uobj->object);\ }) #define uobj_get_write(_type, _id, _ucontext) \ - __uobj_get(_type, true, _ucontext, _id) + __uobj_get(uobj_get_type(_type), true, _ucontext, _id) static inline void uobj_put_read(struct ib_uobject *uobj) { @@ -124,7 +108,7 @@ static inline struct ib_uobject *__uobj_alloc(const struct uverbs_obj_type *type } #define uobj_alloc(_type, ucontext) \ - __uobj_alloc(_type, ucontext) + __uobj_alloc(uobj_get_type(_type), ucontext) #endif diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h index aeae4466dd25..e69e4c4d80ae 100644 --- a/include/soc/tegra/bpmp.h +++ b/include/soc/tegra/bpmp.h @@ -75,8 +75,8 @@ struct tegra_bpmp { struct mbox_chan *channel; } mbox; - struct tegra_bpmp_channel *channels; - unsigned int num_channels; + spinlock_t atomic_tx_lock; + struct tegra_bpmp_channel *tx_channel, *rx_channel, *threaded_channels; struct { unsigned long *allocated; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 63815f66b274..f0820554caa9 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -49,6 +49,7 @@ enum afs_fs_operation { afs_FS_ExtendLock = 157, /* AFS Extend a file lock */ afs_FS_ReleaseLock = 158, /* AFS Release a file lock */ afs_FS_Lookup = 161, /* AFS lookup file in directory */ + afs_FS_InlineBulkStatus = 65536, /* AFS Fetch multiple file statuses with errors */ afs_FS_FetchData64 = 65537, /* AFS Fetch file data */ afs_FS_StoreData64 = 65538, /* AFS Store file data */ afs_FS_GiveUpAllCallBacks = 65539, /* AFS Give up all our callbacks on a server */ @@ -62,6 +63,27 @@ enum afs_vl_operation { afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */ }; +enum afs_edit_dir_op { + afs_edit_dir_create, + afs_edit_dir_create_error, + afs_edit_dir_create_inval, + afs_edit_dir_create_nospc, + afs_edit_dir_delete, + afs_edit_dir_delete_error, + afs_edit_dir_delete_inval, + afs_edit_dir_delete_noent, +}; + +enum afs_edit_dir_reason { + afs_edit_dir_for_create, + afs_edit_dir_for_link, + afs_edit_dir_for_mkdir, + afs_edit_dir_for_rename, + afs_edit_dir_for_rmdir, + afs_edit_dir_for_symlink, + afs_edit_dir_for_unlink, +}; + #endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */ /* @@ -93,6 +115,7 @@ enum afs_vl_operation { EM(afs_FS_ExtendLock, "FS.ExtendLock") \ EM(afs_FS_ReleaseLock, "FS.ReleaseLock") \ EM(afs_FS_Lookup, "FS.Lookup") \ + EM(afs_FS_InlineBulkStatus, "FS.InlineBulkStatus") \ EM(afs_FS_FetchData64, "FS.FetchData64") \ EM(afs_FS_StoreData64, "FS.StoreData64") \ EM(afs_FS_GiveUpAllCallBacks, "FS.GiveUpAllCallBacks") \ @@ -104,6 +127,25 @@ enum afs_vl_operation { EM(afs_YFSVL_GetEndpoints, "YFSVL.GetEndpoints") \ E_(afs_VL_GetCapabilities, "VL.GetCapabilities") +#define afs_edit_dir_ops \ + EM(afs_edit_dir_create, "create") \ + EM(afs_edit_dir_create_error, "c_fail") \ + EM(afs_edit_dir_create_inval, "c_invl") \ + EM(afs_edit_dir_create_nospc, "c_nspc") \ + EM(afs_edit_dir_delete, "delete") \ + EM(afs_edit_dir_delete_error, "d_err ") \ + EM(afs_edit_dir_delete_inval, "d_invl") \ + E_(afs_edit_dir_delete_noent, "d_nent") + +#define afs_edit_dir_reasons \ + EM(afs_edit_dir_for_create, "Create") \ + EM(afs_edit_dir_for_link, "Link ") \ + EM(afs_edit_dir_for_mkdir, "MkDir ") \ + EM(afs_edit_dir_for_rename, "Rename") \ + EM(afs_edit_dir_for_rmdir, "RmDir ") \ + EM(afs_edit_dir_for_symlink, "Symlnk") \ + 
E_(afs_edit_dir_for_unlink, "Unlink") + /* * Export enum symbols via userspace. @@ -116,6 +158,8 @@ enum afs_vl_operation { afs_call_traces; afs_fs_operations; afs_vl_operations; +afs_edit_dir_ops; +afs_edit_dir_reasons; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -462,6 +506,75 @@ TRACE_EVENT(afs_call_state, __entry->ret, __entry->abort) ); +TRACE_EVENT(afs_edit_dir, + TP_PROTO(struct afs_vnode *dvnode, + enum afs_edit_dir_reason why, + enum afs_edit_dir_op op, + unsigned int block, + unsigned int slot, + unsigned int f_vnode, + unsigned int f_unique, + const char *name), + + TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name), + + TP_STRUCT__entry( + __field(unsigned int, vnode ) + __field(unsigned int, unique ) + __field(enum afs_edit_dir_reason, why ) + __field(enum afs_edit_dir_op, op ) + __field(unsigned int, block ) + __field(unsigned short, slot ) + __field(unsigned int, f_vnode ) + __field(unsigned int, f_unique ) + __array(char, name, 18 ) + ), + + TP_fast_assign( + int __len = strlen(name); + __len = min(__len, 17); + __entry->vnode = dvnode->fid.vnode; + __entry->unique = dvnode->fid.unique; + __entry->why = why; + __entry->op = op; + __entry->block = block; + __entry->slot = slot; + __entry->f_vnode = f_vnode; + __entry->f_unique = f_unique; + memcpy(__entry->name, name, __len); + __entry->name[__len] = 0; + ), + + TP_printk("d=%x:%x %s %s %u[%u] f=%x:%x %s", + __entry->vnode, __entry->unique, + __print_symbolic(__entry->why, afs_edit_dir_reasons), + __print_symbolic(__entry->op, afs_edit_dir_ops), + __entry->block, __entry->slot, + __entry->f_vnode, __entry->f_unique, + __entry->name) + ); + +TRACE_EVENT(afs_protocol_error, + TP_PROTO(struct afs_call *call, int error, const void *where), + + TP_ARGS(call, error, where), + + TP_STRUCT__entry( + __field(unsigned int, call ) + __field(int, error ) + __field(const void *, where ) + ), + + TP_fast_assign( + __entry->call = call ? call->debug_id : 0; + __entry->error = error; + __entry->where = where; + ), + + TP_printk("c=%08x r=%d sp=%pSR", + __entry->call, __entry->error, __entry->where) + ); + #endif /* _TRACE_AFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h new file mode 100644 index 000000000000..aa86e7dba511 --- /dev/null +++ b/include/trace/events/cachefiles.h @@ -0,0 +1,325 @@ +/* CacheFiles tracepoints + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cachefiles + +#if !defined(_TRACE_CACHEFILES_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CACHEFILES_H + +#include <linux/tracepoint.h> + +/* + * Define enums for tracing information. + */ +#ifndef __CACHEFILES_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __CACHEFILES_DECLARE_TRACE_ENUMS_ONCE_ONLY + +enum cachefiles_obj_ref_trace { + cachefiles_obj_put_wait_retry = fscache_obj_ref__nr_traces, + cachefiles_obj_put_wait_timeo, + cachefiles_obj_ref__nr_traces +}; + +#endif + +/* + * Define enum -> string mappings for display. 
+ */ +#define cachefiles_obj_kill_traces \ + EM(FSCACHE_OBJECT_IS_STALE, "stale") \ + EM(FSCACHE_OBJECT_NO_SPACE, "no_space") \ + EM(FSCACHE_OBJECT_WAS_RETIRED, "was_retired") \ + E_(FSCACHE_OBJECT_WAS_CULLED, "was_culled") + +#define cachefiles_obj_ref_traces \ + EM(fscache_obj_get_add_to_deps, "GET add_to_deps") \ + EM(fscache_obj_get_queue, "GET queue") \ + EM(fscache_obj_put_alloc_fail, "PUT alloc_fail") \ + EM(fscache_obj_put_attach_fail, "PUT attach_fail") \ + EM(fscache_obj_put_drop_obj, "PUT drop_obj") \ + EM(fscache_obj_put_enq_dep, "PUT enq_dep") \ + EM(fscache_obj_put_queue, "PUT queue") \ + EM(fscache_obj_put_work, "PUT work") \ + EM(cachefiles_obj_put_wait_retry, "PUT wait_retry") \ + E_(cachefiles_obj_put_wait_timeo, "PUT wait_timeo") + +/* + * Export enum symbols via userspace. + */ +#undef EM +#undef E_ +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define E_(a, b) TRACE_DEFINE_ENUM(a); + +cachefiles_obj_kill_traces; +cachefiles_obj_ref_traces; + +/* + * Now redefine the EM() and E_() macros to map the enums to the strings that + * will be printed in the output. + */ +#undef EM +#undef E_ +#define EM(a, b) { a, b }, +#define E_(a, b) { a, b } + + +TRACE_EVENT(cachefiles_ref, + TP_PROTO(struct cachefiles_object *obj, + struct fscache_cookie *cookie, + enum cachefiles_obj_ref_trace why, + int usage), + + TP_ARGS(obj, cookie, why, usage), + + /* Note that obj may be NULL */ + TP_STRUCT__entry( + __field(struct cachefiles_object *, obj ) + __field(struct fscache_cookie *, cookie ) + __field(enum cachefiles_obj_ref_trace, why ) + __field(int, usage ) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->cookie = cookie; + __entry->usage = usage; + __entry->why = why; + ), + + TP_printk("c=%p o=%p u=%d %s", + __entry->cookie, __entry->obj, __entry->usage, + __print_symbolic(__entry->why, cachefiles_obj_ref_traces)) + ); + +TRACE_EVENT(cachefiles_lookup, + TP_PROTO(struct cachefiles_object *obj, + struct dentry *de, + struct inode *inode), + + TP_ARGS(obj, de, inode), + + TP_STRUCT__entry( + __field(struct cachefiles_object *, obj ) + __field(struct dentry *, de ) + __field(struct inode *, inode ) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->de = de; + __entry->inode = inode; + ), + + TP_printk("o=%p d=%p i=%p", + __entry->obj, __entry->de, __entry->inode) + ); + +TRACE_EVENT(cachefiles_mkdir, + TP_PROTO(struct cachefiles_object *obj, + struct dentry *de, int ret), + + TP_ARGS(obj, de, ret), + + TP_STRUCT__entry( + __field(struct cachefiles_object *, obj ) + __field(struct dentry *, de ) + __field(int, ret ) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->de = de; + __entry->ret = ret; + ), + + TP_printk("o=%p d=%p r=%u", + __entry->obj, __entry->de, __entry->ret) + ); + +TRACE_EVENT(cachefiles_create, + TP_PROTO(struct cachefiles_object *obj, + struct dentry *de, int ret), + + TP_ARGS(obj, de, ret), + + TP_STRUCT__entry( + __field(struct cachefiles_object *, obj ) + __field(struct dentry *, de ) + __field(int, ret ) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->de = de; + __entry->ret = ret; + ), + + TP_printk("o=%p d=%p r=%u", + __entry->obj, __entry->de, __entry->ret) + ); + +TRACE_EVENT(cachefiles_unlink, + TP_PROTO(struct cachefiles_object *obj, + struct dentry *de, + enum fscache_why_object_killed why), + + TP_ARGS(obj, de, why), + + /* Note that obj may be NULL */ + TP_STRUCT__entry( + __field(struct cachefiles_object *, obj ) + __field(struct dentry *, de ) + __field(enum fscache_why_object_killed, why ) + ), + + TP_fast_assign( + 
__entry->obj = obj; + __entry->de = de; + __entry->why = why; + ), + + TP_printk("o=%p d=%p w=%s", + __entry->obj, __entry->de, + __print_symbolic(__entry->why, cachefiles_obj_kill_traces)) + ); + +TRACE_EVENT(cachefiles_rename, + TP_PROTO(struct cachefiles_object *obj, + struct dentry *de, + struct dentry *to, + enum fscache_why_object_killed why), + + TP_ARGS(obj, de, to, why), + + /* Note that obj may be NULL */ + TP_STRUCT__entry( + __field(struct cachefiles_object *, obj ) + __field(struct dentry *, de ) + __field(struct dentry *, to ) + __field(enum fscache_why_object_killed, why ) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->de = de; + __entry->to = to; + __entry->why = why; + ), + + TP_printk("o=%p d=%p t=%p w=%s", + __entry->obj, __entry->de, __entry->to, + __print_symbolic(__entry->why, cachefiles_obj_kill_traces)) + ); + +TRACE_EVENT(cachefiles_mark_active, + TP_PROTO(struct cachefiles_object *obj, + struct dentry *de), + + TP_ARGS(obj, de), + + /* Note that obj may be NULL */ + TP_STRUCT__entry( + __field(struct cachefiles_object *, obj ) + __field(struct dentry *, de ) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->de = de; + ), + + TP_printk("o=%p d=%p", + __entry->obj, __entry->de) + ); + +TRACE_EVENT(cachefiles_wait_active, + TP_PROTO(struct cachefiles_object *obj, + struct dentry *de, + struct cachefiles_object *xobj), + + TP_ARGS(obj, de, xobj), + + /* Note that obj may be NULL */ + TP_STRUCT__entry( + __field(struct cachefiles_object *, obj ) + __field(struct dentry *, de ) + __field(struct cachefiles_object *, xobj ) + __field(u16, flags ) + __field(u16, fsc_flags ) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->de = de; + __entry->xobj = xobj; + __entry->flags = xobj->flags; + __entry->fsc_flags = xobj->fscache.flags; + ), + + TP_printk("o=%p d=%p wo=%p wf=%x wff=%x", + __entry->obj, __entry->de, __entry->xobj, + __entry->flags, __entry->fsc_flags) + ); + +TRACE_EVENT(cachefiles_mark_inactive, + TP_PROTO(struct cachefiles_object *obj, + struct dentry *de, + struct inode *inode), + + TP_ARGS(obj, de, inode), + + /* Note that obj may be NULL */ + TP_STRUCT__entry( + __field(struct cachefiles_object *, obj ) + __field(struct dentry *, de ) + __field(struct inode *, inode ) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->de = de; + __entry->inode = inode; + ), + + TP_printk("o=%p d=%p i=%p", + __entry->obj, __entry->de, __entry->inode) + ); + +TRACE_EVENT(cachefiles_mark_buried, + TP_PROTO(struct cachefiles_object *obj, + struct dentry *de, + enum fscache_why_object_killed why), + + TP_ARGS(obj, de, why), + + /* Note that obj may be NULL */ + TP_STRUCT__entry( + __field(struct cachefiles_object *, obj ) + __field(struct dentry *, de ) + __field(enum fscache_why_object_killed, why ) + ), + + TP_fast_assign( + __entry->obj = obj; + __entry->de = de; + __entry->why = why; + ), + + TP_printk("o=%p d=%p w=%s", + __entry->obj, __entry->de, + __print_symbolic(__entry->why, cachefiles_obj_kill_traces)) + ); + +#endif /* _TRACE_CACHEFILES_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h new file mode 100644 index 000000000000..686cfe997ed2 --- /dev/null +++ b/include/trace/events/fscache.h @@ -0,0 +1,537 @@ +/* FS-Cache tracepoints + * + * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fscache + +#if !defined(_TRACE_FSCACHE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FSCACHE_H + +#include <linux/fscache.h> +#include <linux/tracepoint.h> + +/* + * Define enums for tracing information. + */ +#ifndef __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY + +enum fscache_cookie_trace { + fscache_cookie_collision, + fscache_cookie_discard, + fscache_cookie_get_acquire_parent, + fscache_cookie_get_attach_object, + fscache_cookie_get_reacquire, + fscache_cookie_get_register_netfs, + fscache_cookie_put_acquire_nobufs, + fscache_cookie_put_dup_netfs, + fscache_cookie_put_relinquish, + fscache_cookie_put_object, + fscache_cookie_put_parent, +}; + +enum fscache_page_trace { + fscache_page_cached, + fscache_page_inval, + fscache_page_maybe_release, + fscache_page_radix_clear_store, + fscache_page_radix_delete, + fscache_page_radix_insert, + fscache_page_radix_pend2store, + fscache_page_radix_set_pend, + fscache_page_uncache, + fscache_page_write, + fscache_page_write_end, + fscache_page_write_end_pend, + fscache_page_write_end_noc, + fscache_page_write_wait, + fscache_page_trace__nr +}; + +enum fscache_op_trace { + fscache_op_cancel, + fscache_op_cancel_all, + fscache_op_cancelled, + fscache_op_completed, + fscache_op_enqueue_async, + fscache_op_enqueue_mythread, + fscache_op_gc, + fscache_op_init, + fscache_op_put, + fscache_op_run, + fscache_op_signal, + fscache_op_submit, + fscache_op_submit_ex, + fscache_op_work, + fscache_op_trace__nr +}; + +enum fscache_page_op_trace { + fscache_page_op_alloc_one, + fscache_page_op_attr_changed, + fscache_page_op_check_consistency, + fscache_page_op_invalidate, + fscache_page_op_retr_multi, + fscache_page_op_retr_one, + fscache_page_op_write_one, + fscache_page_op_trace__nr +}; + +#endif + +/* + * Declare tracing information enums and their string mappings for display. 
+ */ +#define fscache_cookie_traces \ + EM(fscache_cookie_collision, "*COLLISION*") \ + EM(fscache_cookie_discard, "DISCARD") \ + EM(fscache_cookie_get_acquire_parent, "GET prn") \ + EM(fscache_cookie_get_attach_object, "GET obj") \ + EM(fscache_cookie_get_reacquire, "GET raq") \ + EM(fscache_cookie_get_register_netfs, "GET net") \ + EM(fscache_cookie_put_acquire_nobufs, "PUT nbf") \ + EM(fscache_cookie_put_dup_netfs, "PUT dnt") \ + EM(fscache_cookie_put_relinquish, "PUT rlq") \ + EM(fscache_cookie_put_object, "PUT obj") \ + E_(fscache_cookie_put_parent, "PUT prn") + +#define fscache_page_traces \ + EM(fscache_page_cached, "Cached ") \ + EM(fscache_page_inval, "InvalPg") \ + EM(fscache_page_maybe_release, "MayRels") \ + EM(fscache_page_uncache, "Uncache") \ + EM(fscache_page_radix_clear_store, "RxCStr ") \ + EM(fscache_page_radix_delete, "RxDel ") \ + EM(fscache_page_radix_insert, "RxIns ") \ + EM(fscache_page_radix_pend2store, "RxP2S ") \ + EM(fscache_page_radix_set_pend, "RxSPend ") \ + EM(fscache_page_write, "WritePg") \ + EM(fscache_page_write_end, "EndPgWr") \ + EM(fscache_page_write_end_pend, "EndPgWP") \ + EM(fscache_page_write_end_noc, "EndPgNC") \ + E_(fscache_page_write_wait, "WtOnWrt") + +#define fscache_op_traces \ + EM(fscache_op_cancel, "Cancel1") \ + EM(fscache_op_cancel_all, "CancelA") \ + EM(fscache_op_cancelled, "Canclld") \ + EM(fscache_op_completed, "Complet") \ + EM(fscache_op_enqueue_async, "EnqAsyn") \ + EM(fscache_op_enqueue_mythread, "EnqMyTh") \ + EM(fscache_op_gc, "GC ") \ + EM(fscache_op_init, "Init ") \ + EM(fscache_op_put, "Put ") \ + EM(fscache_op_run, "Run ") \ + EM(fscache_op_signal, "Signal ") \ + EM(fscache_op_submit, "Submit ") \ + EM(fscache_op_submit_ex, "SubmitX") \ + E_(fscache_op_work, "Work ") + +#define fscache_page_op_traces \ + EM(fscache_page_op_alloc_one, "Alloc1 ") \ + EM(fscache_page_op_attr_changed, "AttrChg") \ + EM(fscache_page_op_check_consistency, "CheckCn") \ + EM(fscache_page_op_invalidate, "Inval ") \ + EM(fscache_page_op_retr_multi, "RetrMul") \ + EM(fscache_page_op_retr_one, "Retr1 ") \ + E_(fscache_page_op_write_one, "Write1 ") + +/* + * Export enum symbols via userspace. + */ +#undef EM +#undef E_ +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define E_(a, b) TRACE_DEFINE_ENUM(a); + +fscache_cookie_traces; + +/* + * Now redefine the EM() and E_() macros to map the enums to the strings that + * will be printed in the output. 
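 *
 * (An illustrative sketch, not part of this patch: after this
 * redefinition, a list such as fscache_cookie_traces expands to an
 * initializer table,
 *
 *	{ fscache_cookie_collision, "*COLLISION*" },
 *	{ fscache_cookie_discard, "DISCARD" },
 *	...
 *	{ fscache_cookie_put_parent, "PUT prn" }
 *
 * which is exactly the array-of-pairs form that __print_symbolic()
 * expects in the TP_printk() templates further down.)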
+ */ +#undef EM +#undef E_ +#define EM(a, b) { a, b }, +#define E_(a, b) { a, b } + + +TRACE_EVENT(fscache_cookie, + TP_PROTO(struct fscache_cookie *cookie, + enum fscache_cookie_trace where, + int usage), + + TP_ARGS(cookie, where, usage), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(struct fscache_cookie *, parent ) + __field(enum fscache_cookie_trace, where ) + __field(int, usage ) + __field(int, n_children ) + __field(int, n_active ) + __field(u8, flags ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->parent = cookie->parent; + __entry->where = where; + __entry->usage = usage; + __entry->n_children = atomic_read(&cookie->n_children); + __entry->n_active = atomic_read(&cookie->n_active); + __entry->flags = cookie->flags; + ), + + TP_printk("%s c=%p u=%d p=%p Nc=%d Na=%d f=%02x", + __print_symbolic(__entry->where, fscache_cookie_traces), + __entry->cookie, __entry->usage, + __entry->parent, __entry->n_children, __entry->n_active, + __entry->flags) + ); + +TRACE_EVENT(fscache_netfs, + TP_PROTO(struct fscache_netfs *netfs), + + TP_ARGS(netfs), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __array(char, name, 8 ) + ), + + TP_fast_assign( + __entry->cookie = netfs->primary_index; + strncpy(__entry->name, netfs->name, 8); + __entry->name[7] = 0; + ), + + TP_printk("c=%p n=%s", + __entry->cookie, __entry->name) + ); + +TRACE_EVENT(fscache_acquire, + TP_PROTO(struct fscache_cookie *cookie), + + TP_ARGS(cookie), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(struct fscache_cookie *, parent ) + __array(char, name, 8 ) + __field(int, p_usage ) + __field(int, p_n_children ) + __field(u8, p_flags ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->parent = cookie->parent; + __entry->p_usage = atomic_read(&cookie->parent->usage); + __entry->p_n_children = atomic_read(&cookie->parent->n_children); + __entry->p_flags = cookie->parent->flags; + memcpy(__entry->name, cookie->def->name, 8); + __entry->name[7] = 0; + ), + + TP_printk("c=%p p=%p pu=%d pc=%d pf=%02x n=%s", + __entry->cookie, __entry->parent, __entry->p_usage, + __entry->p_n_children, __entry->p_flags, __entry->name) + ); + +TRACE_EVENT(fscache_relinquish, + TP_PROTO(struct fscache_cookie *cookie, bool retire), + + TP_ARGS(cookie, retire), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(struct fscache_cookie *, parent ) + __field(int, usage ) + __field(int, n_children ) + __field(int, n_active ) + __field(u8, flags ) + __field(bool, retire ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->parent = cookie->parent; + __entry->usage = atomic_read(&cookie->usage); + __entry->n_children = atomic_read(&cookie->n_children); + __entry->n_active = atomic_read(&cookie->n_active); + __entry->flags = cookie->flags; + __entry->retire = retire; + ), + + TP_printk("c=%p u=%d p=%p Nc=%d Na=%d f=%02x r=%u", + __entry->cookie, __entry->usage, + __entry->parent, __entry->n_children, __entry->n_active, + __entry->flags, __entry->retire) + ); + +TRACE_EVENT(fscache_enable, + TP_PROTO(struct fscache_cookie *cookie), + + TP_ARGS(cookie), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(int, usage ) + __field(int, n_children ) + __field(int, n_active ) + __field(u8, flags ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->usage = atomic_read(&cookie->usage); + __entry->n_children = atomic_read(&cookie->n_children); + __entry->n_active = atomic_read(&cookie->n_active); + 
__entry->flags = cookie->flags; + ), + + TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x", + __entry->cookie, __entry->usage, + __entry->n_children, __entry->n_active, __entry->flags) + ); + +TRACE_EVENT(fscache_disable, + TP_PROTO(struct fscache_cookie *cookie), + + TP_ARGS(cookie), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(int, usage ) + __field(int, n_children ) + __field(int, n_active ) + __field(u8, flags ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->usage = atomic_read(&cookie->usage); + __entry->n_children = atomic_read(&cookie->n_children); + __entry->n_active = atomic_read(&cookie->n_active); + __entry->flags = cookie->flags; + ), + + TP_printk("c=%p u=%d Nc=%d Na=%d f=%02x", + __entry->cookie, __entry->usage, + __entry->n_children, __entry->n_active, __entry->flags) + ); + +TRACE_EVENT(fscache_osm, + TP_PROTO(struct fscache_object *object, + const struct fscache_state *state, + bool wait, bool oob, s8 event_num), + + TP_ARGS(object, state, wait, oob, event_num), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(struct fscache_object *, object ) + __array(char, state, 8 ) + __field(bool, wait ) + __field(bool, oob ) + __field(s8, event_num ) + ), + + TP_fast_assign( + __entry->cookie = object->cookie; + __entry->object = object; + __entry->wait = wait; + __entry->oob = oob; + __entry->event_num = event_num; + memcpy(__entry->state, state->short_name, 8); + ), + + TP_printk("c=%p o=%p %s %s%sev=%d", + __entry->cookie, + __entry->object, + __entry->state, + __print_symbolic(__entry->wait, + { true, "WAIT" }, + { false, "WORK" }), + __print_symbolic(__entry->oob, + { true, " OOB " }, + { false, " " }), + __entry->event_num) + ); + +TRACE_EVENT(fscache_page, + TP_PROTO(struct fscache_cookie *cookie, struct page *page, + enum fscache_page_trace why), + + TP_ARGS(cookie, page, why), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(pgoff_t, page ) + __field(enum fscache_page_trace, why ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->page = page->index; + __entry->why = why; + ), + + TP_printk("c=%p %s pg=%lx", + __entry->cookie, + __print_symbolic(__entry->why, fscache_page_traces), + __entry->page) + ); + +TRACE_EVENT(fscache_check_page, + TP_PROTO(struct fscache_cookie *cookie, struct page *page, + void *val, int n), + + TP_ARGS(cookie, page, val, n), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(void *, page ) + __field(void *, val ) + __field(int, n ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->page = page; + __entry->val = val; + __entry->n = n; + ), + + TP_printk("c=%p pg=%p val=%p n=%d", + __entry->cookie, __entry->page, __entry->val, __entry->n) + ); + +TRACE_EVENT(fscache_wake_cookie, + TP_PROTO(struct fscache_cookie *cookie), + + TP_ARGS(cookie), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + ), + + TP_printk("c=%p", __entry->cookie) + ); + +TRACE_EVENT(fscache_op, + TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op, + enum fscache_op_trace why), + + TP_ARGS(cookie, op, why), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(struct fscache_operation *, op ) + __field(enum fscache_op_trace, why ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->op = op; + __entry->why = why; + ), + + TP_printk("c=%p op=%p %s", + __entry->cookie, __entry->op, + __print_symbolic(__entry->why, 
fscache_op_traces)) + ); + +TRACE_EVENT(fscache_page_op, + TP_PROTO(struct fscache_cookie *cookie, struct page *page, + struct fscache_operation *op, enum fscache_page_op_trace what), + + TP_ARGS(cookie, page, op, what), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(pgoff_t, page ) + __field(struct fscache_operation *, op ) + __field(enum fscache_page_op_trace, what ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->page = page ? page->index : 0; + __entry->op = op; + __entry->what = what; + ), + + TP_printk("c=%p %s pg=%lx op=%p", + __entry->cookie, + __print_symbolic(__entry->what, fscache_page_op_traces), + __entry->page, __entry->op) + ); + +TRACE_EVENT(fscache_wrote_page, + TP_PROTO(struct fscache_cookie *cookie, struct page *page, + struct fscache_operation *op, int ret), + + TP_ARGS(cookie, page, op, ret), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(pgoff_t, page ) + __field(struct fscache_operation *, op ) + __field(int, ret ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->page = page->index; + __entry->op = op; + __entry->ret = ret; + ), + + TP_printk("c=%p pg=%lx op=%p ret=%d", + __entry->cookie, __entry->page, __entry->op, __entry->ret) + ); + +TRACE_EVENT(fscache_gang_lookup, + TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op, + void **results, int n, pgoff_t store_limit), + + TP_ARGS(cookie, op, results, n, store_limit), + + TP_STRUCT__entry( + __field(struct fscache_cookie *, cookie ) + __field(struct fscache_operation *, op ) + __field(pgoff_t, results0 ) + __field(int, n ) + __field(pgoff_t, store_limit ) + ), + + TP_fast_assign( + __entry->cookie = cookie; + __entry->op = op; + __entry->results0 = results[0] ? ((struct page *)results[0])->index : (pgoff_t)-1; + __entry->n = n; + __entry->store_limit = store_limit; + ), + + TP_printk("c=%p op=%p r0=%lx n=%d sl=%lx", + __entry->cookie, __entry->op, __entry->results0, __entry->n, + __entry->store_limit) + ); + +#endif /* _TRACE_FSCACHE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/initcall.h b/include/trace/events/initcall.h new file mode 100644 index 000000000000..8d6cf10d27c9 --- /dev/null +++ b/include/trace/events/initcall.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM initcall + +#if !defined(_TRACE_INITCALL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_INITCALL_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(initcall_level, + + TP_PROTO(const char *level), + + TP_ARGS(level), + + TP_STRUCT__entry( + __string(level, level) + ), + + TP_fast_assign( + __assign_str(level, level); + ), + + TP_printk("level=%s", __get_str(level)) +); + +TRACE_EVENT(initcall_start, + + TP_PROTO(initcall_t func), + + TP_ARGS(func), + + TP_STRUCT__entry( + __field(initcall_t, func) + ), + + TP_fast_assign( + __entry->func = func; + ), + + TP_printk("func=%pS", __entry->func) +); + +TRACE_EVENT(initcall_finish, + + TP_PROTO(initcall_t func, int ret), + + TP_ARGS(func, ret), + + TP_STRUCT__entry( + __field(initcall_t, func) + __field(int, ret) + ), + + TP_fast_assign( + __entry->func = func; + __entry->ret = ret; + ), + + TP_printk("func=%pS ret=%d", __entry->func, __entry->ret) +); + +#endif /* _TRACE_INITCALL_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/migrate.h
b/include/trace/events/migrate.h index bcf4daccd6be..711372845945 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -20,7 +20,7 @@ EM( MR_SYSCALL, "syscall_or_cpuset") \ EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \ EM( MR_NUMA_MISPLACED, "numa_misplaced") \ - EMe(MR_CMA, "cma") + EMe(MR_CONTIG_RANGE, "contig_range") /* * First define the enums in the above macros to be exported to userspace diff --git a/include/trace/events/rtc.h b/include/trace/events/rtc.h new file mode 100644 index 000000000000..621333f1c890 --- /dev/null +++ b/include/trace/events/rtc.h @@ -0,0 +1,206 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rtc + +#if !defined(_TRACE_RTC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RTC_H + +#include <linux/rtc.h> +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(rtc_time_alarm_class, + + TP_PROTO(time64_t secs, int err), + + TP_ARGS(secs, err), + + TP_STRUCT__entry( + __field(time64_t, secs) + __field(int, err) + ), + + TP_fast_assign( + __entry->secs = secs; + __entry->err = err; + ), + + TP_printk("UTC (%lld) (%d)", + __entry->secs, __entry->err + ) +); + +DEFINE_EVENT(rtc_time_alarm_class, rtc_set_time, + + TP_PROTO(time64_t secs, int err), + + TP_ARGS(secs, err) +); + +DEFINE_EVENT(rtc_time_alarm_class, rtc_read_time, + + TP_PROTO(time64_t secs, int err), + + TP_ARGS(secs, err) +); + +DEFINE_EVENT(rtc_time_alarm_class, rtc_set_alarm, + + TP_PROTO(time64_t secs, int err), + + TP_ARGS(secs, err) +); + +DEFINE_EVENT(rtc_time_alarm_class, rtc_read_alarm, + + TP_PROTO(time64_t secs, int err), + + TP_ARGS(secs, err) +); + +TRACE_EVENT(rtc_irq_set_freq, + + TP_PROTO(int freq, int err), + + TP_ARGS(freq, err), + + TP_STRUCT__entry( + __field(int, freq) + __field(int, err) + ), + + TP_fast_assign( + __entry->freq = freq; + __entry->err = err; + ), + + TP_printk("set RTC periodic IRQ frequency:%u (%d)", + __entry->freq, __entry->err + ) +); + +TRACE_EVENT(rtc_irq_set_state, + + TP_PROTO(int enabled, int err), + + TP_ARGS(enabled, err), + + TP_STRUCT__entry( + __field(int, enabled) + __field(int, err) + ), + + TP_fast_assign( + __entry->enabled = enabled; + __entry->err = err; + ), + + TP_printk("%s RTC 2^N Hz periodic IRQs (%d)", + __entry->enabled ? "enable" : "disable", + __entry->err + ) +); + +TRACE_EVENT(rtc_alarm_irq_enable, + + TP_PROTO(unsigned int enabled, int err), + + TP_ARGS(enabled, err), + + TP_STRUCT__entry( + __field(unsigned int, enabled) + __field(int, err) + ), + + TP_fast_assign( + __entry->enabled = enabled; + __entry->err = err; + ), + + TP_printk("%s RTC alarm IRQ (%d)", + __entry->enabled ? 
"enable" : "disable", + __entry->err + ) +); + +DECLARE_EVENT_CLASS(rtc_offset_class, + + TP_PROTO(long offset, int err), + + TP_ARGS(offset, err), + + TP_STRUCT__entry( + __field(long, offset) + __field(int, err) + ), + + TP_fast_assign( + __entry->offset = offset; + __entry->err = err; + ), + + TP_printk("RTC offset: %ld (%d)", + __entry->offset, __entry->err + ) +); + +DEFINE_EVENT(rtc_offset_class, rtc_set_offset, + + TP_PROTO(long offset, int err), + + TP_ARGS(offset, err) +); + +DEFINE_EVENT(rtc_offset_class, rtc_read_offset, + + TP_PROTO(long offset, int err), + + TP_ARGS(offset, err) +); + +DECLARE_EVENT_CLASS(rtc_timer_class, + + TP_PROTO(struct rtc_timer *timer), + + TP_ARGS(timer), + + TP_STRUCT__entry( + __field(struct rtc_timer *, timer) + __field(ktime_t, expires) + __field(ktime_t, period) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->expires = timer->node.expires; + __entry->period = timer->period; + ), + + TP_printk("RTC timer:(%p) expires:%lld period:%lld", + __entry->timer, __entry->expires, __entry->period + ) +); + +DEFINE_EVENT(rtc_timer_class, rtc_timer_enqueue, + + TP_PROTO(struct rtc_timer *timer), + + TP_ARGS(timer) +); + +DEFINE_EVENT(rtc_timer_class, rtc_timer_dequeue, + + TP_PROTO(struct rtc_timer *timer), + + TP_ARGS(timer) +); + +DEFINE_EVENT(rtc_timer_class, rtc_timer_fired, + + TP_PROTO(struct rtc_timer *timer), + + TP_ARGS(timer) +); + +#endif /* _TRACE_RTC_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 970c91a83173..335d87242439 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -50,9 +50,9 @@ DEFINE_EVENT(rpc_task_status, rpc_bind_status, ); TRACE_EVENT(rpc_connect_status, - TP_PROTO(struct rpc_task *task, int status), + TP_PROTO(const struct rpc_task *task), - TP_ARGS(task, status), + TP_ARGS(task), TP_STRUCT__entry( __field(unsigned int, task_id) @@ -63,7 +63,7 @@ TRACE_EVENT(rpc_connect_status, TP_fast_assign( __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; - __entry->status = status; + __entry->status = task->tk_status; ), TP_printk("task:%u@%u status=%d", @@ -103,9 +103,9 @@ TRACE_EVENT(rpc_request, DECLARE_EVENT_CLASS(rpc_task_running, - TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action), + TP_PROTO(const struct rpc_task *task, const void *action), - TP_ARGS(clnt, task, action), + TP_ARGS(task, action), TP_STRUCT__entry( __field(unsigned int, task_id) @@ -117,7 +117,8 @@ DECLARE_EVENT_CLASS(rpc_task_running, ), TP_fast_assign( - __entry->client_id = clnt ? clnt->cl_clid : -1; + __entry->client_id = task->tk_client ? 
+ task->tk_client->cl_clid : -1; __entry->task_id = task->tk_pid; __entry->action = action; __entry->runstate = task->tk_runstate; @@ -136,33 +137,33 @@ DECLARE_EVENT_CLASS(rpc_task_running, DEFINE_EVENT(rpc_task_running, rpc_task_begin, - TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action), + TP_PROTO(const struct rpc_task *task, const void *action), - TP_ARGS(clnt, task, action) + TP_ARGS(task, action) ); DEFINE_EVENT(rpc_task_running, rpc_task_run_action, - TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action), + TP_PROTO(const struct rpc_task *task, const void *action), - TP_ARGS(clnt, task, action) + TP_ARGS(task, action) ); DEFINE_EVENT(rpc_task_running, rpc_task_complete, - TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action), + TP_PROTO(const struct rpc_task *task, const void *action), - TP_ARGS(clnt, task, action) + TP_ARGS(task, action) ); DECLARE_EVENT_CLASS(rpc_task_queued, - TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q), + TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q), - TP_ARGS(clnt, task, q), + TP_ARGS(task, q), TP_STRUCT__entry( __field(unsigned int, task_id) @@ -175,7 +176,8 @@ DECLARE_EVENT_CLASS(rpc_task_queued, ), TP_fast_assign( - __entry->client_id = clnt ? clnt->cl_clid : -1; + __entry->client_id = task->tk_client ? + task->tk_client->cl_clid : -1; __entry->task_id = task->tk_pid; __entry->timeout = task->tk_timeout; __entry->runstate = task->tk_runstate; @@ -196,20 +198,65 @@ DECLARE_EVENT_CLASS(rpc_task_queued, DEFINE_EVENT(rpc_task_queued, rpc_task_sleep, - TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q), + TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q), - TP_ARGS(clnt, task, q) + TP_ARGS(task, q) ); DEFINE_EVENT(rpc_task_queued, rpc_task_wakeup, - TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q), + TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q), - TP_ARGS(clnt, task, q) + TP_ARGS(task, q) ); +TRACE_EVENT(rpc_stats_latency, + + TP_PROTO( + const struct rpc_task *task, + ktime_t backlog, + ktime_t rtt, + ktime_t execute + ), + + TP_ARGS(task, backlog, rtt, execute), + + TP_STRUCT__entry( + __field(u32, xid) + __field(int, version) + __string(progname, task->tk_client->cl_program->name) + __string(procname, rpc_proc_name(task)) + __field(unsigned long, backlog) + __field(unsigned long, rtt) + __field(unsigned long, execute) + __string(addr, + task->tk_xprt->address_strings[RPC_DISPLAY_ADDR]) + __string(port, + task->tk_xprt->address_strings[RPC_DISPLAY_PORT]) + ), + + TP_fast_assign( + __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); + __entry->version = task->tk_client->cl_vers; + __assign_str(progname, task->tk_client->cl_program->name) + __assign_str(procname, rpc_proc_name(task)) + __entry->backlog = ktime_to_us(backlog); + __entry->rtt = ktime_to_us(rtt); + __entry->execute = ktime_to_us(execute); + __assign_str(addr, + task->tk_xprt->address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, + task->tk_xprt->address_strings[RPC_DISPLAY_PORT]); + ), + + TP_printk("peer=[%s]:%s xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu", + __get_str(addr), __get_str(port), __entry->xid, + __get_str(progname), __entry->version, __get_str(procname), + __entry->backlog, __entry->rtt, __entry->execute) +); + /* * First define the enums in the below 
macros to be exported to userspace * via TRACE_DEFINE_ENUM(). @@ -406,6 +453,27 @@ DEFINE_EVENT(rpc_xprt_event, xprt_complete_rqst, TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status), TP_ARGS(xprt, xid, status)); +TRACE_EVENT(xprt_ping, + TP_PROTO(const struct rpc_xprt *xprt, int status), + + TP_ARGS(xprt, status), + + TP_STRUCT__entry( + __field(int, status) + __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) + __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) + ), + + TP_fast_assign( + __entry->status = status; + __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + ), + + TP_printk("peer=[%s]:%s status=%d", + __get_str(addr), __get_str(port), __entry->status) +); + TRACE_EVENT(xs_tcp_data_ready, TP_PROTO(struct rpc_xprt *xprt, int err, unsigned int total), @@ -485,31 +553,55 @@ TRACE_EVENT(xs_tcp_data_recv, { (1UL << RQ_BUSY), "RQ_BUSY"}) TRACE_EVENT(svc_recv, - TP_PROTO(struct svc_rqst *rqst, int status), + TP_PROTO(struct svc_rqst *rqst, int len), - TP_ARGS(rqst, status), + TP_ARGS(rqst, len), TP_STRUCT__entry( __field(u32, xid) - __field(int, status) + __field(int, len) __field(unsigned long, flags) - __dynamic_array(unsigned char, addr, rqst->rq_addrlen) + __string(addr, rqst->rq_xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xid = status > 0 ? be32_to_cpu(rqst->rq_xid) : 0; - __entry->status = status; + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->len = len; __entry->flags = rqst->rq_flags; - memcpy(__get_dynamic_array(addr), - &rqst->rq_addr, rqst->rq_addrlen); + __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), - TP_printk("addr=%pIScp xid=0x%08x status=%d flags=%s", - (struct sockaddr *)__get_dynamic_array(addr), - __entry->xid, __entry->status, + TP_printk("addr=%s xid=0x%08x len=%d flags=%s", + __get_str(addr), __entry->xid, __entry->len, show_rqstp_flags(__entry->flags)) ); +TRACE_EVENT(svc_process, + TP_PROTO(const struct svc_rqst *rqst, const char *name), + + TP_ARGS(rqst, name), + + TP_STRUCT__entry( + __field(u32, xid) + __field(u32, vers) + __field(u32, proc) + __string(service, name) + __string(addr, rqst->rq_xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->vers = rqst->rq_vers; + __entry->proc = rqst->rq_proc; + __assign_str(service, name); + __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u", + __get_str(addr), __entry->xid, + __get_str(service), __entry->vers, __entry->proc) +); + DECLARE_EVENT_CLASS(svc_rqst_event, TP_PROTO(struct svc_rqst *rqst), @@ -519,20 +611,18 @@ DECLARE_EVENT_CLASS(svc_rqst_event, TP_STRUCT__entry( __field(u32, xid) __field(unsigned long, flags) - __dynamic_array(unsigned char, addr, rqst->rq_addrlen) + __string(addr, rqst->rq_xprt->xpt_remotebuf) ), TP_fast_assign( __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->flags = rqst->rq_flags; - memcpy(__get_dynamic_array(addr), - &rqst->rq_addr, rqst->rq_addrlen); + __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), - TP_printk("addr=%pIScp rq_xid=0x%08x flags=%s", - (struct sockaddr *)__get_dynamic_array(addr), - __entry->xid, - show_rqstp_flags(__entry->flags)) + TP_printk("addr=%s xid=0x%08x flags=%s", + __get_str(addr), __entry->xid, + show_rqstp_flags(__entry->flags)) ); DEFINE_EVENT(svc_rqst_event, svc_defer, @@ -553,27 +643,21 @@ DECLARE_EVENT_CLASS(svc_rqst_status, __field(u32, xid) __field(int, status) __field(unsigned long, flags) - __dynamic_array(unsigned char, 
addr, rqst->rq_addrlen) + __string(addr, rqst->rq_xprt->xpt_remotebuf) ), TP_fast_assign( __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->status = status; __entry->flags = rqst->rq_flags; - memcpy(__get_dynamic_array(addr), - &rqst->rq_addr, rqst->rq_addrlen); + __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), - TP_printk("addr=%pIScp rq_xid=0x%08x status=%d flags=%s", - (struct sockaddr *)__get_dynamic_array(addr), - __entry->xid, - __entry->status, show_rqstp_flags(__entry->flags)) + TP_printk("addr=%s xid=0x%08x status=%d flags=%s", + __get_str(addr), __entry->xid, + __entry->status, show_rqstp_flags(__entry->flags)) ); -DEFINE_EVENT(svc_rqst_status, svc_process, - TP_PROTO(struct svc_rqst *rqst, int status), - TP_ARGS(rqst, status)); - DEFINE_EVENT(svc_rqst_status, svc_send, TP_PROTO(struct svc_rqst *rqst, int status), TP_ARGS(rqst, status)); @@ -591,7 +675,9 @@ DEFINE_EVENT(svc_rqst_status, svc_send, { (1UL << XPT_OLD), "XPT_OLD"}, \ { (1UL << XPT_LISTENER), "XPT_LISTENER"}, \ { (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \ - { (1UL << XPT_LOCAL), "XPT_LOCAL"}) + { (1UL << XPT_LOCAL), "XPT_LOCAL"}, \ + { (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \ + { (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"}) TRACE_EVENT(svc_xprt_do_enqueue, TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst), @@ -602,26 +688,19 @@ TRACE_EVENT(svc_xprt_do_enqueue, __field(struct svc_xprt *, xprt) __field(int, pid) __field(unsigned long, flags) - __dynamic_array(unsigned char, addr, xprt != NULL ? - xprt->xpt_remotelen : 0) + __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( __entry->xprt = xprt; __entry->pid = rqst? rqst->rq_task->pid : 0; - if (xprt) { - memcpy(__get_dynamic_array(addr), - &xprt->xpt_remote, - xprt->xpt_remotelen); - __entry->flags = xprt->xpt_flags; - } else - __entry->flags = 0; - ), - - TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt, - __get_dynamic_array_len(addr) != 0 ? - (struct sockaddr *)__get_dynamic_array(addr) : NULL, - __entry->pid, show_svc_xprt_flags(__entry->flags)) + __entry->flags = xprt->xpt_flags; + __assign_str(addr, xprt->xpt_remotebuf); + ), + + TP_printk("xprt=%p addr=%s pid=%d flags=%s", + __entry->xprt, __get_str(addr), + __entry->pid, show_svc_xprt_flags(__entry->flags)) ); DECLARE_EVENT_CLASS(svc_xprt_event, @@ -632,35 +711,50 @@ DECLARE_EVENT_CLASS(svc_xprt_event, TP_STRUCT__entry( __field(struct svc_xprt *, xprt) __field(unsigned long, flags) - __dynamic_array(unsigned char, addr, xprt != NULL ? - xprt->xpt_remotelen : 0) + __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( __entry->xprt = xprt; - if (xprt) { - memcpy(__get_dynamic_array(addr), - &xprt->xpt_remote, - xprt->xpt_remotelen); - __entry->flags = xprt->xpt_flags; - } else - __entry->flags = 0; - ), - - TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt, - __get_dynamic_array_len(addr) != 0 ? 
- (struct sockaddr *)__get_dynamic_array(addr) : NULL, - show_svc_xprt_flags(__entry->flags)) -); + __entry->flags = xprt->xpt_flags; + __assign_str(addr, xprt->xpt_remotebuf); + ), -DEFINE_EVENT(svc_xprt_event, svc_xprt_dequeue, - TP_PROTO(struct svc_xprt *xprt), - TP_ARGS(xprt)); + TP_printk("xprt=%p addr=%s flags=%s", + __entry->xprt, __get_str(addr), + show_svc_xprt_flags(__entry->flags)) +); DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space, TP_PROTO(struct svc_xprt *xprt), TP_ARGS(xprt)); +TRACE_EVENT(svc_xprt_dequeue, + TP_PROTO(struct svc_rqst *rqst), + + TP_ARGS(rqst), + + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) + __field(unsigned long, flags) + __field(unsigned long, wakeup) + __string(addr, rqst->rq_xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->xprt = rqst->rq_xprt; + __entry->flags = rqst->rq_xprt->xpt_flags; + __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(), + rqst->rq_qtime)); + __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); + ), + + TP_printk("xprt=%p addr=%s flags=%s wakeup-us=%lu", + __entry->xprt, __get_str(addr), + show_svc_xprt_flags(__entry->flags), + __entry->wakeup) +); + TRACE_EVENT(svc_wake_up, TP_PROTO(int pid), @@ -686,28 +780,42 @@ TRACE_EVENT(svc_handle_xprt, __field(struct svc_xprt *, xprt) __field(int, len) __field(unsigned long, flags) - __dynamic_array(unsigned char, addr, xprt != NULL ? - xprt->xpt_remotelen : 0) + __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( __entry->xprt = xprt; __entry->len = len; - if (xprt) { - memcpy(__get_dynamic_array(addr), - &xprt->xpt_remote, - xprt->xpt_remotelen); - __entry->flags = xprt->xpt_flags; - } else - __entry->flags = 0; - ), - - TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt, - __get_dynamic_array_len(addr) != 0 ? - (struct sockaddr *)__get_dynamic_array(addr) : NULL, + __entry->flags = xprt->xpt_flags; + __assign_str(addr, xprt->xpt_remotebuf); + ), + + TP_printk("xprt=%p addr=%s len=%d flags=%s", + __entry->xprt, __get_str(addr), __entry->len, show_svc_xprt_flags(__entry->flags)) ); +TRACE_EVENT(svc_stats_latency, + TP_PROTO(const struct svc_rqst *rqst), + + TP_ARGS(rqst), + + TP_STRUCT__entry( + __field(u32, xid) + __field(unsigned long, execute) + __string(addr, rqst->rq_xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->execute = ktime_to_us(ktime_sub(ktime_get(), + rqst->rq_stime)); + __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s xid=0x%08x execute-us=%lu", + __get_str(addr), __entry->xid, __entry->execute) +); DECLARE_EVENT_CLASS(svc_deferred_event, TP_PROTO(struct svc_deferred_req *dr), @@ -716,18 +824,16 @@ DECLARE_EVENT_CLASS(svc_deferred_event, TP_STRUCT__entry( __field(u32, xid) - __dynamic_array(unsigned char, addr, dr->addrlen) + __string(addr, dr->xprt->xpt_remotebuf) ), TP_fast_assign( __entry->xid = be32_to_cpu(*(__be32 *)(dr->args + (dr->xprt_hlen>>2))); - memcpy(__get_dynamic_array(addr), &dr->addr, dr->addrlen); + __assign_str(addr, dr->xprt->xpt_remotebuf); ), - TP_printk("addr=%pIScp xid=0x%08x", - (struct sockaddr *)__get_dynamic_array(addr), - __entry->xid) + TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid) ); DEFINE_EVENT(svc_deferred_event, svc_drop_deferred, diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index e0b8b9173e1c..a1cb91342231 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -78,26 +78,29 @@ TRACE_EVENT(mm_vmscan_kswapd_wake, TRACE_EVENT(mm_vmscan_wakeup_kswapd, - 
TP_PROTO(int nid, int zid, int order), + TP_PROTO(int nid, int zid, int order, gfp_t gfp_flags), - TP_ARGS(nid, zid, order), + TP_ARGS(nid, zid, order, gfp_flags), TP_STRUCT__entry( - __field( int, nid ) - __field( int, zid ) - __field( int, order ) + __field( int, nid ) + __field( int, zid ) + __field( int, order ) + __field( gfp_t, gfp_flags ) ), TP_fast_assign( __entry->nid = nid; __entry->zid = zid; __entry->order = order; + __entry->gfp_flags = gfp_flags; ), - TP_printk("nid=%d zid=%d order=%d", + TP_printk("nid=%d zid=%d order=%d gfp_flags=%s", __entry->nid, __entry->zid, - __entry->order) + __entry->order, + show_gfp_flags(__entry->gfp_flags)) ); DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template, @@ -343,15 +346,9 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive, TP_PROTO(int nid, unsigned long nr_scanned, unsigned long nr_reclaimed, - unsigned long nr_dirty, unsigned long nr_writeback, - unsigned long nr_congested, unsigned long nr_immediate, - unsigned long nr_activate, unsigned long nr_ref_keep, - unsigned long nr_unmap_fail, - int priority, int file), + struct reclaim_stat *stat, int priority, int file), - TP_ARGS(nid, nr_scanned, nr_reclaimed, nr_dirty, nr_writeback, - nr_congested, nr_immediate, nr_activate, nr_ref_keep, - nr_unmap_fail, priority, file), + TP_ARGS(nid, nr_scanned, nr_reclaimed, stat, priority, file), TP_STRUCT__entry( __field(int, nid) @@ -372,13 +369,13 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive, __entry->nid = nid; __entry->nr_scanned = nr_scanned; __entry->nr_reclaimed = nr_reclaimed; - __entry->nr_dirty = nr_dirty; - __entry->nr_writeback = nr_writeback; - __entry->nr_congested = nr_congested; - __entry->nr_immediate = nr_immediate; - __entry->nr_activate = nr_activate; - __entry->nr_ref_keep = nr_ref_keep; - __entry->nr_unmap_fail = nr_unmap_fail; + __entry->nr_dirty = stat->nr_dirty; + __entry->nr_writeback = stat->nr_writeback; + __entry->nr_congested = stat->nr_congested; + __entry->nr_immediate = stat->nr_immediate; + __entry->nr_activate = stat->nr_activate; + __entry->nr_ref_keep = stat->nr_ref_keep; + __entry->nr_unmap_fail = stat->nr_unmap_fail; __entry->priority = priority; __entry->reclaim_flags = trace_shrink_flags(file); ), diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index f8b134f5608f..e7ee32861d51 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -27,6 +27,9 @@ # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ #endif +/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */ +#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ + /* * Flags for mlock */ diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index 6088bca89917..558b902f18d4 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -94,6 +94,9 @@ typedef struct siginfo { unsigned int _flags; /* see ia64 si_flags */ unsigned long _isr; /* isr */ #endif + +#define __ADDR_BND_PKEY_PAD (__alignof__(void *) < sizeof(short) ? 
\ + sizeof(short) : __alignof__(void *)) union { /* * used when si_code=BUS_MCEERR_AR or @@ -102,13 +105,13 @@ typedef struct siginfo { short _addr_lsb; /* LSB of the reported address */ /* used when si_code=SEGV_BNDERR */ struct { - void *_dummy_bnd; + char _dummy_bnd[__ADDR_BND_PKEY_PAD]; void __user *_lower; void __user *_upper; } _addr_bnd; /* used when si_code=SEGV_PKUERR */ struct { - void *_dummy_pkey; + char _dummy_pkey[__ADDR_BND_PKEY_PAD]; __u32 _pkey; } _addr_pkey; }; @@ -208,7 +211,8 @@ typedef struct siginfo { #define __FPE_INVASC 12 /* invalid ASCII digit */ #define __FPE_INVDEC 13 /* invalid decimal digit */ #define FPE_FLTUNK 14 /* undiagnosed floating-point exception */ -#define NSIGFPE 14 +#define FPE_CONDTRAP 15 /* trap on condition */ +#define NSIGFPE 15 /* * SIGSEGV si_codes diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h index 92537757590a..5ed721ad5b19 100644 --- a/include/uapi/linux/const.h +++ b/include/uapi/linux/const.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* const.h: Macros for dealing with constants. */ -#ifndef _LINUX_CONST_H -#define _LINUX_CONST_H +#ifndef _UAPI_LINUX_CONST_H +#define _UAPI_LINUX_CONST_H /* Some constant macros are used in both assembler and * C code. Therefore we cannot annotate them always with @@ -22,7 +22,10 @@ #define _AT(T,X) ((T)(X)) #endif -#define _BITUL(x) (_AC(1,UL) << (x)) -#define _BITULL(x) (_AC(1,ULL) << (x)) +#define _UL(x) (_AC(x, UL)) +#define _ULL(x) (_AC(x, ULL)) -#endif /* !(_LINUX_CONST_H) */ +#define _BITUL(x) (_UL(1) << (x)) +#define _BITULL(x) (_ULL(1) << (x)) + +#endif /* _UAPI_LINUX_CONST_H */ diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index 14c44ec8b622..d1e49514977b 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -270,9 +270,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 37 +#define DM_VERSION_MINOR 39 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2017-09-20)" +#define DM_VERSION_EXTRA "-ioctl (2018-04-03)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/uapi/linux/inotify.h b/include/uapi/linux/inotify.h index 5474461683db..4800bf2a531d 100644 --- a/include/uapi/linux/inotify.h +++ b/include/uapi/linux/inotify.h @@ -71,5 +71,13 @@ struct inotify_event { #define IN_CLOEXEC O_CLOEXEC #define IN_NONBLOCK O_NONBLOCK +/* + * ioctl numbers: inotify uses 'I' prefix for all ioctls, + * except historical FIONREAD, which is based on 'T'. + * + * INOTIFY_IOC_SETNEXTWD: set desired number of next created + * watch descriptor. + */ +#define INOTIFY_IOC_SETNEXTWD _IOW('I', 0, __s32) #endif /* _UAPI_LINUX_INOTIFY_H */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 7b26d4b0b052..1065006c9bf5 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -396,6 +396,10 @@ struct kvm_run { char padding[256]; }; + /* 2048 is the size of the char array used to bound/pad the size + * of the union that holds sync regs. + */ + #define SYNC_REGS_SIZE_BYTES 2048 /* * shared registers between kvm and userspace. 
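 * (Aside, an illustrative sketch rather than part of this patch: the
 * constant above lets the sizing invariant be checked at build time, e.g.
 *
 *	_Static_assert(sizeof(struct kvm_sync_regs) <= SYNC_REGS_SIZE_BYTES,
 *		       "kvm_sync_regs must fit in the padded union");
 *
 * where struct kvm_sync_regs is the per-architecture type provided by
 * <asm/kvm.h>.)
 *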
* kvm_valid_regs specifies the register classes set by the host @@ -407,7 +411,7 @@ struct kvm_run { __u64 kvm_dirty_regs; union { struct kvm_sync_regs regs; - char padding[2048]; + char padding[SYNC_REGS_SIZE_BYTES]; } s; }; @@ -925,7 +929,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_GS 140 #define KVM_CAP_S390_AIS 141 #define KVM_CAP_SPAPR_TCE_VFIO 142 -#define KVM_CAP_X86_GUEST_MWAIT 143 +#define KVM_CAP_X86_DISABLE_EXITS 143 #define KVM_CAP_ARM_USER_IRQ 144 #define KVM_CAP_S390_CMMA_MIGRATION 145 #define KVM_CAP_PPC_FWNMI 146 @@ -936,6 +940,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_GET_CPU_CHAR 151 #define KVM_CAP_S390_BPB 152 #define KVM_CAP_GET_MSR_FEATURES 153 +#define KVM_CAP_HYPERV_EVENTFD 154 #ifdef KVM_CAP_IRQ_ROUTING @@ -1375,6 +1380,10 @@ struct kvm_enc_region { #define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region) #define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region) +/* Available with KVM_CAP_HYPERV_EVENTFD */ +#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd) + + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ @@ -1515,4 +1524,14 @@ struct kvm_assigned_msix_entry { #define KVM_ARM_DEV_EL1_PTIMER (1 << 1) #define KVM_ARM_DEV_PMU (1 << 2) +struct kvm_hyperv_eventfd { + __u32 conn_id; + __s32 fd; + __u32 flags; + __u32 padding[3]; +}; + +#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff +#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) + #endif /* __LINUX_KVM_H */ diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h index 5d5ab81dc9be..e4a0d9a9a9e8 100644 --- a/include/uapi/linux/msg.h +++ b/include/uapi/linux/msg.h @@ -7,6 +7,7 @@ /* ipcs ctl commands */ #define MSG_STAT 11 #define MSG_INFO 12 +#define MSG_STAT_ANY 13 /* msgrcv options */ #define MSG_NOERROR 010000 /* no error if message is too big */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 0c79eac5e9b8..103ba797a8f3 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -520,6 +520,7 @@ #define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */ #define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */ #define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */ +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ #define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */ #define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ #define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */ @@ -547,6 +548,7 @@ #define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */ #define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */ #define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */ +#define PCI_EXP_LNKSTA_CLS_16_0GB 0x0004 /* Current Link Speed 16.0GT/s */ #define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ #define PCI_EXP_LNKSTA_NLW_X1 0x0010 /* Current Link Width x1 */ #define PCI_EXP_LNKSTA_NLW_X2 0x0020 /* Current Link Width x2 */ @@ -648,8 +650,9 @@ #define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */ #define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */ #define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ -#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5.0GT/s */ -#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8.0GT/s */ +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ 
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ #define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */ #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ #define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ diff --git a/include/uapi/linux/qemu_fw_cfg.h b/include/uapi/linux/qemu_fw_cfg.h new file mode 100644 index 000000000000..e089c0159ec2 --- /dev/null +++ b/include/uapi/linux/qemu_fw_cfg.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +#ifndef _LINUX_FW_CFG_H +#define _LINUX_FW_CFG_H + +#include <linux/types.h> + +#define FW_CFG_ACPI_DEVICE_ID "QEMU0002" + +/* selector key values for "well-known" fw_cfg entries */ +#define FW_CFG_SIGNATURE 0x00 +#define FW_CFG_ID 0x01 +#define FW_CFG_UUID 0x02 +#define FW_CFG_RAM_SIZE 0x03 +#define FW_CFG_NOGRAPHIC 0x04 +#define FW_CFG_NB_CPUS 0x05 +#define FW_CFG_MACHINE_ID 0x06 +#define FW_CFG_KERNEL_ADDR 0x07 +#define FW_CFG_KERNEL_SIZE 0x08 +#define FW_CFG_KERNEL_CMDLINE 0x09 +#define FW_CFG_INITRD_ADDR 0x0a +#define FW_CFG_INITRD_SIZE 0x0b +#define FW_CFG_BOOT_DEVICE 0x0c +#define FW_CFG_NUMA 0x0d +#define FW_CFG_BOOT_MENU 0x0e +#define FW_CFG_MAX_CPUS 0x0f +#define FW_CFG_KERNEL_ENTRY 0x10 +#define FW_CFG_KERNEL_DATA 0x11 +#define FW_CFG_INITRD_DATA 0x12 +#define FW_CFG_CMDLINE_ADDR 0x13 +#define FW_CFG_CMDLINE_SIZE 0x14 +#define FW_CFG_CMDLINE_DATA 0x15 +#define FW_CFG_SETUP_ADDR 0x16 +#define FW_CFG_SETUP_SIZE 0x17 +#define FW_CFG_SETUP_DATA 0x18 +#define FW_CFG_FILE_DIR 0x19 + +#define FW_CFG_FILE_FIRST 0x20 +#define FW_CFG_FILE_SLOTS_MIN 0x10 + +#define FW_CFG_WRITE_CHANNEL 0x4000 +#define FW_CFG_ARCH_LOCAL 0x8000 +#define FW_CFG_ENTRY_MASK (~(FW_CFG_WRITE_CHANNEL | FW_CFG_ARCH_LOCAL)) + +#define FW_CFG_INVALID 0xffff + +/* width in bytes of fw_cfg control register */ +#define FW_CFG_CTL_SIZE 0x02 + +/* fw_cfg "file name" is up to 56 characters (including terminating nul) */ +#define FW_CFG_MAX_FILE_PATH 56 + +/* size in bytes of fw_cfg signature */ +#define FW_CFG_SIG_SIZE 4 + +/* FW_CFG_ID bits */ +#define FW_CFG_VERSION 0x01 +#define FW_CFG_VERSION_DMA 0x02 + +/* fw_cfg file directory entry type */ +struct fw_cfg_file { + __be32 size; + __be16 select; + __u16 reserved; + char name[FW_CFG_MAX_FILE_PATH]; +}; + +/* FW_CFG_DMA_CONTROL bits */ +#define FW_CFG_DMA_CTL_ERROR 0x01 +#define FW_CFG_DMA_CTL_READ 0x02 +#define FW_CFG_DMA_CTL_SKIP 0x04 +#define FW_CFG_DMA_CTL_SELECT 0x08 +#define FW_CFG_DMA_CTL_WRITE 0x10 + +#define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */ + +/* Control as first field allows for different structures selected by this + * field, which might be useful in the future + */ +struct fw_cfg_dma_access { + __be32 control; + __be32 length; + __be64 address; +}; + +#define FW_CFG_VMCOREINFO_FILENAME "etc/vmcoreinfo" + +#define FW_CFG_VMCOREINFO_FORMAT_NONE 0x0 +#define FW_CFG_VMCOREINFO_FORMAT_ELF 0x1 + +struct fw_cfg_vmcoreinfo { + __le16 host_format; + __le16 guest_format; + __le32 size; + __le64 paddr; +}; + +#endif diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index afd4346386e0..b64d583bf053 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -127,6 +127,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_STREAM_SCHEDULER 123 #define SCTP_STREAM_SCHEDULER_VALUE 124 #define SCTP_INTERLEAVING_SUPPORTED 125 +#define SCTP_SENDMSG_CONNECT 126 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 diff --git a/include/uapi/linux/sem.h 
b/include/uapi/linux/sem.h index 9c3e745b0656..39a1876f039e 100644 --- a/include/uapi/linux/sem.h +++ b/include/uapi/linux/sem.h @@ -19,6 +19,7 @@ /* ipcs ctl cmds */ #define SEM_STAT 18 #define SEM_INFO 19 +#define SEM_STAT_ANY 20 /* Obsolete, used only for backwards compatibility and libc5 compiles */ struct semid_ds { diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h index 4de12a39b075..dde1344f047c 100644 --- a/include/uapi/linux/shm.h +++ b/include/uapi/linux/shm.h @@ -83,8 +83,9 @@ struct shmid_ds { #define SHM_UNLOCK 12 /* ipcs ctl commands */ -#define SHM_STAT 13 -#define SHM_INFO 14 +#define SHM_STAT 13 +#define SHM_INFO 14 +#define SHM_STAT_ANY 15 /* Obsolete, used only for backwards compatibility */ struct shminfo { diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index c74372163ed2..1aa7b82e8169 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -575,6 +575,33 @@ struct vfio_device_gfx_plane_info { #define VFIO_DEVICE_GET_GFX_DMABUF _IO(VFIO_TYPE, VFIO_BASE + 15) +/** + * VFIO_DEVICE_IOEVENTFD - _IOW(VFIO_TYPE, VFIO_BASE + 16, + * struct vfio_device_ioeventfd) + * + * Perform a write to the device at the specified device fd offset, with + * the specified data and width when the provided eventfd is triggered. + * vfio bus drivers may not support this for all regions, for all widths, + * or at all. vfio-pci currently only enables support for BAR regions, + * excluding the MSI-X vector table. + * + * Return: 0 on success, -errno on failure. + */ +struct vfio_device_ioeventfd { + __u32 argsz; + __u32 flags; +#define VFIO_DEVICE_IOEVENTFD_8 (1 << 0) /* 1-byte write */ +#define VFIO_DEVICE_IOEVENTFD_16 (1 << 1) /* 2-byte write */ +#define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */ +#define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */ +#define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf) + __u64 offset; /* device fd offset of write */ + __u64 data; /* data to be written */ + __s32 fd; /* -1 for de-assignment */ +}; + +#define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16) + /* -------- API for Type1 VFIO IOMMU -------- */ /** diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index 4e8b8304b793..40297a3181ed 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -53,7 +53,9 @@ struct virtio_balloon_config { #define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */ #define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */ #define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */ -#define VIRTIO_BALLOON_S_NR 8 +#define VIRTIO_BALLOON_S_HTLB_PGALLOC 8 /* Hugetlb page allocations */ +#define VIRTIO_BALLOON_S_HTLB_PGFAIL 9 /* Hugetlb page allocation failures */ +#define VIRTIO_BALLOON_S_NR 10 /* * Memory statistics structure. diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h index db54115be044..a7a6111e50c7 100644 --- a/include/uapi/rdma/bnxt_re-abi.h +++ b/include/uapi/rdma/bnxt_re-abi.h @@ -53,15 +53,20 @@ struct bnxt_re_uctx_resp { __u32 rsvd; }; +/* + * This struct is placed after the ib_uverbs_alloc_pd_resp struct, which is + * not 8 byte aligned. To avoid undesired padding in various cases we have to + * set this struct to packed.
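 *
 * (Illustrative, not part of this patch: packed plus aligned(4) lowers
 * the struct's required alignment from 8 to 4 while leaving it 16 bytes
 * on common ABIs, e.g.
 *
 *	_Static_assert(__alignof__(struct bnxt_re_pd_resp) == 4,
 *		       "must be placeable at a 4-byte boundary");
 *
 * so the kernel can copy the response to the 4-byte-aligned offset that
 * follows struct ib_uverbs_alloc_pd_resp without misaligned access.)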
+ */ struct bnxt_re_pd_resp { __u32 pdid; __u32 dpi; __u64 dbr; -}; +} __attribute__((packed, aligned(4))); struct bnxt_re_cq_req { - __u64 cq_va; - __u64 cq_handle; + __aligned_u64 cq_va; + __aligned_u64 cq_handle; }; struct bnxt_re_cq_resp { @@ -72,9 +77,9 @@ struct bnxt_re_cq_resp { }; struct bnxt_re_qp_req { - __u64 qpsva; - __u64 qprva; - __u64 qp_handle; + __aligned_u64 qpsva; + __aligned_u64 qprva; + __aligned_u64 qp_handle; }; struct bnxt_re_qp_resp { @@ -83,8 +88,8 @@ struct bnxt_re_qp_resp { }; struct bnxt_re_srq_req { - __u64 srqva; - __u64 srq_handle; + __aligned_u64 srqva; + __aligned_u64 srq_handle; }; struct bnxt_re_srq_resp { diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h index d5745e43ae85..9acb4b7a6246 100644 --- a/include/uapi/rdma/cxgb3-abi.h +++ b/include/uapi/rdma/cxgb3-abi.h @@ -41,21 +41,21 @@ * Make sure that all structs defined in this file remain laid out so * that they pack the same way on 32-bit and 64-bit architectures (to * avoid incompatibility between 32-bit userspace and 64-bit kernels). - * In particular do not use pointer types -- pass pointers in __u64 + * In particular do not use pointer types -- pass pointers in __aligned_u64 * instead. */ struct iwch_create_cq_req { - __u64 user_rptr_addr; + __aligned_u64 user_rptr_addr; }; struct iwch_create_cq_resp_v0 { - __u64 key; + __aligned_u64 key; __u32 cqid; __u32 size_log2; }; struct iwch_create_cq_resp { - __u64 key; + __aligned_u64 key; __u32 cqid; __u32 size_log2; __u32 memsize; @@ -63,8 +63,8 @@ struct iwch_create_cq_resp { }; struct iwch_create_qp_resp { - __u64 key; - __u64 db_key; + __aligned_u64 key; + __aligned_u64 db_key; __u32 qpid; __u32 size_log2; __u32 sq_size_log2; @@ -74,4 +74,9 @@ struct iwch_create_qp_resp { struct iwch_reg_user_mr_resp { __u32 pbl_addr; }; + +struct iwch_alloc_pd_resp { + __u32 pdid; +}; + #endif /* CXGB3_ABI_USER_H */ diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h index 05f71f1bc119..1fefd0140c26 100644 --- a/include/uapi/rdma/cxgb4-abi.h +++ b/include/uapi/rdma/cxgb4-abi.h @@ -41,13 +41,13 @@ * Make sure that all structs defined in this file remain laid out so * that they pack the same way on 32-bit and 64-bit architectures (to * avoid incompatibility between 32-bit userspace and 64-bit kernels). - * In particular do not use pointer types -- pass pointers in __u64 + * In particular do not use pointer types -- pass pointers in __aligned_u64 * instead. 
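/*
 * Illustrative sketch (hypothetical, not part of the patch above): why
 * these headers switch from __u64 to __aligned_u64. On i386 a bare __u64
 * is only 4-byte aligned, while on x86_64 it is 8-byte aligned, so a
 * struct mixing __u32 and __u64 can lay out differently for 32-bit
 * userspace and a 64-bit kernel. __aligned_u64 (from the UAPI
 * <linux/types.h>) forces 8-byte alignment everywhere. The _example
 * names are made up; assumes C11.
 */
#include <stddef.h>
#include <linux/types.h>

struct key_unstable_example {
	__u32 cqid;
	__u64 key;		/* offset 4 on i386, offset 8 on x86_64 */
};

struct key_stable_example {
	__u32 cqid;
	__u32 reserved;		/* explicit padding, identical on every ABI */
	__aligned_u64 key;	/* offset 8 everywhere */
};

_Static_assert(offsetof(struct key_stable_example, key) == 8,
	       "layout matches for 32-bit and 64-bit userspace");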
*/ struct c4iw_create_cq_resp { - __u64 key; - __u64 gts_key; - __u64 memsize; + __aligned_u64 key; + __aligned_u64 gts_key; + __aligned_u64 memsize; __u32 cqid; __u32 size; __u32 qid_mask; @@ -59,13 +59,13 @@ enum { }; struct c4iw_create_qp_resp { - __u64 ma_sync_key; - __u64 sq_key; - __u64 rq_key; - __u64 sq_db_gts_key; - __u64 rq_db_gts_key; - __u64 sq_memsize; - __u64 rq_memsize; + __aligned_u64 ma_sync_key; + __aligned_u64 sq_key; + __aligned_u64 rq_key; + __aligned_u64 sq_db_gts_key; + __aligned_u64 rq_db_gts_key; + __aligned_u64 sq_memsize; + __aligned_u64 rq_memsize; __u32 sqid; __u32 rqid; __u32 sq_size; @@ -75,8 +75,13 @@ struct c4iw_create_qp_resp { }; struct c4iw_alloc_ucontext_resp { - __u64 status_page_key; + __aligned_u64 status_page_key; __u32 status_page_size; __u32 reserved; /* explicit padding (optional for i386) */ }; + +struct c4iw_alloc_pd_resp { + __u32 pdid; +}; + #endif /* CXGB4_ABI_USER_H */ diff --git a/include/uapi/rdma/hfi/hfi1_ioctl.h b/include/uapi/rdma/hfi/hfi1_ioctl.h index 9de78c5ee913..8f3d9fe7b141 100644 --- a/include/uapi/rdma/hfi/hfi1_ioctl.h +++ b/include/uapi/rdma/hfi/hfi1_ioctl.h @@ -79,7 +79,7 @@ struct hfi1_user_info { }; struct hfi1_ctxt_info { - __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */ + __aligned_u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */ __u32 rcvegr_size; /* size of each eager buffer */ __u16 num_active; /* number of active units */ __u16 unit; /* unit (chip) assigned to caller */ @@ -98,9 +98,9 @@ struct hfi1_ctxt_info { struct hfi1_tid_info { /* virtual address of first page in transfer */ - __u64 vaddr; + __aligned_u64 vaddr; /* pointer to tid array. this array is big enough */ - __u64 tidlist; + __aligned_u64 tidlist; /* number of tids programmed by this request */ __u32 tidcnt; /* length of transfer buffer programmed by this request */ @@ -131,23 +131,23 @@ struct hfi1_base_info { */ __u32 bthqp; /* PIO credit return address, */ - __u64 sc_credits_addr; + __aligned_u64 sc_credits_addr; /* * Base address of write-only pio buffers for this process. * Each buffer has sendpio_credits*64 bytes. */ - __u64 pio_bufbase_sop; + __aligned_u64 pio_bufbase_sop; /* * Base address of write-only pio buffers for this process. * Each buffer has sendpio_credits*64 bytes. */ - __u64 pio_bufbase; + __aligned_u64 pio_bufbase; /* address where receive buffer queue is mapped into */ - __u64 rcvhdr_bufbase; + __aligned_u64 rcvhdr_bufbase; /* base address of Eager receive buffers. */ - __u64 rcvegr_bufbase; + __aligned_u64 rcvegr_bufbase; /* base address of SDMA completion ring */ - __u64 sdma_comp_bufbase; + __aligned_u64 sdma_comp_bufbase; /* * User register base for init code, not to be used directly by * protocol or applications. Always maps real chip register space. @@ -155,20 +155,20 @@ struct hfi1_base_info { * ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail, * ur_rcvtidflow */ - __u64 user_regbase; + __aligned_u64 user_regbase; /* notification events */ - __u64 events_bufbase; + __aligned_u64 events_bufbase; /* status page */ - __u64 status_bufbase; + __aligned_u64 status_bufbase; /* rcvhdrtail update */ - __u64 rcvhdrtail_base; + __aligned_u64 rcvhdrtail_base; /* * shared memory pages for subctxts if ctxt is shared; these cover * all the processes in the group sharing a single context. * all have enough space for the num_subcontexts value on this job. 
*/ - __u64 subctxt_uregbase; - __u64 subctxt_rcvegrbuf; - __u64 subctxt_rcvhdrbuf; + __aligned_u64 subctxt_uregbase; + __aligned_u64 subctxt_rcvegrbuf; + __aligned_u64 subctxt_rcvhdrbuf; }; #endif /* _LINIUX__HFI1_IOCTL_H */ diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index 791bea2f8297..c6a984c0c881 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -177,8 +177,8 @@ struct hfi1_sdma_comp_entry { * Device status and notifications from driver to user-space. */ struct hfi1_status { - __u64 dev; /* device/hw status bits */ - __u64 port; /* port state and status bits */ + __aligned_u64 dev; /* device/hw status bits */ + __aligned_u64 port; /* port state and status bits */ char freezemsg[0]; }; @@ -219,7 +219,7 @@ struct sdma_req_info { * in charge of managing its own ring. */ __u16 comp_idx; -} __packed; +} __attribute__((__packed__)); /* * SW KDETH header. @@ -230,7 +230,7 @@ struct hfi1_kdeth_header { __le16 jkey; __le16 hcrc; __le32 swdata[7]; -} __packed; +} __attribute__((__packed__)); /* * Structure describing the headers that User space uses. The @@ -241,7 +241,7 @@ struct hfi1_pkt_header { __be16 lrh[4]; __be32 bth[3]; struct hfi1_kdeth_header kdeth; -} __packed; +} __attribute__((__packed__)); /* diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index a9c03b0eed57..7092c8de4bd8 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -37,19 +37,35 @@ #include <linux/types.h> struct hns_roce_ib_create_cq { - __u64 buf_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; +}; + +struct hns_roce_ib_create_cq_resp { + __aligned_u64 cqn; /* Only 32 bits used, 64 for compat */ + __aligned_u64 cap_flags; }; struct hns_roce_ib_create_qp { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u8 log_sq_bb_count; __u8 log_sq_stride; __u8 sq_no_prefetch; __u8 reserved[5]; }; +struct hns_roce_ib_create_qp_resp { + __aligned_u64 cap_flags; +}; + struct hns_roce_ib_alloc_ucontext_resp { __u32 qp_tab_size; + __u32 reserved; }; + +struct hns_roce_ib_alloc_pd_resp { + __u32 pdn; +}; + #endif /* HNS_ABI_USER_H */ diff --git a/include/uapi/rdma/i40iw-abi.h b/include/uapi/rdma/i40iw-abi.h new file mode 100644 index 000000000000..79890baa6fdb --- /dev/null +++ b/include/uapi/rdma/i40iw-abi.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved. + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef I40IW_ABI_H +#define I40IW_ABI_H + +#include <linux/types.h> + +#define I40IW_ABI_VER 5 + +struct i40iw_alloc_ucontext_req { + __u32 reserved32; + __u8 userspace_ver; + __u8 reserved8[3]; +}; + +struct i40iw_alloc_ucontext_resp { + __u32 max_pds; /* maximum pds allowed for this user process */ + __u32 max_qps; /* maximum qps allowed for this user process */ + __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */ + __u8 kernel_ver; + __u8 reserved[3]; +}; + +struct i40iw_alloc_pd_resp { + __u32 pd_id; + __u8 reserved[4]; +}; + +struct i40iw_create_cq_req { + __aligned_u64 user_cq_buffer; + __aligned_u64 user_shadow_area; +}; + +struct i40iw_create_qp_req { + __aligned_u64 user_wqe_buffers; + __aligned_u64 user_compl_ctx; + + /* UDA QP PHB */ + __aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */ + __aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */ +}; + +enum i40iw_memreg_type { + IW_MEMREG_TYPE_MEM = 0x0000, + IW_MEMREG_TYPE_QP = 0x0001, + IW_MEMREG_TYPE_CQ = 0x0002, +}; + +struct i40iw_mem_reg_req { + __u16 reg_type; /* Memory, QP or CQ */ + __u16 cq_pages; + __u16 rq_pages; + __u16 sq_pages; +}; + +struct i40iw_create_cq_resp { + __u32 cq_id; + __u32 cq_size; + __u32 mmap_db_index; + __u32 reserved; +}; + +struct i40iw_create_qp_resp { + __u32 qp_id; + __u32 actual_sq_size; + __u32 actual_rq_size; + __u32 i40iw_drv_opt; + __u16 push_idx; + __u8 lsmm; + __u8 rsvd2; +}; + +#endif diff --git a/include/uapi/rdma/ib_user_cm.h b/include/uapi/rdma/ib_user_cm.h index f4041bdc4d08..4a8f9562f7cd 100644 --- a/include/uapi/rdma/ib_user_cm.h +++ b/include/uapi/rdma/ib_user_cm.h @@ -73,8 +73,8 @@ struct ib_ucm_cmd_hdr { }; struct ib_ucm_create_id { - __u64 uid; - __u64 response; + __aligned_u64 uid; + __aligned_u64 response; }; struct ib_ucm_create_id_resp { @@ -82,7 +82,7 @@ struct ib_ucm_create_id_resp { }; struct ib_ucm_destroy_id { - __u64 response; + __aligned_u64 response; __u32 id; __u32 reserved; }; @@ -92,7 +92,7 @@ struct ib_ucm_destroy_id_resp { }; struct ib_ucm_attr_id { - __u64 response; + __aligned_u64 response; __u32 id; __u32 reserved; }; @@ -105,7 +105,7 @@ struct ib_ucm_attr_id_resp { }; struct ib_ucm_init_qp_attr { - __u64 response; + __aligned_u64 response; __u32 id; __u32 qp_state; }; @@ -123,7 +123,7 @@ struct ib_ucm_notify { }; struct ib_ucm_private_data { - __u64 data; + __aligned_u64 data; __u32 id; __u8 len; __u8 reserved[3]; @@ -135,9 +135,9 @@ struct ib_ucm_req { __u32 qp_type; __u32 psn; __be64 sid; - __u64 data; - __u64 primary_path; - __u64 alternate_path; + __aligned_u64 data; + __aligned_u64 primary_path; + __aligned_u64 alternate_path; __u8 len; __u8 peer_to_peer; __u8 responder_resources; @@ -153,8 +153,8 @@ struct ib_ucm_req { }; struct ib_ucm_rep { - __u64 uid; - __u64 data; + __aligned_u64 uid; + __aligned_u64 data; __u32 id; __u32 qpn; __u32 psn; @@ -172,15 +172,15 @@ struct ib_ucm_rep { struct ib_ucm_info { __u32 id; __u32 status; - __u64 info; - __u64 data; + __aligned_u64 info; + __aligned_u64 data; __u8 
info_len; __u8 data_len; __u8 reserved[6]; }; struct ib_ucm_mra { - __u64 data; + __aligned_u64 data; __u32 id; __u8 len; __u8 timeout; @@ -188,8 +188,8 @@ struct ib_ucm_mra { }; struct ib_ucm_lap { - __u64 path; - __u64 data; + __aligned_u64 path; + __aligned_u64 data; __u32 id; __u8 len; __u8 reserved[3]; @@ -199,8 +199,8 @@ struct ib_ucm_sidr_req { __u32 id; __u32 timeout; __be64 sid; - __u64 data; - __u64 path; + __aligned_u64 data; + __aligned_u64 path; __u16 reserved_pkey; __u8 len; __u8 max_cm_retries; @@ -212,8 +212,8 @@ struct ib_ucm_sidr_rep { __u32 qpn; __u32 qkey; __u32 status; - __u64 info; - __u64 data; + __aligned_u64 info; + __aligned_u64 data; __u8 info_len; __u8 data_len; __u8 reserved[6]; @@ -222,9 +222,9 @@ struct ib_ucm_sidr_rep { * event notification ABI structures. */ struct ib_ucm_event_get { - __u64 response; - __u64 data; - __u64 info; + __aligned_u64 response; + __aligned_u64 data; + __aligned_u64 info; __u8 data_len; __u8 info_len; __u8 reserved[6]; @@ -303,7 +303,7 @@ struct ib_ucm_sidr_rep_event_resp { #define IB_UCM_PRES_ALTERNATE 0x08 struct ib_ucm_event_resp { - __u64 uid; + __aligned_u64 uid; __u32 id; __u32 event; __u32 present; diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h new file mode 100644 index 000000000000..83e3890eef20 --- /dev/null +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef IB_USER_IOCTL_CMDS_H +#define IB_USER_IOCTL_CMDS_H + +#define UVERBS_ID_NS_MASK 0xF000 +#define UVERBS_ID_NS_SHIFT 12 + +#define UVERBS_UDATA_DRIVER_DATA_NS 1 +#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT) + +enum uverbs_default_objects { + UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */ + UVERBS_OBJECT_PD, + UVERBS_OBJECT_COMP_CHANNEL, + UVERBS_OBJECT_CQ, + UVERBS_OBJECT_QP, + UVERBS_OBJECT_SRQ, + UVERBS_OBJECT_AH, + UVERBS_OBJECT_MR, + UVERBS_OBJECT_MW, + UVERBS_OBJECT_FLOW, + UVERBS_OBJECT_XRCD, + UVERBS_OBJECT_RWQ_IND_TBL, + UVERBS_OBJECT_WQ, + UVERBS_OBJECT_FLOW_ACTION, + UVERBS_OBJECT_DM, +}; + +enum { + UVERBS_ATTR_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG, + UVERBS_ATTR_UHW_OUT, +}; + +enum uverbs_attrs_create_cq_cmd_attr_ids { + UVERBS_ATTR_CREATE_CQ_HANDLE, + UVERBS_ATTR_CREATE_CQ_CQE, + UVERBS_ATTR_CREATE_CQ_USER_HANDLE, + UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL, + UVERBS_ATTR_CREATE_CQ_COMP_VECTOR, + UVERBS_ATTR_CREATE_CQ_FLAGS, + UVERBS_ATTR_CREATE_CQ_RESP_CQE, +}; + +enum uverbs_attrs_destroy_cq_cmd_attr_ids { + UVERBS_ATTR_DESTROY_CQ_HANDLE, + UVERBS_ATTR_DESTROY_CQ_RESP, +}; + +enum uverbs_attrs_create_flow_action_esp { + UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE, + UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS, + UVERBS_ATTR_FLOW_ACTION_ESP_ESN, + UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT, + UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY, + UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP, +}; + +enum uverbs_attrs_destroy_flow_action_esp { + UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE, +}; + +enum uverbs_methods_cq { + UVERBS_METHOD_CQ_CREATE, + UVERBS_METHOD_CQ_DESTROY, +}; + +enum uverbs_methods_actions_flow_action_ops { + UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, + UVERBS_METHOD_FLOW_ACTION_DESTROY, + UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY, +}; + +enum uverbs_attrs_alloc_dm_cmd_attr_ids { + UVERBS_ATTR_ALLOC_DM_HANDLE, + UVERBS_ATTR_ALLOC_DM_LENGTH, + UVERBS_ATTR_ALLOC_DM_ALIGNMENT, +}; + +enum uverbs_attrs_free_dm_cmd_attr_ids { + UVERBS_ATTR_FREE_DM_HANDLE, +}; + +enum uverbs_methods_dm { + UVERBS_METHOD_DM_ALLOC, + UVERBS_METHOD_DM_FREE, +}; + +enum uverbs_attrs_reg_dm_mr_cmd_attr_ids { + UVERBS_ATTR_REG_DM_MR_HANDLE, + UVERBS_ATTR_REG_DM_MR_OFFSET, + UVERBS_ATTR_REG_DM_MR_LENGTH, + UVERBS_ATTR_REG_DM_MR_PD_HANDLE, + UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS, + UVERBS_ATTR_REG_DM_MR_DM_HANDLE, + UVERBS_ATTR_REG_DM_MR_RESP_LKEY, + UVERBS_ATTR_REG_DM_MR_RESP_RKEY, +}; + +enum uverbs_methods_mr { + UVERBS_METHOD_DM_MR_REG, +}; + +#endif diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 842792eae383..04e46ea517d3 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -1,5 +1,6 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ /* - * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2017-2018, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -33,52 +34,69 @@ #ifndef IB_USER_IOCTL_VERBS_H #define IB_USER_IOCTL_VERBS_H -#include <rdma/rdma_user_ioctl.h> - -#define UVERBS_UDATA_DRIVER_DATA_NS 1 -#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT) - -enum uverbs_default_objects { - UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */ - UVERBS_OBJECT_PD, - UVERBS_OBJECT_COMP_CHANNEL, - UVERBS_OBJECT_CQ, - UVERBS_OBJECT_QP, - UVERBS_OBJECT_SRQ, - UVERBS_OBJECT_AH, - UVERBS_OBJECT_MR, - UVERBS_OBJECT_MW, - UVERBS_OBJECT_FLOW, - UVERBS_OBJECT_XRCD, - UVERBS_OBJECT_RWQ_IND_TBL, - UVERBS_OBJECT_WQ, - UVERBS_OBJECT_LAST, +#include <linux/types.h> + +#ifndef RDMA_UAPI_PTR +#define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name +#endif + +enum ib_uverbs_flow_action_esp_keymat { + IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM, }; -enum { - UVERBS_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG, - UVERBS_UHW_OUT, +enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo { + IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ, }; -enum uverbs_create_cq_cmd_attr_ids { - CREATE_CQ_HANDLE, - CREATE_CQ_CQE, - CREATE_CQ_USER_HANDLE, - CREATE_CQ_COMP_CHANNEL, - CREATE_CQ_COMP_VECTOR, - CREATE_CQ_FLAGS, - CREATE_CQ_RESP_CQE, +struct ib_uverbs_flow_action_esp_keymat_aes_gcm { + __aligned_u64 iv; + __u32 iv_algo; /* Use enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo */ + + __u32 salt; + __u32 icv_len; + + __u32 key_len; + __u32 aes_key[256 / 32]; }; -enum uverbs_destroy_cq_cmd_attr_ids { - DESTROY_CQ_HANDLE, - DESTROY_CQ_RESP, +enum ib_uverbs_flow_action_esp_replay { + IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE, + IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP, }; -enum uverbs_actions_cq_ops { - UVERBS_CQ_CREATE, - UVERBS_CQ_DESTROY, +struct ib_uverbs_flow_action_esp_replay_bmp { + __u32 size; }; -#endif +enum ib_uverbs_flow_action_esp_flags { + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO = 0UL << 0, /* Default */ + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD = 1UL << 0, + + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL = 0UL << 1, /* Default */ + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT = 1UL << 1, + + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT = 0UL << 2, /* Default */ + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT = 1UL << 2, + IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW = 1UL << 3, +}; + +struct ib_uverbs_flow_action_esp_encap { + /* This struct represents a list of pointers to flow_xxxx_filter that + * encapsulates the payload in ESP tunnel mode. 
+ */ + RDMA_UAPI_PTR(void *, val_ptr); /* pointer to a flow_xxxx_filter */ + RDMA_UAPI_PTR(struct ib_uverbs_flow_action_esp_encap *, next_ptr); + __u16 len; /* Len of the filter struct val_ptr points to */ + __u16 type; /* Use flow_spec_type enum */ +}; + +struct ib_uverbs_flow_action_esp { + __u32 spi; + __u32 seq; + __u32 tfc_pad; + __u32 flags; + __aligned_u64 hard_limit_pkts; +}; + +#endif diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h index 330a3c5f1aa8..ef92118dad97 100644 --- a/include/uapi/rdma/ib_user_mad.h +++ b/include/uapi/rdma/ib_user_mad.h @@ -143,7 +143,7 @@ struct ib_user_mad_hdr { */ struct ib_user_mad { struct ib_user_mad_hdr hdr; - __u64 data[0]; + __aligned_u64 data[0]; }; /* @@ -225,7 +225,7 @@ struct ib_user_mad_reg_req2 { __u8 mgmt_class_version; __u16 res; __u32 flags; - __u64 method_mask[2]; + __aligned_u64 method_mask[2]; __u32 oui; __u8 rmpp_version; __u8 reserved[3]; diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 04d0e67b1312..9be07394fdbe 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -117,13 +117,13 @@ enum { */ struct ib_uverbs_async_event_desc { - __u64 element; + __aligned_u64 element; __u32 event_type; /* enum ib_event_type */ __u32 reserved; }; struct ib_uverbs_comp_event_desc { - __u64 cq_handle; + __aligned_u64 cq_handle; }; struct ib_uverbs_cq_moderation_caps { @@ -141,10 +141,7 @@ struct ib_uverbs_cq_moderation_caps { */ #define IB_USER_VERBS_CMD_COMMAND_MASK 0xff -#define IB_USER_VERBS_CMD_FLAGS_MASK 0xff000000u -#define IB_USER_VERBS_CMD_FLAGS_SHIFT 24 - -#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80 +#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80000000u struct ib_uverbs_cmd_hdr { __u32 command; @@ -153,15 +150,15 @@ struct ib_uverbs_cmd_hdr { }; struct ib_uverbs_ex_cmd_hdr { - __u64 response; + __aligned_u64 response; __u16 provider_in_words; __u16 provider_out_words; __u32 cmd_hdr_reserved; }; struct ib_uverbs_get_context { - __u64 response; - __u64 driver_data[0]; + __aligned_u64 response; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_get_context_resp { @@ -170,16 +167,16 @@ struct ib_uverbs_get_context_resp { }; struct ib_uverbs_query_device { - __u64 response; - __u64 driver_data[0]; + __aligned_u64 response; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_device_resp { - __u64 fw_ver; + __aligned_u64 fw_ver; __be64 node_guid; __be64 sys_image_guid; - __u64 max_mr_size; - __u64 page_size_cap; + __aligned_u64 max_mr_size; + __aligned_u64 page_size_cap; __u32 vendor_id; __u32 vendor_part_id; __u32 hw_ver; @@ -224,7 +221,7 @@ struct ib_uverbs_ex_query_device { }; struct ib_uverbs_odp_caps { - __u64 general_caps; + __aligned_u64 general_caps; struct { __u32 rc_odp_caps; __u32 uc_odp_caps; @@ -263,21 +260,22 @@ struct ib_uverbs_ex_query_device_resp { __u32 comp_mask; __u32 response_length; struct ib_uverbs_odp_caps odp_caps; - __u64 timestamp_mask; - __u64 hca_core_clock; /* in KHZ */ - __u64 device_cap_flags_ex; + __aligned_u64 timestamp_mask; + __aligned_u64 hca_core_clock; /* in KHZ */ + __aligned_u64 device_cap_flags_ex; struct ib_uverbs_rss_caps rss_caps; __u32 max_wq_type_rq; __u32 raw_packet_caps; struct ib_uverbs_tm_caps tm_caps; struct ib_uverbs_cq_moderation_caps cq_moderation_caps; + __aligned_u64 max_dm_size; }; struct ib_uverbs_query_port { - __u64 response; + __aligned_u64 response; __u8 port_num; __u8 reserved[7]; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_port_resp { 
@@ -305,8 +303,8 @@ struct ib_uverbs_query_port_resp { }; struct ib_uverbs_alloc_pd { - __u64 response; - __u64 driver_data[0]; + __aligned_u64 response; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_alloc_pd_resp { @@ -318,10 +316,10 @@ struct ib_uverbs_dealloc_pd { }; struct ib_uverbs_open_xrcd { - __u64 response; + __aligned_u64 response; __u32 fd; __u32 oflags; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_open_xrcd_resp { @@ -333,13 +331,13 @@ struct ib_uverbs_close_xrcd { }; struct ib_uverbs_reg_mr { - __u64 response; - __u64 start; - __u64 length; - __u64 hca_va; + __aligned_u64 response; + __aligned_u64 start; + __aligned_u64 length; + __aligned_u64 hca_va; __u32 pd_handle; __u32 access_flags; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_reg_mr_resp { @@ -349,12 +347,12 @@ struct ib_uverbs_reg_mr_resp { }; struct ib_uverbs_rereg_mr { - __u64 response; + __aligned_u64 response; __u32 mr_handle; __u32 flags; - __u64 start; - __u64 length; - __u64 hca_va; + __aligned_u64 start; + __aligned_u64 length; + __aligned_u64 hca_va; __u32 pd_handle; __u32 access_flags; }; @@ -369,7 +367,7 @@ struct ib_uverbs_dereg_mr { }; struct ib_uverbs_alloc_mw { - __u64 response; + __aligned_u64 response; __u32 pd_handle; __u8 mw_type; __u8 reserved[3]; @@ -385,7 +383,7 @@ struct ib_uverbs_dealloc_mw { }; struct ib_uverbs_create_comp_channel { - __u64 response; + __aligned_u64 response; }; struct ib_uverbs_create_comp_channel_resp { @@ -393,13 +391,13 @@ struct ib_uverbs_create_comp_channel_resp { }; struct ib_uverbs_create_cq { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 cqe; __u32 comp_vector; __s32 comp_channel; __u32 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; enum ib_uverbs_ex_create_cq_flags { @@ -408,7 +406,7 @@ enum ib_uverbs_ex_create_cq_flags { }; struct ib_uverbs_ex_create_cq { - __u64 user_handle; + __aligned_u64 user_handle; __u32 cqe; __u32 comp_vector; __s32 comp_channel; @@ -429,26 +427,26 @@ struct ib_uverbs_ex_create_cq_resp { }; struct ib_uverbs_resize_cq { - __u64 response; + __aligned_u64 response; __u32 cq_handle; __u32 cqe; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_resize_cq_resp { __u32 cqe; __u32 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_poll_cq { - __u64 response; + __aligned_u64 response; __u32 cq_handle; __u32 ne; }; struct ib_uverbs_wc { - __u64 wr_id; + __aligned_u64 wr_id; __u32 status; __u32 opcode; __u32 vendor_err; @@ -480,7 +478,7 @@ struct ib_uverbs_req_notify_cq { }; struct ib_uverbs_destroy_cq { - __u64 response; + __aligned_u64 response; __u32 cq_handle; __u32 reserved; }; @@ -549,8 +547,8 @@ struct ib_uverbs_qp_attr { }; struct ib_uverbs_create_qp { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 pd_handle; __u32 send_cq_handle; __u32 recv_cq_handle; @@ -564,7 +562,7 @@ struct ib_uverbs_create_qp { __u8 qp_type; __u8 is_srq; __u8 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; enum ib_uverbs_create_qp_mask { @@ -590,7 +588,7 @@ enum { }; struct ib_uverbs_ex_create_qp { - __u64 user_handle; + __aligned_u64 user_handle; __u32 pd_handle; __u32 send_cq_handle; __u32 recv_cq_handle; @@ -611,13 +609,13 @@ struct ib_uverbs_ex_create_qp { }; struct ib_uverbs_open_qp { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 pd_handle; 
__u32 qpn; __u8 qp_type; __u8 reserved[7]; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; /* also used for open response */ @@ -658,10 +656,10 @@ struct ib_uverbs_qp_dest { }; struct ib_uverbs_query_qp { - __u64 response; + __aligned_u64 response; __u32 qp_handle; __u32 attr_mask; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_qp_resp { @@ -695,7 +693,7 @@ struct ib_uverbs_query_qp_resp { __u8 alt_timeout; __u8 sq_sig_all; __u8 reserved[5]; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_modify_qp { @@ -725,7 +723,7 @@ struct ib_uverbs_modify_qp { __u8 alt_port_num; __u8 alt_timeout; __u8 reserved[2]; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_ex_modify_qp { @@ -743,7 +741,7 @@ struct ib_uverbs_ex_modify_qp_resp { }; struct ib_uverbs_destroy_qp { - __u64 response; + __aligned_u64 response; __u32 qp_handle; __u32 reserved; }; @@ -759,13 +757,13 @@ struct ib_uverbs_destroy_qp_resp { * document the ABI. */ struct ib_uverbs_sge { - __u64 addr; + __aligned_u64 addr; __u32 length; __u32 lkey; }; struct ib_uverbs_send_wr { - __u64 wr_id; + __aligned_u64 wr_id; __u32 num_sge; __u32 opcode; __u32 send_flags; @@ -775,14 +773,14 @@ struct ib_uverbs_send_wr { } ex; union { struct { - __u64 remote_addr; + __aligned_u64 remote_addr; __u32 rkey; __u32 reserved; } rdma; struct { - __u64 remote_addr; - __u64 compare_add; - __u64 swap; + __aligned_u64 remote_addr; + __aligned_u64 compare_add; + __aligned_u64 swap; __u32 rkey; __u32 reserved; } atomic; @@ -796,7 +794,7 @@ struct ib_uverbs_send_wr { }; struct ib_uverbs_post_send { - __u64 response; + __aligned_u64 response; __u32 qp_handle; __u32 wr_count; __u32 sge_count; @@ -809,13 +807,13 @@ struct ib_uverbs_post_send_resp { }; struct ib_uverbs_recv_wr { - __u64 wr_id; + __aligned_u64 wr_id; __u32 num_sge; __u32 reserved; }; struct ib_uverbs_post_recv { - __u64 response; + __aligned_u64 response; __u32 qp_handle; __u32 wr_count; __u32 sge_count; @@ -828,7 +826,7 @@ struct ib_uverbs_post_recv_resp { }; struct ib_uverbs_post_srq_recv { - __u64 response; + __aligned_u64 response; __u32 srq_handle; __u32 wr_count; __u32 sge_count; @@ -841,8 +839,8 @@ struct ib_uverbs_post_srq_recv_resp { }; struct ib_uverbs_create_ah { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 pd_handle; __u32 reserved; struct ib_uverbs_ah_attr attr; @@ -861,7 +859,7 @@ struct ib_uverbs_attach_mcast { __u32 qp_handle; __u16 mlid; __u16 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_detach_mcast { @@ -869,7 +867,7 @@ struct ib_uverbs_detach_mcast { __u32 qp_handle; __u16 mlid; __u16 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_flow_spec_hdr { @@ -877,7 +875,7 @@ struct ib_uverbs_flow_spec_hdr { __u16 size; __u16 reserved; /* followed by flow_spec */ - __u64 flow_spec_data[0]; + __aligned_u64 flow_spec_data[0]; }; struct ib_uverbs_flow_eth_filter { @@ -987,6 +985,19 @@ struct ib_uverbs_flow_spec_action_drop { }; }; +struct ib_uverbs_flow_spec_action_handle { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + __u32 handle; + __u32 reserved1; +}; + struct ib_uverbs_flow_tunnel_filter { __be32 tunnel_id; }; @@ -1004,6 +1015,24 @@ struct ib_uverbs_flow_spec_tunnel { struct ib_uverbs_flow_tunnel_filter mask; }; +struct ib_uverbs_flow_spec_esp_filter { + __u32 spi; + __u32 seq; +}; + +struct 
ib_uverbs_flow_spec_esp { + union { + struct ib_uverbs_flow_spec_hdr hdr; + struct { + __u32 type; + __u16 size; + __u16 reserved; + }; + }; + struct ib_uverbs_flow_spec_esp_filter val; + struct ib_uverbs_flow_spec_esp_filter mask; +}; + struct ib_uverbs_flow_attr { __u32 type; __u16 size; @@ -1036,18 +1065,18 @@ struct ib_uverbs_destroy_flow { }; struct ib_uverbs_create_srq { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 pd_handle; __u32 max_wr; __u32 max_sge; __u32 srq_limit; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_create_xsrq { - __u64 response; - __u64 user_handle; + __aligned_u64 response; + __aligned_u64 user_handle; __u32 srq_type; __u32 pd_handle; __u32 max_wr; @@ -1056,7 +1085,7 @@ struct ib_uverbs_create_xsrq { __u32 max_num_tags; __u32 xrcd_handle; __u32 cq_handle; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_create_srq_resp { @@ -1071,14 +1100,14 @@ struct ib_uverbs_modify_srq { __u32 attr_mask; __u32 max_wr; __u32 srq_limit; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_srq { - __u64 response; + __aligned_u64 response; __u32 srq_handle; __u32 reserved; - __u64 driver_data[0]; + __aligned_u64 driver_data[0]; }; struct ib_uverbs_query_srq_resp { @@ -1089,7 +1118,7 @@ struct ib_uverbs_query_srq_resp { }; struct ib_uverbs_destroy_srq { - __u64 response; + __aligned_u64 response; __u32 srq_handle; __u32 reserved; }; @@ -1101,7 +1130,7 @@ struct ib_uverbs_destroy_srq_resp { struct ib_uverbs_ex_create_wq { __u32 comp_mask; __u32 wq_type; - __u64 user_handle; + __aligned_u64 user_handle; __u32 pd_handle; __u32 cq_handle; __u32 max_wr; diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h index 7f9c37346613..04f64bc4045f 100644 --- a/include/uapi/rdma/mlx4-abi.h +++ b/include/uapi/rdma/mlx4-abi.h @@ -59,6 +59,10 @@ struct mlx4_ib_alloc_ucontext_resp_v3 { __u16 bf_regs_per_page; }; +enum { + MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0, +}; + struct mlx4_ib_alloc_ucontext_resp { __u32 dev_caps; __u32 qp_tab_size; @@ -73,8 +77,8 @@ struct mlx4_ib_alloc_pd_resp { }; struct mlx4_ib_create_cq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; }; struct mlx4_ib_create_cq_resp { @@ -83,12 +87,12 @@ struct mlx4_ib_create_cq_resp { }; struct mlx4_ib_resize_cq { - __u64 buf_addr; + __aligned_u64 buf_addr; }; struct mlx4_ib_create_srq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; }; struct mlx4_ib_create_srq_resp { @@ -97,7 +101,7 @@ struct mlx4_ib_create_srq_resp { }; struct mlx4_ib_create_qp_rss { - __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */ + __aligned_u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */ __u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */ __u8 reserved[7]; __u8 rx_hash_key[40]; @@ -106,8 +110,8 @@ struct mlx4_ib_create_qp_rss { }; struct mlx4_ib_create_qp { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u8 log_sq_bb_count; __u8 log_sq_stride; __u8 sq_no_prefetch; @@ -116,8 +120,8 @@ struct mlx4_ib_create_qp { }; struct mlx4_ib_create_wq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u8 log_range_size; __u8 reserved[3]; __u32 comp_mask; @@ -156,4 +160,32 @@ enum mlx4_ib_rx_hash_fields { MLX4_IB_RX_HASH_INNER = 1ULL << 31, }; +struct mlx4_ib_rss_caps { + __aligned_u64 rx_hash_fields_mask; /* enum 
mlx4_ib_rx_hash_fields */ + __u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */ + __u8 reserved[7]; +}; + +enum query_device_resp_mask { + MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0, +}; + +struct mlx4_ib_tso_caps { + __u32 max_tso; /* Maximum tso payload size in bytes */ + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported. + */ + __u32 supported_qpts; +}; + +struct mlx4_uverbs_ex_query_device_resp { + __u32 comp_mask; + __u32 response_length; + __aligned_u64 hca_core_clock_offset; + __u32 max_inl_recv_sz; + __u32 reserved; + struct mlx4_ib_rss_caps rss_caps; + struct mlx4_ib_tso_caps tso_caps; +}; + #endif /* MLX4_ABI_USER_H */ diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index 1111aa4e7c1e..cb4a02c4a1ce 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -84,7 +84,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 { __u8 reserved0; __u16 reserved1; __u32 reserved2; - __u64 lib_caps; + __aligned_u64 lib_caps; }; enum mlx5_ib_alloc_ucontext_resp_mask { @@ -107,6 +107,14 @@ enum mlx5_user_inline_mode { MLX5_USER_INLINE_MODE_TCP_UDP, }; +enum { + MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0, + MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1, + MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2, + MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3, + MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4, +}; + struct mlx5_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 bf_reg_size; @@ -118,14 +126,14 @@ struct mlx5_ib_alloc_ucontext_resp { __u32 max_recv_wr; __u32 max_srq_recv_wr; __u16 num_ports; - __u16 reserved1; + __u16 flow_action_flags; __u32 comp_mask; __u32 response_length; __u8 cqe_version; __u8 cmds_supp_uhw; __u8 eth_min_inline; __u8 clock_info_versions; - __u64 hca_core_clock_offset; + __aligned_u64 hca_core_clock_offset; __u32 log_uar_size; __u32 num_uars_per_page; __u32 num_dyn_bfregs; @@ -147,7 +155,7 @@ struct mlx5_ib_tso_caps { }; struct mlx5_ib_rss_caps { - __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ + __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */ __u8 reserved[7]; }; @@ -163,6 +171,10 @@ struct mlx5_ib_cqe_comp_caps { __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */ }; +enum mlx5_ib_packet_pacing_cap_flags { + MLX5_IB_PP_SUPPORT_BURST = 1 << 0, +}; + struct mlx5_packet_pacing_caps { __u32 qp_rate_limit_min; __u32 qp_rate_limit_max; /* In kpbs */ @@ -172,7 +184,8 @@ struct mlx5_packet_pacing_caps { * supported_qpts |= 1 << IB_QPT_RAW_PACKET */ __u32 supported_qpts; - __u32 reserved; + __u8 cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */ + __u8 reserved[3]; }; enum mlx5_ib_mpw_caps { @@ -243,8 +256,8 @@ enum mlx5_ib_create_cq_flags { }; struct mlx5_ib_create_cq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u32 cqe_size; __u8 cqe_comp_en; __u8 cqe_comp_res_format; @@ -257,15 +270,15 @@ struct mlx5_ib_create_cq_resp { }; struct mlx5_ib_resize_cq { - __u64 buf_addr; + __aligned_u64 buf_addr; __u16 cqe_size; __u16 reserved0; __u32 reserved1; }; struct mlx5_ib_create_srq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u32 flags; __u32 reserved0; /* explicit padding (optional on i386) */ __u32 uidx; @@ -278,8 +291,8 @@ struct mlx5_ib_create_srq_resp { }; 
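/*
 * Illustrative sketch (hypothetical, not part of the patch above): how
 * userspace might consume two of the new capability bits in this hunk --
 * flow_action_flags in mlx5_ib_alloc_ucontext_resp and cap_flags in
 * mlx5_packet_pacing_caps. The _example function names are made up;
 * assumes the mlx5-abi.h definitions from this diff are installed.
 */
#include <rdma/mlx5-abi.h>

static inline int mlx5_esp_aes_gcm_supported_example(
		const struct mlx5_ib_alloc_ucontext_resp *resp)
{
	return !!(resp->flow_action_flags &
		  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM);
}

static inline int mlx5_pp_burst_supported_example(
		const struct mlx5_packet_pacing_caps *caps)
{
	return !!(caps->cap_flags & MLX5_IB_PP_SUPPORT_BURST);
}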
struct mlx5_ib_create_qp { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u32 sq_wqe_count; __u32 rq_wqe_count; __u32 rq_wqe_shift; @@ -287,8 +300,8 @@ struct mlx5_ib_create_qp { __u32 uidx; __u32 bfreg_index; union { - __u64 sq_buf_addr; - __u64 access_key; + __aligned_u64 sq_buf_addr; + __aligned_u64 access_key; }; }; @@ -314,12 +327,13 @@ enum mlx5_rx_hash_fields { MLX5_RX_HASH_DST_PORT_TCP = 1 << 5, MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6, MLX5_RX_HASH_DST_PORT_UDP = 1 << 7, + MLX5_RX_HASH_IPSEC_SPI = 1 << 8, /* Save bits for future fields */ MLX5_RX_HASH_INNER = (1UL << 31), }; struct mlx5_ib_create_qp_rss { - __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ + __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */ __u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */ __u8 rx_key_len; /* valid only for Toeplitz */ __u8 reserved[6]; @@ -330,6 +344,7 @@ struct mlx5_ib_create_qp_rss { struct mlx5_ib_create_qp_resp { __u32 bfreg_index; + __u32 reserved; }; struct mlx5_ib_alloc_mw { @@ -344,8 +359,8 @@ enum mlx5_ib_create_wq_mask { }; struct mlx5_ib_create_wq { - __u64 buf_addr; - __u64 db_addr; + __aligned_u64 buf_addr; + __aligned_u64 db_addr; __u32 rq_wqe_count; __u32 rq_wqe_shift; __u32 user_index; @@ -362,6 +377,18 @@ struct mlx5_ib_create_ah_resp { __u8 reserved[6]; }; +struct mlx5_ib_burst_info { + __u32 max_burst_sz; + __u16 typical_pkt_sz; + __u16 reserved; +}; + +struct mlx5_ib_modify_qp { + __u32 comp_mask; + struct mlx5_ib_burst_info burst_info; + __u32 reserved; +}; + struct mlx5_ib_modify_qp_resp { __u32 response_length; __u32 dctn; @@ -385,13 +412,13 @@ struct mlx5_ib_modify_wq { struct mlx5_ib_clock_info { __u32 sign; __u32 resv; - __u64 nsec; - __u64 cycles; - __u64 frac; + __aligned_u64 nsec; + __aligned_u64 cycles; + __aligned_u64 frac; __u32 mult; __u32 shift; - __u64 mask; - __u64 overflow_period; + __aligned_u64 mask; + __aligned_u64 overflow_period; }; enum mlx5_ib_mmap_cmd { @@ -403,6 +430,7 @@ enum mlx5_ib_mmap_cmd { MLX5_IB_MMAP_CORE_CLOCK = 5, MLX5_IB_MMAP_ALLOC_WC = 6, MLX5_IB_MMAP_CLOCK_INFO = 7, + MLX5_IB_MMAP_DEVICE_MEM = 8, }; enum { diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h new file mode 100644 index 000000000000..f7d685ef2d1f --- /dev/null +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_USER_IOCTL_CMDS_H +#define MLX5_USER_IOCTL_CMDS_H + +#include <rdma/ib_user_ioctl_cmds.h> + +enum mlx5_ib_create_flow_action_attrs { + /* This attribute belongs to the driver namespace */ + MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum mlx5_ib_alloc_dm_attrs { + MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX, +}; + +#endif diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h new file mode 100644 index 000000000000..8a2fb33f3ed4 --- /dev/null +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#ifndef MLX5_USER_IOCTL_VERBS_H +#define MLX5_USER_IOCTL_VERBS_H + +#include <linux/types.h> + +enum mlx5_ib_uapi_flow_action_flags { + MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0, +}; + +#endif + diff --git a/include/uapi/rdma/mthca-abi.h b/include/uapi/rdma/mthca-abi.h index 3020d8a907a7..ac756cd9e807 100644 --- a/include/uapi/rdma/mthca-abi.h +++ b/include/uapi/rdma/mthca-abi.h @@ -74,8 +74,8 @@ struct mthca_reg_mr { struct mthca_create_cq { __u32 lkey; __u32 pdn; - __u64 arm_db_page; - __u64 set_db_page; + __aligned_u64 arm_db_page; + __aligned_u64 set_db_page; __u32 arm_db_index; __u32 set_db_index; }; @@ -93,7 +93,7 @@ struct mthca_resize_cq { struct mthca_create_srq { __u32 lkey; __u32 db_index; - __u64 db_page; + __aligned_u64 db_page; }; struct mthca_create_srq_resp { @@ -104,8 +104,8 @@ struct mthca_create_srq_resp { struct mthca_create_qp { __u32 lkey; __u32 reserved; - __u64 sq_db_page; - __u64 rq_db_page; + __aligned_u64 sq_db_page; + __aligned_u64 rq_db_page; __u32 sq_db_index; __u32 rq_db_index; }; diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h index f5b2437aab28..35bfd4015d07 100644 --- a/include/uapi/rdma/nes-abi.h +++ b/include/uapi/rdma/nes-abi.h @@ -72,14 +72,14 @@ struct nes_alloc_pd_resp { }; struct nes_create_cq_req { - __u64 user_cq_buffer; + __aligned_u64 user_cq_buffer; __u32 mcrqf; __u8 reserved[4]; }; struct nes_create_qp_req { - __u64 user_wqe_buffers; - __u64 user_qp_buffer; + __aligned_u64 user_wqe_buffers; + __aligned_u64 user_qp_buffer; }; enum iwnes_memreg_type { diff --git a/include/uapi/rdma/ocrdma-abi.h b/include/uapi/rdma/ocrdma-abi.h index ad64a3cea1cd..284d47b41f6e 100644 --- a/include/uapi/rdma/ocrdma-abi.h +++ b/include/uapi/rdma/ocrdma-abi.h @@ -55,17 +55,17 @@ struct ocrdma_alloc_ucontext_resp { __u32 wqe_size; __u32 max_inline_data; __u32 dpp_wqe_size; - __u64 ah_tbl_page; + __aligned_u64 ah_tbl_page; __u32 ah_tbl_len; __u32 rqe_size; __u8 fw_ver[32]; /* for future use/new features in progress */ - __u64 rsvd1; - __u64 rsvd2; + __aligned_u64 rsvd1; + __aligned_u64 rsvd2; }; struct ocrdma_alloc_pd_ureq { - __u64 rsvd1; + __u32 rsvd[2]; }; struct ocrdma_alloc_pd_uresp { @@ -73,7 +73,7 @@ struct ocrdma_alloc_pd_uresp { __u32 dpp_enabled; __u32 dpp_page_addr_hi; __u32 dpp_page_addr_lo; - __u64 rsvd1; + __u32 rsvd[2]; }; struct ocrdma_create_cq_ureq { @@ -87,13 +87,13 @@ struct ocrdma_create_cq_uresp { __u32 page_size; __u32 num_pages; __u32 max_hw_cqe; - __u64 page_addr[MAX_CQ_PAGES]; - __u64 db_page_addr; + __aligned_u64 page_addr[MAX_CQ_PAGES]; + __aligned_u64 db_page_addr; __u32 db_page_size; __u32 phase_change; /* for future use/new features in progress */ - __u64 rsvd1; - __u64 rsvd2; + __aligned_u64 rsvd1; + __aligned_u64 rsvd2; }; #define MAX_QP_PAGES 8 @@ -115,9 +115,9 @@ struct ocrdma_create_qp_uresp { __u32 rq_page_size; __u32 num_sq_pages; __u32 num_rq_pages; - __u64 sq_page_addr[MAX_QP_PAGES]; - __u64 rq_page_addr[MAX_QP_PAGES]; - __u64 db_page_addr; + __aligned_u64 sq_page_addr[MAX_QP_PAGES]; + __aligned_u64 rq_page_addr[MAX_QP_PAGES]; + __aligned_u64 db_page_addr; __u32 db_page_size; __u32 dpp_credit; __u32 dpp_offset; @@ -126,8 +126,8 @@ struct ocrdma_create_qp_uresp { __u32 db_sq_offset; __u32 db_rq_offset; __u32 db_shift; - __u64 rsvd[11]; -} __packed; + __aligned_u64 rsvd[11]; +}; struct ocrdma_create_srq_uresp { __u16 rq_dbid; @@ -137,16 +137,16 @@ struct ocrdma_create_srq_uresp { __u32 rq_page_size; __u32 num_rq_pages; - __u64 rq_page_addr[MAX_QP_PAGES]; - __u64 db_page_addr; + 
__aligned_u64 rq_page_addr[MAX_QP_PAGES]; + __aligned_u64 db_page_addr; __u32 db_page_size; __u32 num_rqe_allocated; __u32 db_rq_offset; __u32 db_shift; - __u64 rsvd2; - __u64 rsvd3; + __aligned_u64 rsvd2; + __aligned_u64 rsvd3; }; #endif /* OCRDMA_ABI_USER_H */ diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h index 261c6db4623e..8ba098900e9a 100644 --- a/include/uapi/rdma/qedr-abi.h +++ b/include/uapi/rdma/qedr-abi.h @@ -40,7 +40,7 @@ /* user kernel communication data structures. */ struct qedr_alloc_ucontext_resp { - __u64 db_pa; + __aligned_u64 db_pa; __u32 db_size; __u32 max_send_wr; @@ -53,24 +53,27 @@ struct qedr_alloc_ucontext_resp { __u8 dpm_enabled; __u8 wids_enabled; __u16 wid_count; + __u32 reserved; }; struct qedr_alloc_pd_ureq { - __u64 rsvd1; + __aligned_u64 rsvd1; }; struct qedr_alloc_pd_uresp { __u32 pd_id; + __u32 reserved; }; struct qedr_create_cq_ureq { - __u64 addr; - __u64 len; + __aligned_u64 addr; + __aligned_u64 len; }; struct qedr_create_cq_uresp { __u32 db_offset; __u16 icid; + __u16 reserved; }; struct qedr_create_qp_ureq { @@ -79,17 +82,17 @@ struct qedr_create_qp_ureq { /* SQ */ /* user space virtual address of SQ buffer */ - __u64 sq_addr; + __aligned_u64 sq_addr; /* length of SQ buffer */ - __u64 sq_len; + __aligned_u64 sq_len; /* RQ */ /* user space virtual address of RQ buffer */ - __u64 rq_addr; + __aligned_u64 rq_addr; /* length of RQ buffer */ - __u64 rq_len; + __aligned_u64 rq_len; }; struct qedr_create_qp_uresp { @@ -105,6 +108,7 @@ struct qedr_create_qp_uresp { __u16 rq_icid; __u32 rq_db2_offset; + __u32 reserved; }; #endif /* __QEDR_USER_H__ */ diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index 4c77e2a7b07e..0ce0943fc808 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h @@ -238,6 +238,14 @@ enum rdma_nldev_command { RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */ + RDMA_NLDEV_CMD_RES_CM_ID_GET, /* can dump */ + + RDMA_NLDEV_CMD_RES_CQ_GET, /* can dump */ + + RDMA_NLDEV_CMD_RES_MR_GET, /* can dump */ + + RDMA_NLDEV_CMD_RES_PD_GET, /* can dump */ + RDMA_NLDEV_NUM_OPS }; @@ -350,6 +358,49 @@ enum rdma_nldev_attr { */ RDMA_NLDEV_ATTR_RES_KERN_NAME, /* string */ + RDMA_NLDEV_ATTR_RES_CM_ID, /* nested table */ + RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, /* nested table */ + /* + * rdma_cm_id port space. + */ + RDMA_NLDEV_ATTR_RES_PS, /* u32 */ + /* + * Source and destination socket addresses + */ + RDMA_NLDEV_ATTR_RES_SRC_ADDR, /* __kernel_sockaddr_storage */ + RDMA_NLDEV_ATTR_RES_DST_ADDR, /* __kernel_sockaddr_storage */ + + RDMA_NLDEV_ATTR_RES_CQ, /* nested table */ + RDMA_NLDEV_ATTR_RES_CQ_ENTRY, /* nested table */ + RDMA_NLDEV_ATTR_RES_CQE, /* u32 */ + RDMA_NLDEV_ATTR_RES_USECNT, /* u64 */ + RDMA_NLDEV_ATTR_RES_POLL_CTX, /* u8 */ + + RDMA_NLDEV_ATTR_RES_MR, /* nested table */ + RDMA_NLDEV_ATTR_RES_MR_ENTRY, /* nested table */ + RDMA_NLDEV_ATTR_RES_RKEY, /* u32 */ + RDMA_NLDEV_ATTR_RES_LKEY, /* u32 */ + RDMA_NLDEV_ATTR_RES_IOVA, /* u64 */ + RDMA_NLDEV_ATTR_RES_MRLEN, /* u64 */ + + RDMA_NLDEV_ATTR_RES_PD, /* nested table */ + RDMA_NLDEV_ATTR_RES_PD_ENTRY, /* nested table */ + RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, /* u32 */ + RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, /* u32 */ + + /* + * Provides logical name and index of netdevice which is + * connected to physical port. This information is relevant + * for RoCE and iWARP. 
+ * + * The netdevices which are associated with containers are + * supposed to be exported together with the GID table once it + * is exposed through netlink, because the + * associated netdevices are properties of GIDs. + */ + RDMA_NLDEV_ATTR_NDEV_INDEX, /* u32 */ + RDMA_NLDEV_ATTR_NDEV_NAME, /* string */ + RDMA_NLDEV_ATTR_MAX }; #endif /* _UAPI_RDMA_NETLINK_H */ diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h index c83ef0026079..e1269024af47 100644 --- a/include/uapi/rdma/rdma_user_cm.h +++ b/include/uapi/rdma/rdma_user_cm.h @@ -70,6 +70,14 @@ enum { RDMA_USER_CM_CMD_JOIN_MCAST }; +/* See IBTA Annex A11, service ID bytes 4 & 5 */ +enum rdma_ucm_port_space { + RDMA_PS_IPOIB = 0x0002, + RDMA_PS_IB = 0x013F, + RDMA_PS_TCP = 0x0106, + RDMA_PS_UDP = 0x0111, +}; + /* * command ABI structures. */ @@ -80,9 +88,9 @@ struct rdma_ucm_cmd_hdr { }; struct rdma_ucm_create_id { - __u64 uid; - __u64 response; - __u16 ps; + __aligned_u64 uid; + __aligned_u64 response; + __u16 ps; /* use enum rdma_ucm_port_space */ __u8 qp_type; __u8 reserved[5]; }; @@ -92,7 +100,7 @@ struct rdma_ucm_create_id_resp { }; struct rdma_ucm_destroy_id { - __u64 response; + __aligned_u64 response; __u32 id; __u32 reserved; }; @@ -102,7 +110,7 @@ struct rdma_ucm_destroy_id_resp { }; struct rdma_ucm_bind_ip { - __u64 response; + __aligned_u64 response; struct sockaddr_in6 addr; __u32 id; }; @@ -143,13 +151,13 @@ enum { }; struct rdma_ucm_query { - __u64 response; + __aligned_u64 response; __u32 id; __u32 option; }; struct rdma_ucm_query_route_resp { - __u64 node_guid; + __aligned_u64 node_guid; struct ib_user_path_rec ib_route[2]; struct sockaddr_in6 src_addr; struct sockaddr_in6 dst_addr; @@ -159,7 +167,7 @@ }; struct rdma_ucm_query_addr_resp { - __u64 node_guid; + __aligned_u64 node_guid; __u8 port_num; __u8 reserved; __u16 pkey; @@ -210,7 +218,7 @@ struct rdma_ucm_listen { }; struct rdma_ucm_accept { - __u64 uid; + __aligned_u64 uid; struct rdma_ucm_conn_param conn_param; __u32 id; __u32 reserved; }; @@ -228,7 +236,7 @@ struct rdma_ucm_disconnect { }; struct rdma_ucm_init_qp_attr { - __u64 response; + __aligned_u64 response; __u32 id; __u32 qp_state; }; @@ -239,8 +247,8 @@ struct rdma_ucm_notify { }; struct rdma_ucm_join_ip_mcast { - __u64 response; /* rdma_ucm_create_id_resp */ - __u64 uid; + __aligned_u64 response; /* rdma_ucm_create_id_resp */ + __aligned_u64 uid; struct sockaddr_in6 addr; __u32 id; }; @@ -253,8 +261,8 @@ enum { }; struct rdma_ucm_join_mcast { - __u64 response; /* rdma_ucma_create_id_resp */ - __u64 uid; + __aligned_u64 response; /* rdma_ucma_create_id_resp */ + __aligned_u64 uid; __u32 id; __u16 addr_size; __u16 join_flags; @@ -262,18 +270,23 @@ struct rdma_ucm_join_mcast { }; struct rdma_ucm_get_event { - __u64 response; + __aligned_u64 response; }; struct rdma_ucm_event_resp { - __u64 uid; + __aligned_u64 uid; __u32 id; __u32 event; __u32 status; + /* + * NOTE: This union is not aligned to 8 bytes so none of the union + * members may contain a u64 or anything with higher alignment than 4.
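/*
 * Illustrative sketch (hypothetical, not part of the patch above): the
 * NOTE just stated can be enforced at build time. In the event response
 * the union starts at offset 20 (8 + 4 + 4 + 4), which is 8-byte
 * misaligned by design. Assumes C11 and the rdma_user_cm.h definitions
 * from this diff.
 */
#include <stddef.h>
#include <rdma/rdma_user_cm.h>

_Static_assert(offsetof(struct rdma_ucm_event_resp, param) % 8 == 4,
	       "param union is deliberately not 8-byte aligned");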
+ */ union { struct rdma_ucm_conn_param conn; struct rdma_ucm_ud_param ud; } param; + __u32 reserved; }; /* Option levels */ @@ -291,7 +304,7 @@ enum { }; struct rdma_ucm_set_option { - __u64 optval; + __aligned_u64 optval; __u32 id; __u32 level; __u32 optname; @@ -299,7 +312,7 @@ }; struct rdma_ucm_migrate_id { - __u64 response; + __aligned_u64 response; __u32 id; __u32 fd; }; diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h index 46de0885e800..d223f4164a0f 100644 --- a/include/uapi/rdma/rdma_user_ioctl.h +++ b/include/uapi/rdma/rdma_user_ioctl.h @@ -34,49 +34,13 @@ #ifndef RDMA_USER_IOCTL_H #define RDMA_USER_IOCTL_H -#include <linux/types.h> -#include <linux/ioctl.h> #include <rdma/ib_user_mad.h> #include <rdma/hfi/hfi1_ioctl.h> +#include <rdma/rdma_user_ioctl_cmds.h> -/* Documentation/ioctl/ioctl-number.txt */ -#define RDMA_IOCTL_MAGIC 0x1b /* Legacy name, for user space applications which already use it */ #define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC -#define RDMA_VERBS_IOCTL \ - _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr) - -#define UVERBS_ID_NS_MASK 0xF000 -#define UVERBS_ID_NS_SHIFT 12 - -enum { - /* User input */ - UVERBS_ATTR_F_MANDATORY = 1U << 0, - /* - * Valid output bit should be ignored and considered set in - * mandatory fields. This bit is kernel output. - */ - UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1, -}; - -struct ib_uverbs_attr { - __u16 attr_id; /* command specific type attribute */ - __u16 len; /* only for pointers */ - __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */ - __u16 reserved; - __aligned_u64 data; /* ptr to command, inline data or idr/fd */ -}; - -struct ib_uverbs_ioctl_hdr { - __u16 length; - __u16 object_id; - __u16 method_id; - __u16 num_attrs; - __aligned_u64 reserved; - struct ib_uverbs_attr attrs[0]; -}; - /* * General blocks assignments * It is closed on purpose; do not expose it to user space diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h new file mode 100644 index 000000000000..1da5a1e1f3a8 --- /dev/null +++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT.
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index bdeea948b2f3..1f8a9e7daea4 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -35,6 +35,9 @@
 #define RDMA_USER_RXE_H

 #include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>

 union rxe_gid {
 	__u8	raw[16];
@@ -55,16 +58,17 @@ struct rxe_global_route {

 struct rxe_av {
 	__u8			port_num;
 	__u8			network_type;
+	__u16			reserved1;
+	__u32			reserved2;
 	struct rxe_global_route	grh;
 	union {
-		struct sockaddr		_sockaddr;
 		struct sockaddr_in	_sockaddr_in;
 		struct sockaddr_in6	_sockaddr_in6;
 	} sgid_addr, dgid_addr;
 };

 struct rxe_send_wr {
-	__u64			wr_id;
+	__aligned_u64		wr_id;
 	__u32			num_sge;
 	__u32			opcode;
 	__u32			send_flags;
@@ -74,36 +78,42 @@ struct rxe_send_wr {
 	} ex;
 	union {
 		struct {
-			__u64	remote_addr;
+			__aligned_u64 remote_addr;
 			__u32	rkey;
+			__u32	reserved;
 		} rdma;
 		struct {
-			__u64	remote_addr;
-			__u64	compare_add;
-			__u64	swap;
+			__aligned_u64 remote_addr;
+			__aligned_u64 compare_add;
+			__aligned_u64 swap;
 			__u32	rkey;
+			__u32	reserved;
 		} atomic;
 		struct {
 			__u32	remote_qpn;
 			__u32	remote_qkey;
 			__u16	pkey_index;
 		} ud;
+		/* reg is only used by the kernel and is not part of the uapi */
 		struct {
-			struct ib_mr *mr;
+			union {
+				struct ib_mr *mr;
+				__aligned_u64 reserved;
+			};
 			__u32	key;
-			int	access;
+			__u32	access;
 		} reg;
 	} wr;
 };

 struct rxe_sge {
-	__u64	addr;
+	__aligned_u64 addr;
 	__u32	length;
 	__u32	lkey;
 };

 struct mminfo {
-	__u64	offset;
+	__aligned_u64 offset;
 	__u32	size;
 	__u32	pad;
 };
@@ -114,6 +124,7 @@ struct rxe_dma_info {
 	__u32	cur_sge;
 	__u32	num_sge;
 	__u32	sge_offset;
+	__u32	reserved;
 	union {
 		__u8		inline_data[0];
 		struct rxe_sge	sge[0];
@@ -125,7 +136,7 @@ struct rxe_send_wqe {
 	struct rxe_av		av;
 	__u32			status;
 	__u32			state;
-	__u64			iova;
+	__aligned_u64		iova;
 	__u32			mask;
 	__u32			first_psn;
 	__u32			last_psn;
@@ -136,10 +147,33 @@ struct rxe_send_wqe {
 };

 struct rxe_recv_wqe {
-	__u64		wr_id;
+	__aligned_u64	wr_id;
 	__u32		num_sge;
 	__u32		padding;
 	struct rxe_dma_info dma;
 };

+struct rxe_create_cq_resp {
+	struct mminfo mi;
+};
+
+struct rxe_resize_cq_resp {
+	struct mminfo mi;
+};
+
+struct rxe_create_qp_resp {
+	struct mminfo rq_mi;
+	struct mminfo sq_mi;
+};
+
+struct rxe_create_srq_resp {
+	struct mminfo mi;
+	__u32 srq_num;
+	__u32 reserved;
+};
+
+struct rxe_modify_srq_cmd {
+	__aligned_u64	mmap_info_addr;
+};
+
 #endif /* RDMA_USER_RXE_H */
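The new rxe_create_cq_resp/rxe_create_qp_resp structures return a struct mminfo per queue: an (offset, size) pair that user space passes to mmap() on the uverbs file descriptor to map the queue buffer it shares with the rxe kernel driver. A hedged sketch of that step follows; map_cq_queue is an invented helper, and the uverbs command that fills in resp during CQ creation is elided.

	/* mmap_sketch.c - illustrative mapping of an rxe CQ ring */
	#include <stdio.h>
	#include <sys/mman.h>
	#include <rdma/rdma_user_rxe.h>

	void *map_cq_queue(int uverbs_fd, struct rxe_create_cq_resp *resp)
	{
		/* mi.offset is a token the kernel hands back for mmap(),
		 * mi.size is the length of the shared queue buffer. */
		void *queue = mmap(NULL, resp->mi.size,
				   PROT_READ | PROT_WRITE, MAP_SHARED,
				   uverbs_fd, resp->mi.offset);
		if (queue == MAP_FAILED) {
			perror("mmap");
			return NULL;
		}
		return queue;
	}

The mapping would be released with munmap(queue, resp->mi.size) when the CQ is destroyed.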
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index 02ca0d0f1eb7..d13fd490b66d 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -143,7 +143,7 @@ struct pvrdma_alloc_pd_resp {
 };

 struct pvrdma_create_cq {
-	__u64 buf_addr;
+	__aligned_u64 buf_addr;
 	__u32 buf_size;
 	__u32 reserved;
 };
@@ -154,13 +154,13 @@ struct pvrdma_create_cq_resp {
 };

 struct pvrdma_resize_cq {
-	__u64 buf_addr;
+	__aligned_u64 buf_addr;
 	__u32 buf_size;
 	__u32 reserved;
 };

 struct pvrdma_create_srq {
-	__u64 buf_addr;
+	__aligned_u64 buf_addr;
 	__u32 buf_size;
 	__u32 reserved;
 };
@@ -171,25 +171,25 @@ struct pvrdma_create_srq_resp {
 };

 struct pvrdma_create_qp {
-	__u64 rbuf_addr;
-	__u64 sbuf_addr;
+	__aligned_u64 rbuf_addr;
+	__aligned_u64 sbuf_addr;
 	__u32 rbuf_size;
 	__u32 sbuf_size;
-	__u64 qp_addr;
+	__aligned_u64 qp_addr;
 };

 /* PVRDMA masked atomic compare and swap */
 struct pvrdma_ex_cmp_swap {
-	__u64 swap_val;
-	__u64 compare_val;
-	__u64 swap_mask;
-	__u64 compare_mask;
+	__aligned_u64 swap_val;
+	__aligned_u64 compare_val;
+	__aligned_u64 swap_mask;
+	__aligned_u64 compare_mask;
 };

 /* PVRDMA masked atomic fetch and add */
 struct pvrdma_ex_fetch_add {
-	__u64 add_val;
-	__u64 field_boundary;
+	__aligned_u64 add_val;
+	__aligned_u64 field_boundary;
 };

 /* PVRDMA address vector. */
@@ -207,14 +207,14 @@ struct pvrdma_av {

 /* PVRDMA scatter/gather entry */
 struct pvrdma_sge {
-	__u64 addr;
+	__aligned_u64 addr;
 	__u32 length;
 	__u32 lkey;
 };

 /* PVRDMA receive queue work request */
 struct pvrdma_rq_wqe_hdr {
-	__u64 wr_id;		/* wr id */
+	__aligned_u64 wr_id;	/* wr id */
 	__u32 num_sge;		/* size of s/g array */
 	__u32 total_len;	/* reserved */
 };
@@ -222,7 +222,7 @@ struct pvrdma_rq_wqe_hdr {

 /* PVRDMA send queue work request */
 struct pvrdma_sq_wqe_hdr {
-	__u64 wr_id;		/* wr id */
+	__aligned_u64 wr_id;	/* wr id */
 	__u32 num_sge;		/* size of s/g array */
 	__u32 total_len;	/* reserved */
 	__u32 opcode;		/* operation type */
@@ -234,19 +234,19 @@ struct pvrdma_sq_wqe_hdr {
 	__u32 reserved;
 	union {
 		struct {
-			__u64 remote_addr;
+			__aligned_u64 remote_addr;
 			__u32 rkey;
 			__u8 reserved[4];
 		} rdma;
 		struct {
-			__u64 remote_addr;
-			__u64 compare_add;
-			__u64 swap;
+			__aligned_u64 remote_addr;
+			__aligned_u64 compare_add;
+			__aligned_u64 swap;
 			__u32 rkey;
 			__u32 reserved;
 		} atomic;
 		struct {
-			__u64 remote_addr;
+			__aligned_u64 remote_addr;
 			__u32 log_arg_sz;
 			__u32 rkey;
 			union {
@@ -255,13 +255,14 @@ struct pvrdma_sq_wqe_hdr {
 			} wr_data;
 		} masked_atomics;
 		struct {
-			__u64 iova_start;
-			__u64 pl_pdir_dma;
+			__aligned_u64 iova_start;
+			__aligned_u64 pl_pdir_dma;
 			__u32 page_shift;
 			__u32 page_list_len;
 			__u32 length;
 			__u32 access_flags;
 			__u32 rkey;
+			__u32 reserved;
 		} fast_reg;
 		struct {
 			__u32 remote_qpn;
@@ -274,8 +275,8 @@ struct pvrdma_sq_wqe_hdr {

 /* Completion queue element. */
 struct pvrdma_cqe {
-	__u64 wr_id;
-	__u64 qp;
+	__aligned_u64 wr_id;
+	__aligned_u64 qp;
 	__u32 opcode;
 	__u32 status;
 	__u32 byte_len;
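pvrdma_ex_cmp_swap carries the operands of an IBTA extended (masked) atomic: the compare is evaluated only under compare_mask, and only the bits under swap_mask are replaced. The plain-C function below models those semantics; it is an illustration only (masked_cmp_swap64 is an invented name, and the real operation executes remotely on the responder's HCA, not on the host).

	/* Host-side model of a masked atomic compare-and-swap. */
	#include <stdint.h>

	uint64_t masked_cmp_swap64(uint64_t *target,
				   uint64_t swap_val, uint64_t compare_val,
				   uint64_t swap_mask, uint64_t compare_mask)
	{
		uint64_t old = *target;

		/* Compare only the bits selected by compare_mask. */
		if ((old & compare_mask) == (compare_val & compare_mask))
			/* Replace only the bits selected by swap_mask. */
			*target = (old & ~swap_mask) | (swap_val & swap_mask);

		return old;	/* original value returns to the requester */
	}

pvrdma_ex_fetch_add is the analogous masked fetch-and-add, where field_boundary marks sub-field boundaries across which the addition is not allowed to carry.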
diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h
index 956455fc9f9a..bb29e5954000 100644
--- a/include/video/of_display_timing.h
+++ b/include/video/of_display_timing.h
@@ -19,7 +19,6 @@ struct display_timings;
 int of_get_display_timing(const struct device_node *np, const char *name,
 		struct display_timing *dt);
 struct display_timings *of_get_display_timings(const struct device_node *np);
-int of_display_timings_exist(const struct device_node *np);
 #else
 static inline int of_get_display_timing(const struct device_node *np,
 		const char *name, struct display_timing *dt)
@@ -31,10 +30,6 @@ of_get_display_timings(const struct device_node *np)
 {
 	return NULL;
 }
-static inline int of_display_timings_exist(const struct device_node *np)
-{
-	return -ENOSYS;
-}
 #endif

 #endif
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 9b0eb574f0d1..6d1384abfbdf 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -42,6 +42,9 @@
 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
 #define XENFEAT_mmu_pt_update_preserve_ad 5

+/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
+#define XENFEAT_highmem_assist 6
+
 /*
  * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
  * available pte bits.
@@ -60,6 +63,26 @@
 /* operation as Dom0 is supported */
 #define XENFEAT_dom0 11

+/* Xen also maps grant references at pfn = mfn.
+ * This feature flag is deprecated and should not be used.
+#define XENFEAT_grant_map_identity 12
+ */
+
+/* Guest can use XENMEMF_vnode to specify virtual node for memory op. */
+#define XENFEAT_memory_op_vnode_supported 13
+
+/* arm: Hypervisor supports ARM SMC calling convention. */
+#define XENFEAT_ARM_SMCCC_supported 14
+
+/*
+ * x86/PVH: If set, ACPI RSDP can be placed at any address. Otherwise RSDP
+ * must be located in lower 1MB, as required by ACPI Specification for IA-PC
+ * systems.
+ * This feature flag is only consulted if XEN_ELFNOTE_GUEST_OS contains
+ * the "linux" string.
+ */
+#define XENFEAT_linux_rsdp_unrestricted 15
+
 #define XENFEAT_NR_SUBMAPS 1

 #endif /* __XEN_PUBLIC_FEATURES_H__ */
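The XENFEAT_* numbers above index a feature bitmap that the guest queries from the hypervisor; with XENFEAT_NR_SUBMAPS still 1, flags 0-15 all live in submap 0 (flag N sits in submap N/32, bit N%32). A minimal kernel-side sketch of testing one of the new flags, assuming the usual HYPERVISOR_xen_version wrapper and xen_feature_info interface from the guest's Xen support code:

	/* feature_check - illustrative, following the XENVER_get_features pattern */
	#include <xen/interface/features.h>
	#include <xen/interface/version.h>
	#include <asm/xen/hypercall.h>

	static int xen_has_feature(unsigned int flag)
	{
		struct xen_feature_info fi = {
			.submap_idx = flag / 32,	/* which 32-bit submap */
		};

		if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
			return 0;

		return !!(fi.submap & (1U << (flag % 32)));
	}

	/* e.g. a PVH guest deciding where it may look for the ACPI RSDP:
	 *	if (xen_has_feature(XENFEAT_linux_rsdp_unrestricted))
	 *		... RSDP may be above the low 1MB ...
	 */

In practice the kernel caches the submaps once at boot (xen_setup_features()) rather than issuing the hypercall per query, which is the design this sketch simplifies.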